#include <boost/graph/rmat_graph_generator.hpp>
|
{"hexsha": "23179c2bf21f3775c677e63e2e5c6ea76602b69c", "size": 48, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_graph_rmat_graph_generator.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_graph_rmat_graph_generator.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_graph_rmat_graph_generator.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 24.0, "max_line_length": 47, "alphanum_fraction": 0.8333333333, "num_tokens": 10}
|
'''
Module holding all plotting code for MON-MON (monomer-monomer) collection
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
import matplotlib.ticker as mticker
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import ScalarFormatter
def aspect_ratios_Na(neg_error_flat, pos_error_flat, chs_flat,
neg_error_rand, pos_error_rand, chs_rand, save_fig=False):
    '''Plot the IPAS characteristic aggregate aspect ratio for
    different numbers of aggregates (Na) to check that the trends
    are consistent across sample sizes.
    '''
s = 25 #scatter marker size
Na = [100,300,500,1000]
N = [1,2,3,4]
    # Build plate and column colors from the Spectral colormap
    # (the colormap supersedes any hand-picked hex lists).
    cmap = cm.get_cmap('Spectral', 8)
    colors = [mpl.colors.rgb2hex(cmap(i)) for i in range(cmap.N)]  # rgb2hex accepts rgb or rgba
    colors_p = colors[:4][::-1]  # plate colors, reversed so shades darken with Na
    colors_c = colors[4:]        # column colors
phio = np.logspace(-2, 2, num=20, dtype=None)
phio_p = phio[:10]
phio_c = phio[10:]
alpha=0.03
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12,5))
plt.subplots_adjust(wspace=0.25, hspace=0.1)
###############################################
cols=[]
plates=[]
for n in range(len(Na)):
ax1.fill_between(phio_p, neg_error_flat[:10,n], pos_error_flat[:10,n], color=colors_p[n], alpha=alpha)
plate=ax1.scatter(phio_p, chs_flat[:10,n], c=colors_p[n], label="$n_a$={}".format(Na[n]),s=s)
ax1.fill_between(phio_c, neg_error_flat[10:,n], pos_error_flat[10:,n], color=colors_c[n], alpha=alpha)
col=ax1.scatter(phio_c, chs_flat[10:,n], c=colors_c[n], label="$n_a$={}".format(Na[n]), s=s)
cols.append(col)
plates.append(plate)
if n==3:
            #plot regression lines - quasi-horizontal orientation
m, b = np.polyfit(np.log(phio_c), np.log(chs_flat[10:,n]), 1)
y_fit = np.exp(m*np.log(phio_c) + b)
fit_col=ax1.plot(phio_c, y_fit, color='navy')
m, b = np.polyfit(np.log(phio_p), np.log(chs_flat[:10,n]), 1)
y_fit = np.exp(m*np.log(phio_p) + b) # calculate the fitted values of y
ax1.plot(phio_p, y_fit, color='darkred')
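            # (A straight-line fit in log-log space is a power-law fit:
            # the fitted curve is y = exp(b) * phio**m.)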
ax1.plot(np.logspace(-2, 0), np.logspace(-2, 0), 'k', linestyle='dashed', alpha=0.5)
ax1.plot(np.logspace(0, 2), np.logspace(0, -2), 'k', linestyle='dashed', alpha=0.5)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax1.set_ylim([0.01,1.0])
ax1.set_xlim([0.01,100.0])
ax1.grid()
ax1.set_ylabel('Aggregate Aspect Ratio')
    ax1.set_title('Quasi-Horizontal Orientation')
ax1.set_xlabel('Monomer Aspect Ratio')
###############################################
for n in range(len(Na)):
ax2.fill_between(phio_p, neg_error_rand[:10,n], pos_error_rand[:10,n], alpha=0.05, color=colors_p[n])
ax2.scatter(phio_p, chs_rand[:10,n], c=colors_p[n], s=s)
ax2.fill_between(phio_c, neg_error_rand[10:,n], pos_error_rand[10:,n], alpha=alpha, color=colors_c[n])
ax2.scatter(phio_c, chs_rand[10:,n], c=colors_c[n],s=s)
if n==3:
#plot regression lines - random
m, b = np.polyfit(np.log(phio_p), np.log(chs_rand[:10,n]), 1)
y_fit = np.exp(m*np.log(phio_p) + b) # calculate the fitted values of y
            ax2.plot(phio_p, y_fit, color='darkred')  # plot on ax2 explicitly, not the implicit current axes
m, b = np.polyfit(np.log(phio_c), np.log(chs_rand[10:,n]), 1)
            y_fit = np.exp(m*np.log(phio_c) + b) # calculate the fitted values of y
            ax2.plot(phio_c, y_fit, color='navy')
#legend
leg_labels=["$n_a$={}".format(Na[n]) for n in range(len(Na))]
leg1 = ax2.legend(plates, leg_labels, title='Plates',
bbox_to_anchor=(1.5,0.5), loc="lower right",
fontsize=14, title_fontsize=14)
leg2 = ax2.legend(cols, leg_labels, title='Columns',
bbox_to_anchor=(1.5,0), loc="lower right",
fontsize=14, title_fontsize=14)
ax2.add_artist(leg1)
ax2.add_artist(leg2)
ax2.plot(np.logspace(-2, 2), np.logspace(-2, 2), 'k', linestyle='dashed', alpha=0.5)
ax2.plot(np.logspace(0, 2), np.logspace(0, -2), 'k', linestyle='dashed', alpha=0.5)
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.set_ylim([0.01,1.0])
ax2.set_xlim([0.01,100.0])
ax2.grid()
ax2.set_xlabel('Monomer Aspect Ratio')
    ax2.set_title('Random Orientation')
    for ax in [ax1, ax2]:
        # Fixed two-decimal tick labels (no scientific notation) on both log axes;
        # FormatStrFormatter supersedes any earlier ScalarFormatter settings.
        ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
if save_fig:
plt.savefig('../plots/partI_aspectratios_Na.pdf', bbox_inches = 'tight')
def axislengths_aspectratios(phio_p, phio_c, mono_as_p, mono_cs_p, mono_as_c, mono_cs_c,
avg_phi_flat, neg_error_flat_phis, pos_error_flat_phis,
avg_as_flat, neg_error_flat_as, pos_error_flat_as,
avg_cs_flat, neg_error_flat_cs, pos_error_flat_cs,
avg_phi_rand, neg_error_rand_phis, pos_error_rand_phis,
avg_as_rand, neg_error_rand_as, pos_error_rand_as,
avg_cs_rand, neg_error_rand_cs, pos_error_rand_cs,
delta_flat_plates_major, delta_flat_plates_minor,
delta_flat_columns_major, delta_flat_columns_minor,
delta_rand_plates_major, delta_rand_plates_minor,
delta_rand_columns_major, delta_rand_columns_minor,
save_fig=True, alpha=0.3):
    '''
    Plot axis lengths and aspect ratio for each orientation as a function of
    monomer aspect ratio, averaged over 4 simulations with Na=300.
    '''
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=False, figsize=(16,10))
plt.subplots_adjust(wspace=0.25, hspace=0.1)
# Flat
ax1.plot(phio_p, avg_as_flat[:10], 'b')
ax1.fill_between(phio_p, neg_error_flat_as[:10], pos_error_flat_as[:10], color='b', alpha=alpha)
ax1.plot(phio_p, mono_as_p, 'b', linestyle='dotted')
ax1.plot(phio_p, avg_cs_flat[:10], 'orange')
ax1.fill_between(phio_p, neg_error_flat_cs[:10], pos_error_flat_cs[:10], color='orange', alpha=alpha)
ax1.plot(phio_p, mono_cs_p, 'darkorange', linestyle='dotted')
ax1.plot(phio_p, avg_phi_flat[:10], 'g')
ax1.fill_between(phio_p, neg_error_flat_phis[:10], pos_error_flat_phis[:10], color='g', alpha=alpha)
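    # Note: for columns (monomer aspect ratio > 1) the c axis is the longer
    # (major) axis, so below mono_cs_c is drawn in the major-axis color and
    # mono_as_c in the minor-axis color.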
ax1.plot(phio_c, avg_as_flat[10:], 'b')
ax1.fill_between(phio_c, neg_error_flat_as[10:], pos_error_flat_as[10:], color='b', alpha=alpha)
ax1.plot(phio_c, mono_cs_c, 'b', linestyle='dotted')
ax1.plot(phio_c, avg_cs_flat[10:], 'orange')
ax1.fill_between(phio_c, neg_error_flat_cs[10:], pos_error_flat_cs[10:], color='orange', alpha=alpha)
ax1.plot(phio_c, mono_as_c, 'darkorange', linestyle='dotted')
ax1.plot(phio_c, avg_phi_flat[10:], 'g')
ax1.fill_between(phio_c, neg_error_flat_phis[10:], pos_error_flat_phis[10:], color='g', alpha=alpha)
ax1.set_xlim([0.01,100.0])
ax1.set_ylim([0.005,350.00])
ax1.grid()
ax1.set_ylabel('Axis Lengths and Aspect Ratio')
ax1.set_title('Quasi-Horizontal Orientation')
ax1.set_xscale('log')
ax1.set_yscale('log')
###############################################################
# Random
ax2.plot(phio_p, avg_as_rand[:10], 'b', label='aggregate major axis')
ax2.fill_between(phio_p, neg_error_rand_as[:10], pos_error_rand_as[:10], color='b', alpha=alpha)
ax2.plot(phio_p, mono_as_p, 'b', linestyle='dotted', label='monomer major axis')
ax2.plot(phio_p, avg_cs_rand[:10], 'orange',label='aggregate minor axis')
ax2.fill_between(phio_p, neg_error_rand_cs[:10], pos_error_rand_cs[:10], color='orange', alpha=alpha)
ax2.plot(phio_p, mono_cs_p, 'darkorange', linestyle='dotted', label='monomer minor axis')
ax2.plot(phio_p, avg_phi_rand[:10], 'g', label='aggregate aspect ratio')
ax2.fill_between(phio_p, neg_error_rand_phis[:10], pos_error_rand_phis[:10], color='g', alpha=alpha)
ax2.plot(phio_c, avg_as_rand[10:], 'b')
ax2.fill_between(phio_c, neg_error_rand_as[10:], pos_error_rand_as[10:], color='b', alpha=alpha)
ax2.plot(phio_c, mono_cs_c, 'b', linestyle='dotted')
ax2.plot(phio_c, avg_cs_rand[10:], 'orange')
ax2.fill_between(phio_c, neg_error_rand_cs[10:], pos_error_rand_cs[10:], color='orange', alpha=alpha)
ax2.plot(phio_c, mono_as_c, 'darkorange', linestyle='dotted')
ax2.plot(phio_c, avg_phi_rand[10:], 'g')
ax2.fill_between(phio_c, neg_error_rand_phis[10:], pos_error_rand_phis[10:], color='g', alpha=alpha)
ax2.grid()
ax2.set_xlim([0.01,100.0])
ax2.set_ylim([0.005,350.00])
ax2.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax2.set_title('Random Orientation')
ax2.set_xscale('log')
ax2.set_yscale('log')
###############################################################
#CHANGE IN AXIS LENGTHS
#plates flat
ax3.plot(phio_p, delta_flat_plates_major*100, 'b', label='Major axis')
ax3.plot(phio_p, delta_flat_plates_minor*100, 'orange', label='Minor axis')
ax3.plot(phio_c, delta_flat_columns_major*100, 'b')
ax3.plot(phio_c, delta_flat_columns_minor*100, 'orange')
ax3.legend()
ax3.grid()
ax3.set_ylabel('Change in Axis Length (%)')
ax3.set_xlabel('Monomer Aspect Ratio')
ax3.set_xlim([0.01,100.0])
#ax3.set_ylim([0,250])
ax3.set_ylim([10.0,7000])
ax3.set_xscale('log')
ax3.set_yscale('log')
###############################################################
#plates random
ax4.plot(phio_p, delta_rand_plates_major*100, 'b', label='Major axis')
ax4.plot(phio_p, delta_rand_plates_minor*100, 'orange', label='Minor axis')
ax4.plot(phio_c, delta_rand_columns_major*100, 'b')
ax4.plot(phio_c, delta_rand_columns_minor*100, 'orange')
ax4.legend()
ax4.grid()
ax4.set_xscale('log')
ax4.set_yscale('log')
ax4.set_xlim([0.01,100.0])
ax4.set_ylim([10.0,7000])
ax4.set_xlabel('Monomer Aspect Ratio')
    for ax in [ax1, ax2, ax3, ax4]:
        if ax is ax1 or ax is ax2:
            # Axis-length panels: fixed two-decimal y tick labels
            ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        else:
            # Percent-change panels: plain (non-scientific) y tick labels
            yfmt = mticker.ScalarFormatter()
            yfmt.set_scientific(False)
            yfmt.set_useOffset(False)
            ax.yaxis.set_major_formatter(yfmt)
        # Plain (non-scientific) x tick labels on every panel
        xfmt = mticker.ScalarFormatter()
        xfmt.set_scientific(False)
        xfmt.set_useOffset(False)
        ax.xaxis.set_major_formatter(xfmt)
if save_fig:
plt.savefig('../plots/partI_axislengths_aspectratios.pdf', bbox_inches = 'tight')
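# A minimal usage sketch for aspect_ratios_Na (illustrative only; assumes the
# error/characteristic arrays are shaped (20, 4): 20 log-spaced monomer aspect
# ratios by the 4 values of Na):
#
#   chs = np.random.uniform(0.1, 1.0, (20, 4))
#   err = 0.1 * chs
#   aspect_ratios_Na(chs - err, chs + err, chs,
#                    chs - err, chs + err, chs, save_fig=False)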
|
{"hexsha": "d92cbe96143504b1374bd44e9dace94d792ac26f", "size": 11606, "ext": "py", "lang": "Python", "max_stars_repo_path": "ipas/visualizations/part_I_plots.py", "max_stars_repo_name": "vprzybylo/IPAS", "max_stars_repo_head_hexsha": "9c9268097b9d7d02be1b14671b8fbfc1818e02c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ipas/visualizations/part_I_plots.py", "max_issues_repo_name": "vprzybylo/IPAS", "max_issues_repo_head_hexsha": "9c9268097b9d7d02be1b14671b8fbfc1818e02c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-05-09T02:22:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:53:05.000Z", "max_forks_repo_path": "ipas/visualizations/part_I_plots.py", "max_forks_repo_name": "vprzybylo/IPAS", "max_forks_repo_head_hexsha": "9c9268097b9d7d02be1b14671b8fbfc1818e02c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.1788617886, "max_line_length": 110, "alphanum_fraction": 0.6328623126, "include": true, "reason": "import numpy", "num_tokens": 3349}
|
// Copyright (c) 2019-2021 Xenios SEZC
// https://www.veriblock.org
// Distributed under the MIT software license, see the accompanying
// file LICENSE or http://www.opensource.org/licenses/mit-license.php.
#include <boost/test/unit_test.hpp>
#include <algorithm>
#include <chain.h>
#include <test/util/setup_common.h>
#include <validation.h>
#include <wallet/wallet.h>
#include "vbk/genesis_common.hpp"
#include "vbk/merkle.hpp"
BOOST_AUTO_TEST_SUITE(vbk_merkle_tests)
struct MerkleFixture {
    // this initializes the VeriBlock services
TestChain100Setup blockchain;
CScript cbKey = CScript() << ToByteVector(blockchain.coinbaseKey.GetPubKey()) << OP_CHECKSIG;
};
BOOST_FIXTURE_TEST_CASE(genesis_block_hash_is_valid, MerkleFixture)
{
CBlock block = VeriBlock::CreateGenesisBlock(
1337, 36282504, 0x1d0fffff, 1, 50 * COIN,
"047c62bbf7f5aa4dd5c16bad99ac621b857fac4e93de86e45f5ada73404eeb44dedcf377b03c14a24e9d51605d9dd2d8ddaef58760d9c4bb82d9c8f06d96e79488",
"VeriBlock");
BlockValidationState state;
bool result = VeriBlock::VerifyTopLevelMerkleRoot(block, nullptr, state);
BOOST_CHECK(result);
BOOST_CHECK(state.IsValid());
}
BOOST_FIXTURE_TEST_CASE(TestChain100Setup_has_valid_merkle_roots, MerkleFixture)
{
SelectParams("regtest");
BlockValidationState state;
CBlock block;
int MAX = 1000;
while(ChainActive().Height() < MAX) {
blockchain.CreateAndProcessBlock({}, cbKey);
}
for (int i = 0; i <= ChainActive().Height(); i++) {
CBlockIndex* index = ChainActive()[i];
        BOOST_REQUIRE_MESSAGE(index != nullptr, "cannot find block at given height");
        BOOST_REQUIRE_MESSAGE(ReadBlockFromDisk(block, index, Params().GetConsensus()), "cannot read block");
BOOST_CHECK_MESSAGE(VeriBlock::VerifyTopLevelMerkleRoot(block, index->pprev, state), strprintf("merkle root of block %d is invalid", i));
}
}
BOOST_AUTO_TEST_SUITE_END()
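// To run only this suite with Boost.Test's filtering (the unit-test binary
// name below is an assumption, not specified in this file):
//   ./test_bitcoin --run_test=vbk_merkle_tests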
|
{"hexsha": "6c2d5b0141df14d72e3a20b057052c27b2ce78db", "size": 1947, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vbk/test/unit/vbk_merkle_tests.cpp", "max_stars_repo_name": "VeriBlock/b", "max_stars_repo_head_hexsha": "1c2dccb1f87251b72049b75cc4db630c4da1b5c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2020-05-14T11:49:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T19:54:54.000Z", "max_issues_repo_path": "src/vbk/test/unit/vbk_merkle_tests.cpp", "max_issues_repo_name": "VeriBlock/b", "max_issues_repo_head_hexsha": "1c2dccb1f87251b72049b75cc4db630c4da1b5c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 125.0, "max_issues_repo_issues_event_min_datetime": "2020-01-16T11:02:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T12:27:13.000Z", "max_forks_repo_path": "src/vbk/test/unit/vbk_merkle_tests.cpp", "max_forks_repo_name": "VeriBlock/b", "max_forks_repo_head_hexsha": "1c2dccb1f87251b72049b75cc4db630c4da1b5c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2020-04-06T14:31:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T07:50:29.000Z", "avg_line_length": 33.5689655172, "max_line_length": 145, "alphanum_fraction": 0.7365177196, "num_tokens": 545}
|
\section*{Introduction to Volume II}
\label{sec:introduction-2}
\addcontentsline{toc}{section}{\nameref{sec:introduction-2}}
\markboth{Introduction to Volume II}{Introduction to Volume II}
This report is submitted to the Attorney General pursuant to 28~C.F.R. \S~600.8(c), which states that, ``[a]t the conclusion of the Special Counsel's work, he \dots\ shall provide the Attorney General a confidential report explaining the prosecution or declination decisions [the Special Counsel] reached.''
Beginning in 2017, the President of the United States took a variety of actions towards the ongoing FBI investigation into Russia's interference in the 2016 presidential election and related matters that raised questions about whether he had obstructed justice.
The Order appointing the Special Counsel gave this Office jurisdiction to investigate matters that arose directly from the FBI's Russia investigation, including whether the President had obstructed justice in connection with Russia-related investigations.
The Special Counsel's jurisdiction also covered potentially obstructive acts related to the Special Counsel's investigation itself.
This Volume of our report summarizes our obstruction-of-justice investigation of the President.
We first describe the considerations that guided our obstruction-of-justice investigation, and then provide an overview of this Volume:
\textit{First}, a traditional prosecution or declination decision entails a binary determination to initiate or decline a prosecution, but we determined not to make a traditional prosecutorial judgment.
The Office of Legal Counsel (OLC) has issued an opinion finding that ``the indictment or criminal prosecution of a sitting President would impermissibly undermine the capacity of the executive branch to perform its constitutionally assigned functions'' in violation of ``the constitutional separation of powers.''% 1
\footnote{\textit{A Sitting President's Amenability to Indictment and Criminal Prosecution}, 24~Op.\ O.L.C. 222, 260~(2000) (OLC Op.).}
Given the role of the Special Counsel as an attorney in the Department of Justice and the framework of the Special Counsel regulations, \textit{see} 28~U.S.C. \S~515; 28~C.F.R. \S~600.7(a), this Office accepted OLC's legal conclusion for the purpose of exercising prosecutorial jurisdiction.
And apart from OLC's constitutional view, we recognized that a federal criminal accusation against a sitting President would place burdens on the President's capacity to govern and potentially preempt constitutional processes for addressing presidential misconduct.% 2
\footnote{\textit{See} \textsc{U.S. Const.\ Art.~I} \S~2, cl.~5; \S~3, cl.~6; \textit{cf.} OLC Op.\ at~257--258 (discussing relationship between impeachment and criminal prosecution of a sitting President).}
\textit{Second}, while the OLC opinion concludes that a sitting President may not be prosecuted, it recognizes that a criminal investigation during the President's term is permissible.% 3
\footnote{OLC Op.\ at~257 n.36 (``A grand jury could continue to gather evidence throughout the period of immunity'').}
The OLC opinion also recognizes that a President does not have immunity after he leaves office.% 4
\footnote{OLC Op.\ at~255 (``Recognizing an immunity from prosecution for a sitting President would not preclude such prosecution once the President's term is over or he is otherwise removed from office by resignation or impeachment'').}
And if individuals other than the President committed an obstruction offense, they may be prosecuted at this time.
Given those considerations, the facts known to us, and the strong public interest in safeguarding the integrity of the criminal justice system, we conducted a thorough factual investigation in order to preserve the evidence when memories were fresh and documentary materials were available.
\textit{Third}, we considered whether to evaluate the conduct we investigated under the Justice Manual standards governing prosecution and declination decisions, but we determined not to apply an approach that could potentially result in a judgment that the President committed crimes.
The threshold step under the Justice Manual standards is to assess whether a person's conduct
``constitutes a federal offense.''
U.S. Dep't of Justice, Justice Manual \S~9-27.220~(2018) (Justice Manual).
Fairness concerns counseled against potentially reaching that judgment when no charges can be brought.
The ordinary means for an individual to respond to an accusation is through a speedy and public trial, with all the procedural protections that surround a criminal case.
An individual who believes he was wrongly accused can use that process to seek to clear his name.
In contrast, a prosecutor's judgment that crimes were committed, but that no charges will be brought, affords no such adversarial opportunity for public name-clearing before an impartial adjudicator.% 5
\footnote{For that reason, criticisms have been lodged against the practice of naming unindicted co-conspirators in an indictment.
\textit{See United States~v.\ Briggs}, 514~F.2d 794, 802 (5th~Cir.~1975) (``The courts have struck down with strong language efforts by grand juries to accuse persons of crime while affording them no forum in which to vindicate themselves.'');
\textit{see also} Justice Manual \S~9-11.130.}
The concerns about the fairness of such a determination would be heightened in the case of a sitting President, where a federal prosecutor's accusation of a crime, even in an internal report, could carry consequences that extend beyond the realm of criminal justice.
OLC noted similar concerns about sealed indictments.
Even if an indictment were sealed during the President's term, OLC reasoned, ``it would be very difficult to preserve [an indictment's] secrecy,'' and if an indictment became public, ``[t]he stigma and opprobrium'' could imperil the President's ability to govern.% 6
\footnote{OLC Op.\ at~259 \& n.38 (citation omitted).}
Although a prosecutor's internal report would not represent a formal public accusation akin to an indictment, the possibility of the report's public disclosure and the absence of a neutral adjudicatory forum to review its findings counseled against potentially determining ``that the person's conduct constitutes a federal offense.''
Justice Manual \S~9-27.220.
\textit{Fourth}, if we had confidence after a thorough investigation of the facts that the President clearly did not commit obstruction of justice, we would so state.
Based on the facts and the applicable legal standards, however, we are unable to reach that judgment.
The evidence we obtained about the President's actions and intent presents difficult issues that prevent us from
conclusively determining that no criminal conduct occurred.
Accordingly, while this report does not conclude that the President committed a crime, it also does not exonerate him.
\hr
This report on our investigation consists of four parts.
\hyperlink{section.2.1}{Section~I} provides an overview of obstruction-of-justice principles and summarizes certain investigatory and evidentiary considerations.
\hyperlink{section.2.2}{Section~II} sets forth the factual results of our obstruction investigation and analyzes the evidence.
\hyperlink{section.2.3}{Section~III} addresses statutory and constitutional defenses.
\hyperlink{section.2.4}{Section~IV} states our conclusion.
|
{"hexsha": "26213f001e5024c61c70508eb0fb9d9223f53d0c", "size": 7416, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/volume-2/introduction.tex", "max_stars_repo_name": "ascherer/mueller-report", "max_stars_repo_head_hexsha": "3aa16a20104f48623ce8e12c8502ecb1867a40f8", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2019-04-20T13:29:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T13:32:17.000Z", "max_issues_repo_path": "src/volume-2/introduction.tex", "max_issues_repo_name": "ascherer/mueller-report", "max_issues_repo_head_hexsha": "3aa16a20104f48623ce8e12c8502ecb1867a40f8", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2019-04-20T13:38:54.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-02T03:01:19.000Z", "max_forks_repo_path": "src/volume-2/introduction.tex", "max_forks_repo_name": "mds08011/multi-publish", "max_forks_repo_head_hexsha": "d8fac96fa2d04aa31516d7079533b20703d8dfee", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-04-20T21:02:20.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-10T19:38:44.000Z", "avg_line_length": 121.5737704918, "max_line_length": 333, "alphanum_fraction": 0.8077130529, "num_tokens": 1564}
|
using JSON
using DataDeps # provides the datadep"..." string macro used below
using BytePairEncoding
using BytePairEncoding: UnMap
using Transformers.Basic
abstract type GPT2 <: PretrainedTokenizer end
# wrapper for GPT2 Tokenizer with required functionalities
"""
struct GPT2Tokenizer <: GPT2
encoder::Vocabulary{String}
bpe_encode::GenericBPE
bpe_decode::UnMap
vocab::Dict{String, Any}
unk_token::String
unk_id::Int
eos_token::String
eos_token_id::Int
pad_token::String
pad_token_id::Int
end
Structure holding all essential information and functions for the GPT2 tokenizer.
"""
struct GPT2Tokenizer <: GPT2
encoder::Vocabulary{String}
bpe_encode::GenericBPE
bpe_decode::UnMap
vocab::Dict{String, Any}
unk_token::String
unk_id::Int
eos_token::String
eos_token_id::Int
pad_token::String
pad_token_id::Int
end
"""
load_pretrained_tokenizer(ty::Type{T}; unk_token="<|endoftext|>", eos_token="<|endoftext|>", pad_token="<|endoftext|>") where T<:PretrainedTokenizer
Load the GPT2 tokenizer, fetching the pretrained BPE and vocab files via DataDeps. Returns the tokenizer as a `GPT2Tokenizer` structure.
"""
function load_pretrained_tokenizer(ty::Type{T}; unk_token="<|endoftext|>", eos_token="<|endoftext|>", pad_token="<|endoftext|>") where T<:PretrainedTokenizer
path_bpe = joinpath(datadep"BPE", readdir(datadep"BPE")[1])
path_vocab = joinpath(datadep"Vocab", readdir(datadep"Vocab")[1])
load_pretrained_tokenizer(path_bpe, path_vocab, unk_token, eos_token, pad_token)
end
"""
load_pretrained_tokenizer(path_bpe, path_vocab, unk_token, eos_token, pad_token)
Load the pretrained GPT2 tokenizer from the provided BPE and vocab file paths. Initializes `unk_token`, `eos_token` and `pad_token` as provided. Returns the tokenizer as a `GPT2Tokenizer` structure.
"""
function load_pretrained_tokenizer(path_bpe, path_vocab, unk_token, eos_token, pad_token)
vocab = JSON.parsefile(path_vocab)
labels = map(x->x[1], sort!(collect(pairs(vocab)), by=x->x[2]))
encoder = Vocabulary(labels, unk_token)
bpe_encode = ByteLevelBPE(path_bpe)
bpe_decode = BytePairEncoding.UnMap(bpe_encode.codemap)
    GPT2Tokenizer(encoder, bpe_encode, bpe_decode, vocab, unk_token, encoder(unk_token),
                eos_token, encoder(eos_token), pad_token, encoder(eos_token)) # note: the pad id reuses the eos id (GPT2 has no dedicated pad token)
end
"""
tokenize(t::GPT2Tokenizer, text::AbstractString)
Tokenize the given `text` with the tokenizer's BPE encoder (`t.bpe_encode`). Returns a vector of string tokens.
"""
function tokenize(t::GPT2Tokenizer, text::AbstractString)
t.bpe_encode(text)
end
"""
encode(t::GPT2Tokenizer, text::AbstractString; add_prefix_space=false)
Return the encoded vector of token ids (mapped through the tokenizer's vocab) for `text`. If `add_prefix_space=true`, a space is prepended to `text` before tokenization.
# Example
For single text:
```julia
encode(tokenizer, text)
```
For vector of text:
```julia
map(x->encode(tokenizer, x), text_vector)
```
"""
function encode(t::GPT2Tokenizer, text::AbstractString; add_prefix_space=false)
    if add_prefix_space
        text = string(" ", text)
    end
tokens = tokenize(t, text)
t.encoder(tokens)
end
"""
encode(t::GPT2Tokenizer, tokens::Vector{String})
Encode a vector of tokens to their integer ids from the tokenizer's vocab.
"""
function encode(t::GPT2Tokenizer, tokens::Vector{String})
t.encoder(tokens)
end
"""
(t::GPT2Tokenizer)(text::AbstractString; add_prefix_space=false)
Encode `text` with the tokenizer and return the encoded vector. If `add_prefix_space=true`, a space is prepended to `text` before tokenization.
# Examples:
For a single text:
```julia
tokenizer(text; add_prefix_space=true)
```
For vector of texts, use:
```julia
map(x->encode(tokenizer, x), text_vector)
# or
tokenizer.(text_vector)
```
Also checkout [`encode`](@ref PPLM.encode)
"""
function (t::GPT2Tokenizer)(text::AbstractString; add_prefix_space=false)
encode(t, text; add_prefix_space=add_prefix_space)
end
decode(vocab::Vocabulary{T}, i::Int) where T = 0 < i <= length(vocab) ? vocab.list[i] : vocab.unk # guard against i == 0: vocab.list is 1-indexed
"""
decode(vocab::Vocabulary{T}, is::Vector{Int}) where T
Return decoded vector of `string` tokens from the indices vector `is`, using the vocab.
"""
function decode(vocab::Vocabulary{T}, is::Vector{Int}) where T
tokens = Vector{String}(undef, length(is))
for (idx, i) ∈ enumerate(is)
token = decode(vocab, i)
tokens[idx] = token
end
tokens
end
"""
decode(t::GPT2Tokenizer, tokens_ids::Vector{Int})
Return the decoded vector of `string` tokens from the indices vector `tokens_ids`, using the encoder of tokenizer `t`.
"""
function decode(t::GPT2Tokenizer, tokens_ids::Vector{Int})
decode(t.encoder, tokens_ids)
end
"""
detokenize(t::GPT2Tokenizer, tokens::Vector{String})
BPE Decode the vector of strings, using the tokenizer `t`.
"""
function detokenize(t::GPT2Tokenizer, tokens::Vector{String})
t.bpe_decode(join(tokens))
end
# Example for detokenizing multiple examples: map(token_ids->detokenize(tokenizer, token_ids), tokens)
"""
detokenize(t::GPT2Tokenizer, tokens_ids::Vector{Int})
Decode and detokenize the vector of indices `tokens_ids`. Returns the final sentence after detokenization.
# Example
For single vector of token_ids:
```julia
detokenize(tokenizer, token_ids)
```
For vector of vector of `token_ids`, use:
```julia
map(x->detokenize(tokenizer, x), tokens_id_vector_of_vector)
```
"""
function detokenize(t::GPT2Tokenizer, tokens_ids::Vector{Int})
tokens_list = decode(t, tokens_ids)
detokenize(t, tokens_list)
end
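# A minimal round-trip sketch (illustrative only; assumes the "BPE" and "Vocab"
# DataDeps have been registered and can resolve):
#
#   tokenizer = load_pretrained_tokenizer(GPT2)
#   ids = tokenizer("Hello world"; add_prefix_space=true)
#   detokenize(tokenizer, ids)   # ≈ " Hello world"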
|
{"hexsha": "7d3c40beef169a1fc4eb33a7fdb74b02b240532e", "size": 5595, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tokenizer.jl", "max_stars_repo_name": "AdarshKumar712/PPLM.jl", "max_stars_repo_head_hexsha": "0b8e2a202b05fad450a49cb8a114f78216f798f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-28T08:35:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-27T08:43:30.000Z", "max_issues_repo_path": "src/tokenizer.jl", "max_issues_repo_name": "AdarshKumar712/PPLM.jl", "max_issues_repo_head_hexsha": "0b8e2a202b05fad450a49cb8a114f78216f798f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-30T12:50:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-18T09:04:57.000Z", "max_forks_repo_path": "src/tokenizer.jl", "max_forks_repo_name": "AdarshKumar712/PPLM.jl", "max_forks_repo_head_hexsha": "0b8e2a202b05fad450a49cb8a114f78216f798f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.975, "max_line_length": 202, "alphanum_fraction": 0.7285075961, "num_tokens": 1487}
|
[STATEMENT]
lemma minus_mat_limit:
fixes X :: "nat \<Rightarrow> complex mat" and A :: "complex mat" and m :: nat and B :: "complex mat"
assumes dimB: "B \<in> carrier_mat m m" and limX: "limit_mat X A m"
shows "limit_mat (mat_seq_minus X B) (A - B) m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. limit_mat (mat_seq_minus X B) (A - B) m
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. limit_mat (mat_seq_minus X B) (A - B) m
[PROOF STEP]
have dimXAB: "\<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m
[PROOF STEP]
using index_minus_mat dimB
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?i < dim_row ?B; ?j < dim_col ?B\<rbrakk> \<Longrightarrow> (?A - ?B) $$ (?i, ?j) = ?A $$ (?i, ?j) - ?B $$ (?i, ?j)
dim_row (?A - ?B) = dim_row ?B
dim_col (?A - ?B) = dim_col ?B
B \<in> carrier_mat m m
goal (1 subgoal):
1. \<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m
goal (1 subgoal):
1. limit_mat (mat_seq_minus X B) (A - B) m
[PROOF STEP]
have "(\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)" if i: "i < m" and j: "j < m" for i j
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
from limX i j
[PROOF STATE]
proof (chain)
picking this:
limit_mat X A m
i < m
j < m
[PROOF STEP]
have "(\<lambda>n. (X n) $$ (i, j)) \<longlonglongrightarrow> (A) $$ (i, j)"
[PROOF STATE]
proof (prove)
using this:
limit_mat X A m
i < m
j < m
goal (1 subgoal):
1. (\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j)
[PROOF STEP]
unfolding limit_mat_def
[PROOF STATE]
proof (prove)
using this:
\<forall>n. X n \<in> carrier_mat m m \<and> A \<in> carrier_mat m m \<and> (\<forall>i<m. \<forall>j<m. (\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j))
i < m
j < m
goal (1 subgoal):
1. (\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j)
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j)
[PROOF STEP]
have X: "\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>n. X n $$ (i, j)) \<longlonglongrightarrow> A $$ (i, j)
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
[PROOF STEP]
unfolding LIMSEQ_def
[PROOF STATE]
proof (prove)
using this:
\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
[PROOF STEP]
have XB: "\<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r" if r: "r > 0" for r
[PROOF STATE]
proof (prove)
using this:
\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
obtain no where "\<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using r X
[PROOF STATE]
proof (prove)
using this:
0 < r
\<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. (\<And>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
[PROOF STEP]
have dist: "\<forall>n\<ge>no. norm (X n $$ (i, j) - A $$ (i, j)) < r"
[PROOF STATE]
proof (prove)
using this:
\<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
[PROOF STEP]
unfolding dist_norm
[PROOF STATE]
proof (prove)
using this:
\<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
[PROOF STEP]
have "norm ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r" if n: "n \<ge> no" for n
[PROOF STATE]
proof (prove)
using this:
\<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
goal (1 subgoal):
1. cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r \<Longrightarrow> cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
have "(X n - B) $$ (i, j) - (A - B) $$ (i, j) = (X n) $$ (i, j) - A $$ (i, j)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (X n - B) $$ (i, j) - (A - B) $$ (i, j) = X n $$ (i, j) - A $$ (i, j)
[PROOF STEP]
using dimB i j
[PROOF STATE]
proof (prove)
using this:
B \<in> carrier_mat m m
i < m
j < m
goal (1 subgoal):
1. (X n - B) $$ (i, j) - (A - B) $$ (i, j) = X n $$ (i, j) - A $$ (i, j)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(X n - B) $$ (i, j) - (A - B) $$ (i, j) = X n $$ (i, j) - A $$ (i, j)
goal (1 subgoal):
1. \<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r \<Longrightarrow> cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(X n - B) $$ (i, j) - (A - B) $$ (i, j) = X n $$ (i, j) - A $$ (i, j)
[PROOF STEP]
have "norm ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = norm ((X n) $$ (i, j) - A $$ (i, j))"
[PROOF STATE]
proof (prove)
using this:
(X n - B) $$ (i, j) - (A - B) $$ (i, j) = X n $$ (i, j) - A $$ (i, j)
goal (1 subgoal):
1. cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = cmod (X n $$ (i, j) - A $$ (i, j))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = cmod (X n $$ (i, j) - A $$ (i, j))
goal (1 subgoal):
1. \<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r \<Longrightarrow> cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = cmod (X n $$ (i, j) - A $$ (i, j))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = cmod (X n $$ (i, j) - A $$ (i, j))
goal (1 subgoal):
1. cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
using dist n
[PROOF STATE]
proof (prove)
using this:
cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) = cmod (X n $$ (i, j) - A $$ (i, j))
\<forall>n\<ge>no. cmod (X n $$ (i, j) - A $$ (i, j)) < r
no \<le> n
goal (1 subgoal):
1. cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cmod ((X n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
no \<le> ?n \<Longrightarrow> cmod ((X ?n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist (X n $$ (i, j)) (A $$ (i, j)) < r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
no \<le> ?n \<Longrightarrow> cmod ((X ?n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
no \<le> ?n \<Longrightarrow> cmod ((X ?n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
goal (1 subgoal):
1. \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
using dist_norm
[PROOF STATE]
proof (prove)
using this:
no \<le> ?n \<Longrightarrow> cmod ((X ?n - B) $$ (i, j) - (A - B) $$ (i, j)) < r
dist ?x ?y = norm (?x - ?y)
goal (1 subgoal):
1. \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
0 < ?r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < ?r
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < ?r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < ?r
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < ?r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < ?r
goal (1 subgoal):
1. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
[PROOF STEP]
unfolding LIMSEQ_def
[PROOF STATE]
proof (prove)
using this:
0 < ?r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < ?r
goal (1 subgoal):
1. \<forall>r>0. \<exists>no. \<forall>n\<ge>no. dist ((X n - B) $$ (i, j)) ((A - B) $$ (i, j)) < r
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>?i < m; ?j < m\<rbrakk> \<Longrightarrow> (\<lambda>n. (X n - B) $$ (?i, ?j)) \<longlonglongrightarrow> (A - B) $$ (?i, ?j)
goal (1 subgoal):
1. limit_mat (mat_seq_minus X B) (A - B) m
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?i < m; ?j < m\<rbrakk> \<Longrightarrow> (\<lambda>n. (X n - B) $$ (?i, ?j)) \<longlonglongrightarrow> (A - B) $$ (?i, ?j)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?i < m; ?j < m\<rbrakk> \<Longrightarrow> (\<lambda>n. (X n - B) $$ (?i, ?j)) \<longlonglongrightarrow> (A - B) $$ (?i, ?j)
goal (1 subgoal):
1. limit_mat (mat_seq_minus X B) (A - B) m
[PROOF STEP]
unfolding limit_mat_def mat_seq_minus_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?i < m; ?j < m\<rbrakk> \<Longrightarrow> (\<lambda>n. (X n - B) $$ (?i, ?j)) \<longlonglongrightarrow> (A - B) $$ (?i, ?j)
goal (1 subgoal):
1. \<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m \<and> (\<forall>i<m. \<forall>j<m. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j))
[PROOF STEP]
using dimXAB
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?i < m; ?j < m\<rbrakk> \<Longrightarrow> (\<lambda>n. (X n - B) $$ (?i, ?j)) \<longlonglongrightarrow> (A - B) $$ (?i, ?j)
\<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m
goal (1 subgoal):
1. \<forall>n. X n - B \<in> carrier_mat m m \<and> A - B \<in> carrier_mat m m \<and> (\<forall>i<m. \<forall>j<m. (\<lambda>n. (X n - B) $$ (i, j)) \<longlonglongrightarrow> (A - B) $$ (i, j))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
limit_mat (mat_seq_minus X B) (A - B) m
goal:
No subgoals!
[PROOF STEP]
qed
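A compact restatement of the lemma in standard notation (a paraphrase, reading
limit_mat X A m as entrywise convergence of m-by-m complex matrices):

\[
  B \in \mathbb{C}^{m \times m} \text{ and } X_n \to A \text{ entrywise}
  \;\Longrightarrow\; X_n - B \to A - B \text{ entrywise.}
\]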
|
{"llama_tokens": 6155, "file": "QHLProver_Matrix_Limit", "length": 54}
|
import numpy as np
import numpy.testing as npt
import pandas as pd
from ioos_qartod.qc_tests import qc
# from ioos_qartod.qc_tests.qc import QCFlags
import quantities as pq
import unittest
class QartodQcTest(unittest.TestCase):
def test_lon_lat_bbox(self):
"""
Ensure that longitudes and latitudes are within reasonable bounds.
"""
lon = np.array([80.0, -78.5, 500.500])
lat = np.array([np.NaN, 50, -60])
npt.assert_array_equal(qc.location_set_check(lon, lat),
np.array([4, 1, 4]))
def test_distance_threshold(self):
"""
Tests a user defined distance threshold between successive points.
"""
lon = np.array([-71.05, -71.06, -80.0])
lat = np.array([41.0, 41.02, 45.05])
npt.assert_array_equal(qc.location_set_check(lon, lat,
range_max=3000.0),
np.array([1, 1, 3]))
def test_range_check_mixed(self):
"""See if user and sensor ranges are picked up."""
sensor_span = (10, 50)
user_span = (20, 40)
vals = np.array([5, 10, # Sensor range.
15, 20, # User range.
25, 30, 35, # Valid.
40, 45, # User range.
51]) # Sensor range.
npt.assert_array_equal(qc.range_check(vals, sensor_span, user_span),
np.array([4, 3, 3, 1, 1, 1, 1, 1, 3, 4]))
def test_climatology_check(self):
# 14 vals - 2010-01-03 to 2010-04-04,
dates = pd.date_range('2010-01-01', '2010-04-10', freq='W')
# Monthly values.
monthly_clim = {1: (6.0, 10), 2: (1.4, 6.4), 3: (4.2, 13.0)}
ts = pd.Series(np.array([12.1, 9.0, 1.3, 6.2, 9.9, # Jan
1.6, 2.0, 9.0, 4.0, # Feb
5.0, 5.5, 10.6, 16.0, # Mar
17.2 # Apr
]), dates)
expected = np.array([3, 1, 3, 1, 1, # Jan
1, 1, 3, 1, # Feb
1, 1, 1, 3, # Mar
                             2],  # Apr is flagged unknown: no climatology value exists.
dtype='i4')
results = qc.climatology_check(ts, monthly_clim, lambda t: t.month)
npt.assert_array_equal(results, expected)
def test_overlapping_threshold_ranges(self):
"""
Test to see if overlapping sensor and user ranges will throw an
exception.
"""
vals = np.array([50, 40, 20, 29])
sensor_span = (10, 30)
user_span = (20, 40)
self.assertRaises(ValueError, qc.range_check, vals, sensor_span,
user_span)
def test_bad_extent(self):
"""Tests to make sure invalid extents can't be passed in."""
vals = np.array([2, 1.0, 3.4, 5.6])
sensor_span = (1,)
user_span = np.array([[50, 60], [30, 90]])
self.assertRaises(ValueError, qc.range_check, vals, sensor_span,
user_span)
def test_spike_detection(self):
"""
Test to make ensure single value spike detection works properly.
"""
low_thresh, high_thresh = 25, 50
arr = np.array([10, 12, 999.99, 13, 15, 40, 9, 9])
times = np.array([1, 2, 3, 4, 5, 6, 7, 8]) # Contiguous time array
# First and last elements should always be good data, unless someone
# has set a threshold to zero.
expected = [1, 4, 4, 4, 1, 3, 1, 1]
npt.assert_array_equal(qc.spike_check(times, arr, low_thresh, high_thresh),
expected)
def test_spike_detection_with_time_gaps(self):
"""
Test to make ensure single value spike detection works properly with
large time gaps
"""
low_thresh, high_thresh = 25, 50
arr = np.array([10, 12, 200, 190, 180, 170, 160, 150])
times = np.array([1, 2, 13, 14, 15, 16, 17, 18]) # Adds a gap
# First and last elements should always be good data, unless someone
# has set a threshold to zero.
expected = [1, 3, 3, 1, 1, 1, 1, 1]
npt.assert_array_equal(qc.spike_check(times, arr, low_thresh, high_thresh),
expected)
def test_spike_invalid_threshold_raises_value_error(self):
"""Test that invalid ranges cause an exception to be raised."""
low_thresh, high_thresh = 50, 50
arr = np.array([10, 12, 999.99, 13, 15, 40, 9, 9])
times = np.array([1, 2, 3, 4, 5, 6, 7, 8]) # Contiguous time array
self.assertRaises(ValueError, qc.spike_check, times, arr, low_thresh,
high_thresh)
def test_rate_of_change(self):
"""
Test the rate of change with default (seconds) along with hourly
rate of change.
"""
times = np.arange('2015-01-01 00:00:00', '2015-01-01 00:00:12',
step=np.timedelta64(1, 's'), dtype=np.datetime64)
arr = np.array([2, 10, 2.1, 3, 4, 5, 7, 10, 0, 2, 2.2, 2])
prev_qc = np.array([3])
thresh_val = 5
expected = np.array([3, 3, 3, 1, 1, 1, 1, 1, 3, 1, 1, 1])
result = qc.rate_of_change_check(times, arr, thresh_val, prev_qc)
npt.assert_array_equal(expected, result)
# Now try roughly the same test with 12 hours instead of 12 seconds
# and with hourly rate of change specified.
times_hr = np.arange('2015-01-01 00:00:00', '2015-01-01 12:00:00',
step=np.timedelta64(1, 'h'), dtype=np.datetime64)
thresh_val_hr = 5 / pq.hour
result_hr = qc.rate_of_change_check(times_hr, arr, thresh_val_hr,
prev_qc)
npt.assert_array_equal(expected, result_hr)
times = np.array([], dtype=np.datetime64)
arr = np.array([])
expected = np.array([])
result = qc.rate_of_change_check(times, arr, thresh_val)
npt.assert_array_equal(result, expected)
def test_flat_line_check(self):
"""Make sure flat line check returns expected flag values."""
low_thresh = 3
high_thresh = 5
eps = 0.01
vals = np.array([1, 2, 2.0001, 2, 2.0001, 2, 2.0001, 2,
4, 5, 3, 3.0001, 3.0005, 3.00001])
npt.assert_array_equal(qc.flat_line_check(vals, low_thresh,
high_thresh, eps),
[1, 1, 1, 1, 3, 3, 4, 4, 1, 1, 1, 1, 1, 3])
# test empty array - should return empty result
arr = np.array([])
expected = np.array([])
result = qc.flat_line_check(arr, low_thresh, high_thresh, eps)
npt.assert_array_equal(expected, result)
def test_time_series_flat_line_check(self):
"""
Make sure time series flat line check returns expected flag values.
"""
# Using the default values for low_reps and high_thresh.
eps = 0.01
vals = np.array([1, 2, 2.0001, 2, 2.0001, 2, 2.0001, 2,
4, 5, 3, 3.0001, 3.0005, 3.00001])
res = qc.time_series_flat_line_check(vals, eps=eps)
npt.assert_array_equal(res, [1, 1, 1, 1, 3, 3, 4, 4, 1, 1, 1, 1, 1, 3])
def test_bad_reps(self):
"""
Test that low_reps >= high_reps raises an error in flat line check.
"""
self.assertRaises(ValueError, qc.flat_line_check, np.ones(12), 10, 6,
0.01)
def test_float_reps_raises_exception(self):
"""
Check that non-integer values for repetitions raises a TypeError
in flat line check.
"""
self.assertRaises(TypeError, qc.flat_line_check, np.ones(12),
4.5, 6.93892, 0.01)
def test_attenuated_signal_check(self):
signal = np.array([1.01, 1.02, 1.01, 1.01, 1.02, 1.03, 1.01,
1.0, 1.0, 1.0, 1.02, 1.01])
# Half hour increments.
times = np.arange('2005-02-01T00:00Z', '2005-02-01T06:00Z',
dtype='datetime64[30m]')
time_range = (np.datetime64('2005-02-01T01:30Z'),
np.datetime64('2005-02-01T04:30Z'))
min_var_fail = 0.5
min_var_warn = 0.7
flags = qc.attenuated_signal_check(signal, times, min_var_warn,
min_var_fail, time_range)
npt.assert_array_equal(flags,
np.array([2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 2, 2]))
def test_attenuated_signal_check_range(self):
"""
Test a time segment for an attenuated signal, comparing against
range.
"""
signal = np.array([3, 4, 5, 8.1, 9, 8.5, 8.7, 8.4, 8.2, 8.35, 2, 1])
# Half hour increments.
times = np.arange('2005-02-01T00:00Z', '2005-02-01T06:00Z',
dtype='datetime64[30m]')
time_range = (np.datetime64('2005-02-01T01:30Z'),
np.datetime64('2005-02-01T04:30Z'))
min_var_fail = 0.1
min_var_warn = 0.15
flags = qc.attenuated_signal_check(signal, times, min_var_warn,
min_var_fail, time_range,
check_type='range')
npt.assert_array_equal(flags, np.array([2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
2, 2]))
def test_qc_compare(self):
"""
Tests that the compare function works as intended.
"""
range_flags = np.array([1, 1, 1, 9, 1, 1, 9, 9])
spike_flags = np.array([2, 1, 1, 1, 1, 1, 9, 9])
grdtn_flags = np.array([1, 3, 3, 4, 3, 1, 2, 9])
primary_flags = qc.qc_compare([range_flags, spike_flags, grdtn_flags])
np.testing.assert_array_equal(primary_flags,
np.array([1, 3, 3, 4, 3, 1, 2, 9]))
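# Note: the integer flags asserted throughout follow the QARTOD convention
# (1 = good, 2 = unknown/not evaluated, 3 = suspect, 4 = fail, 9 = missing) --
# inferred from the expected arrays above, not stated explicitly in this file.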
|
{"hexsha": "b1520b64f6070d1b43e453e3e3bc8d3cbcf593e3", "size": 10069, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_qartod_qc.py", "max_stars_repo_name": "ioos/qartod", "max_stars_repo_head_hexsha": "eb4f1962836eec6f9ec93e56b54f5832f9b47e4a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-10-29T07:50:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-07T21:08:34.000Z", "max_issues_repo_path": "test/test_qartod_qc.py", "max_issues_repo_name": "ioos/qartod", "max_issues_repo_head_hexsha": "eb4f1962836eec6f9ec93e56b54f5832f9b47e4a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2015-07-12T01:14:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T13:15:30.000Z", "max_forks_repo_path": "test/test_qartod_qc.py", "max_forks_repo_name": "ioos/qartod", "max_forks_repo_head_hexsha": "eb4f1962836eec6f9ec93e56b54f5832f9b47e4a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-07-12T01:11:22.000Z", "max_forks_repo_forks_event_max_datetime": "2017-11-08T20:13:59.000Z", "avg_line_length": 43.7782608696, "max_line_length": 83, "alphanum_fraction": 0.5263680604, "include": true, "reason": "import numpy", "num_tokens": 2977}
|
# Class for storing data for solving with Ising methods.
# 2015-04-30
from __future__ import division
import numpy as np
from misc_fcns import *
import workspace.utils as ws
from scipy.spatial.distance import squareform
import entropy.entropy as entropy
import fast
import itertools
class Data():
"""
Class for keeping the data and any notes.
2015-05-05
"""
def __init__(self,binary,mask=None,notes=''):
self.binary = binary # binary data {0,1}
self.sym = 2*self.binary-1 # symmetrized formulation of binary data
self.mask = mask
self.notes = notes
self.N = self.binary.shape[1]
        # self.oData: original form of data (not stored)
self.si,self.sisj = entropy.calc_sisj(self.binary)
return
def get_binary(self,sym=False):
if sym:
return self.binary*2-1
else:
return self.binary
    def get_correl(self):
        """Return all correlations as a list."""
        return [self.si,self.sisj]
class IsingModel():
"""
Given details in {0,1} formulation, makes retrieval of basic quantities easy.
2015-07-12
"""
def __init__(self,N,J,data,correls=None):
import importlib
assert 11>N>0
assert len(J)==(N*(N-1)/2+N)
tosolve = importlib.import_module('tosolve01.tosolve%d'%N)
self.N = N
self.J = J # all parameters. bias field first
self.data = data
self.binStates = entropy.bin_states(self.N)
self.E = np.array([fast.calc_e( self.J, s[None,:] ) for s in entropy.xbin_states(self.N)]).ravel()
if N<11 and correls is None:
self.correls = tosolve.get_stats(self.J)
self.h1,self.J1 = self.convert_params( self.hi('0'),self.Jij('0'),convertTo='11' )
def si(self,form='1'):
if form=='1':
return self.si(form='0')*2-1.
return self.correls[:self.N]
def sisj(self,form='1'):
if form=='1':
return entropy.convert_sisj( self.sisj(form='0'), self.si(form='0'), '11' )
return self.correls[self.N:]
def hi(self,form='1'):
if form=='1':
return self.h1
return self.J[:self.N]
def Jij(self,form='1'):
if form=='1':
return self.J1
return self.J[self.N:]
def identify_basins(self):
"""
Find all the minima in the energy landscape
2015-08-19
"""
dMat = np.zeros((2**self.N,2**self.N))
for (i,j) in itertools.combinations(range(2**self.N),2):
dMat[i,j] = np.sum(np.abs(self.binStates[i].astype(float)-self.binStates[j]))
dMat += dMat.T
        ix = np.zeros((2**self.N))
        for i in range(2**self.N):
            # A state is a local minimum if every single-spin-flip neighbor has higher energy.
            dE = self.E[dMat[i]==1] - self.E[i]
            if np.all(dE>0):
                ix[i] = 1
        basinsIx = ix==1
return self.binStates[basinsIx]
def find_basin(self,s):
"""
Return energy basins for given state.
"""
atMin = False
neighborEnergies = np.zeros((self.N))
currentState = s.copy()
currentEnergy = self.E[ np.sum(currentState[None,:]==self.binStates,1)==self.N ]
while not atMin:
neighborEnergies = self.neighbor_energies(currentState)
dE = neighborEnergies - currentEnergy
if np.any( dE<0 ):
ix = dE.argmin()
currentState[ix] = 1 - currentState[ix]
currentEnergy = neighborEnergies[ix]
else:
atMin = True
return (np.sum(currentState==self.binStates,1)==self.N).nonzero()[0]
def neighbor_energies(self,currentState):
neighborEnergies = np.zeros((self.N))
for i in range(self.N):
neighborState = currentState.copy()
neighborState[i] = 1 - neighborState[i]
neighborEnergies[i] = self.E[ self.state_ix(neighborState).nonzero()[0] ]
return neighborEnergies
def state_ix(self,s):
"""Return the index of the state.
2015-07-12"""
if s.ndim==1:
return np.sum( s[None,:]==self.binStates,1 )==self.N
else:
return np.sum( s==self.binStates,1 )==self.N
@staticmethod
def convert_params(h,J,convertTo='01'):
"""
Convert parameters from 0,1 formulation to +/-1 and vice versa.
2014-05-12
"""
from entropy.entropy import squareform
if len(J.shape)!=2:
Jmat = squareform(J)
else:
Jmat = J
J = squareform(J)
if convertTo=='11':
# Convert from 0,1 to -/+1
Jp = J/4.
hp = h/2 + np.sum(Jmat,1)/4.
elif convertTo=='01':
# Convert from -/+1 to 0,1
hp = 2.*(h - np.sum(Jmat,1))
Jp = J*4.
return hp,Jp
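    # Illustrative check (not part of the original): for N=2 in the {0,1}
    # formulation with h=np.array([1.,1.]) and J=np.array([4.]),
    # convert_params(h, J, convertTo='11') returns hp=[1.5, 1.5] and Jp=[1.],
    # per the formulas above.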
@staticmethod
def resort_couplings(J,sortIx):
"""
Reorder given couplings into a desired order.
2015-07-12
Params:
-------
J (ndarray)
vector of length N*(N-1)/2
sortIx (ndarray)
"""
        # Minimal implementation (assumed semantics): reorder J by the given index array.
        return J[sortIx]
def cij(self):
"""2015-07-12"""
sisj = self.sisj()
si = self.si()
cij = []
k = 0
for i in xrange(self.N-1):
for j in xrange(i+1,self.N):
cij.append( sisj[k] - si[i]*si[j] )
k += 1
return np.array(cij)
|
{"hexsha": "484014711477ef756f532245d3b00f8b67f2f193", "size": 5579, "ext": "py", "lang": "Python", "max_stars_repo_path": "coniii/ising.py", "max_stars_repo_name": "bcdaniels/coniii", "max_stars_repo_head_hexsha": "50218dc571135dd08b441361da33fed64a8eebc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-01-26T09:52:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-02T13:34:53.000Z", "max_issues_repo_path": "coniii/ising.py", "max_issues_repo_name": "bcdaniels/coniii", "max_issues_repo_head_hexsha": "50218dc571135dd08b441361da33fed64a8eebc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2017-04-19T17:05:50.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-20T20:54:06.000Z", "max_forks_repo_path": "coniii/ising.py", "max_forks_repo_name": "bcdaniels/coniii", "max_forks_repo_head_hexsha": "50218dc571135dd08b441361da33fed64a8eebc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-04-19T16:58:05.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-22T19:14:04.000Z", "avg_line_length": 29.8342245989, "max_line_length": 106, "alphanum_fraction": 0.5350421222, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1472}
|
import numpy as np
import tensorflow as tf
from loss import yolo3_loss
from anchors import compute_normalized_anchors
from layers import cnn_block, csp_block, scale_prediction
from tensorflow.keras.layers import Concatenate, MaxPool2D, UpSampling2D, Input, Lambda
from configs.train_config import NUM_CLASSES, MAX_NUM_BBOXES, SCORE_THRESHOLD, USE_CUSTOM_ANCHORS, loss_params
def csp_darknet53(input_shape):
inputs = tf.keras.Input(shape=input_shape)
x = cnn_block(inputs, num_filters=32, kernel_size=3, strides=1, activation="mish")
x = cnn_block(
x,
num_filters=64,
kernel_size=3,
strides=2,
zero_padding=True,
padding="valid",
activation="mish",
)
route = cnn_block(x, num_filters=64, kernel_size=1, strides=1, activation="mish")
shortcut = cnn_block(x, num_filters=64, kernel_size=1, strides=1, activation="mish")
x = cnn_block(shortcut, num_filters=32, kernel_size=1, strides=1, activation="mish")
x = cnn_block(x, num_filters=64, kernel_size=3, strides=1, activation="mish")
x = x + shortcut
x = cnn_block(x, num_filters=64, kernel_size=1, strides=1, activation="mish")
x = Concatenate()([x, route])
x = cnn_block(x, num_filters=64, kernel_size=1, strides=1, activation="mish")
x = csp_block(x, filters=128, num_blocks=2)
output_1 = csp_block(x, filters=256, num_blocks=8)
output_2 = csp_block(output_1, filters=512, num_blocks=8)
output_3 = csp_block(output_2, filters=1024, num_blocks=4)
return tf.keras.Model(inputs, [output_1, output_2, output_3], name="CSPDarknet53")
def yolov4_neck(input_shapes):
input_1 = tf.keras.Input(shape=filter(None, input_shapes[0]))
input_2 = tf.keras.Input(shape=filter(None, input_shapes[1]))
input_3 = tf.keras.Input(shape=filter(None, input_shapes[2]))
x = cnn_block(input_3, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=1024, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
maxpool_1 = MaxPool2D((5, 5), strides=1, padding="same")(x)
maxpool_2 = MaxPool2D((9, 9), strides=1, padding="same")(x)
maxpool_3 = MaxPool2D((13, 13), strides=1, padding="same")(x)
spp = Concatenate()([maxpool_3, maxpool_2, maxpool_1, x])
x = cnn_block(spp, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=1024, kernel_size=3, strides=1, activation="leaky_relu")
output_3 = cnn_block(
x, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu"
)
x = cnn_block(
output_3, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu"
)
upsampled = UpSampling2D()(x)
x = cnn_block(input_2, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu")
x = Concatenate()([x, upsampled])
x = cnn_block(x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=3, strides=1, activation="leaky_relu")
output_2 = cnn_block(
x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu"
)
x = cnn_block(
output_2, num_filters=128, kernel_size=1, strides=1, activation="leaky_relu"
)
upsampled = UpSampling2D()(x)
x = cnn_block(input_1, num_filters=128, kernel_size=1, strides=1, activation="leaky_relu")
x = tf.keras.layers.Concatenate()([x, upsampled])
x = cnn_block(x, num_filters=128, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=256, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=128, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=256, kernel_size=3, strides=1, activation="leaky_relu")
output_1 = cnn_block(
x, num_filters=128, kernel_size=1, strides=1, activation="leaky_relu"
)
return tf.keras.Model(
[input_1, input_2, input_3], [output_1, output_2, output_3], name="YOLOv4_neck"
)
def yolov3_head(
input_shapes,
anchors,
num_classes):
input_1 = tf.keras.Input(shape=filter(None, input_shapes[0]))
input_2 = tf.keras.Input(shape=filter(None, input_shapes[1]))
input_3 = tf.keras.Input(shape=filter(None, input_shapes[2]))
x = cnn_block(input_1, num_filters=256, kernel_size=3, strides=1, activation="leaky_relu")
output_1 = scale_prediction(
x, num_anchors_stage=len(anchors[0]), num_classes=num_classes, num=93
)
x = cnn_block(
input_1,
num_filters=256,
kernel_size=3,
strides=2,
zero_padding=True,
padding="valid",
activation="leaky_relu",
)
x = Concatenate()([x, input_2])
x = cnn_block(x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=3, strides=1, activation="leaky_relu")
connection = cnn_block(
x, num_filters=256, kernel_size=1, strides=1, activation="leaky_relu"
)
x = cnn_block(
connection, num_filters=512, kernel_size=3, strides=1, activation="leaky_relu"
)
output_2 = scale_prediction(
x, num_anchors_stage=len(anchors[1]), num_classes=num_classes, num=101
)
x = cnn_block(
connection,
num_filters=512,
kernel_size=3,
strides=2,
zero_padding=True,
padding="valid",
activation="leaky_relu",
)
x = Concatenate()([x, input_3])
x = cnn_block(x, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=1024, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=1024, kernel_size=3, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=512, kernel_size=1, strides=1, activation="leaky_relu")
x = cnn_block(x, num_filters=1024, kernel_size=3, strides=1, activation="leaky_relu")
output_3 = scale_prediction(
x, num_anchors_stage=len(anchors[2]), num_classes=num_classes, num=109
)
return tf.keras.Model([input_1, input_2, input_3], [output_1, output_2, output_3], name="YOLOv3_head")
class YOLOv4(tf.keras.Model):
def __init__(self, input_shape,
num_classes,
anchors,
yolo_max_boxes=MAX_NUM_BBOXES,
yolo_iou_threshold=loss_params['iou_threshold'],
yolo_score_threshold=SCORE_THRESHOLD):
super().__init__(name='YOLOv4')
self.num_classes = num_classes
self.anchors = anchors
self.yolo_max_boxes = yolo_max_boxes
self.yolo_iou_threshold = yolo_iou_threshold
self.yolo_score_threshold = yolo_score_threshold
        if (input_shape[0] % 32 != 0) or (input_shape[1] % 32 != 0):
            raise ValueError(
                f"Height and width in input_shape {input_shape} must be multiples of 32"
            )
backbone = csp_darknet53(input_shape)
neck = yolov4_neck(input_shapes=backbone.output_shape)
self.normalized_anchors = compute_normalized_anchors(anchors, input_shape)
head = yolov3_head(
input_shapes=neck.output_shape,
anchors=self.normalized_anchors,
num_classes=num_classes)
inputs = tf.keras.Input(shape=input_shape)
lower_features = backbone(inputs)
medium_features = neck(lower_features)
upper_features = head(medium_features)
anchors = np.array([anchor for subl in self.normalized_anchors for anchor in subl])
y_true = [Input(shape=(None, None, len(self.anchors[l]), NUM_CLASSES+5), name='y_true_{}'.format(l)) for l in range(3)]
self.model_body = tf.keras.Model(inputs=inputs, outputs=upper_features, name="YOLOv4")
model_loss, location_loss, confidence_loss, class_loss = Lambda(
yolo3_loss, name='yolo_loss',
arguments={'anchors': anchors,
'anchor_masks': 'custom' if USE_CUSTOM_ANCHORS else 'yolo',
'num_layers': 3,
'num_classes': NUM_CLASSES,
'ignore_thresh': loss_params['iou_threshold'],
'label_smoothing': loss_params['smooth_factor'],
'elim_grid_sense': loss_params['elim_grid_sense'],
'use_vf_loss': loss_params['use_vf_loss'],
'use_focal_loss': loss_params['use_focal_loss'],
'use_focal_obj_loss': loss_params['use_focal_obj_loss'],
'use_diou_loss': loss_params['use_diou'],
'use_giou_loss': loss_params['use_giou'],
'use_ciou_loss': loss_params['use_ciou'],
'focal_gamma': loss_params['focal_gamma']})([*upper_features, *y_true])
self.model = tf.keras.Model([self.model_body.input, *y_true], model_loss)
loss_dict = {'location_loss':location_loss, 'confidence_loss':confidence_loss, 'class_loss':class_loss}
self.add_metrics(loss_dict)
def add_metrics(self, metric_dict):
'''
add metric scalar tensor into model, which could be tracked in training
log and tensorboard callback
'''
for (name, metric) in metric_dict.items():
self.model.add_metric(metric, name=name, aggregation='mean')
def call(self, x, training=False):
return self.model(x, training)
def train_step(self, data):
x, y = data['image'], list(data['label'])
with tf.GradientTape() as tape:
y_pred = self([x, *y], training=True) # Forward pass
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
metrics = {m.name: m.result() for m in self.metrics}
metrics['reg_loss'] = (loss - y_pred)[0]
return metrics
def test_step(self, data):
x, y = data['image'], list(data['label'])
        y_pred = self([x, *y], training=False)  # inference mode during evaluation
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred)
metrics = {m.name: m.result() for m in self.metrics}
metrics['reg_loss'] = (loss - y_pred)[0]
return metrics
def predict_step(self, data):
if isinstance(data, dict):
x = data['image']
else:
x = data
return self.model_body(x)
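# Minimal usage sketch (illustrative; the anchor values and dataset pipeline
# are assumptions, not part of this module):
#   yolo = YOLOv4(input_shape=(416, 416, 3), num_classes=NUM_CLASSES, anchors=anchors)
#   yolo.compile(optimizer="adam", loss=lambda y_true, y_pred: y_pred)
#   yolo.fit(train_dataset, epochs=10)  # dataset yields {"image": ..., "label": (...)}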
|
{"hexsha": "27ca9c57fceaabee26f9a39d46bcb3a444c4542e", "size": 11283, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/yolo_v4.py", "max_stars_repo_name": "vairodp/AstroNet", "max_stars_repo_head_hexsha": "33602d8e954246f5e2571f11cf331168f82198f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-09-24T18:54:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T15:40:26.000Z", "max_issues_repo_path": "src/yolo_v4.py", "max_issues_repo_name": "vairodp/AstroNet", "max_issues_repo_head_hexsha": "33602d8e954246f5e2571f11cf331168f82198f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/yolo_v4.py", "max_forks_repo_name": "vairodp/AstroNet", "max_forks_repo_head_hexsha": "33602d8e954246f5e2571f11cf331168f82198f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-27T14:34:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-13T23:50:43.000Z", "avg_line_length": 41.3296703297, "max_line_length": 127, "alphanum_fraction": 0.6618807055, "include": true, "reason": "import numpy", "num_tokens": 3032}
|
(* Require Import Paco.paco. *)
(* ViNo - VerIfiable Nock *)
(* The aim of this project is to provide a Nock interpreter
with jets whose semantic equivalence can be verified w/r/t
the Gallina (eventually, OCaml) code that uses them *)
Require Import Common.
(* Require Import Applicative *)
(* Require Import NockParse *)
Require Import ZArith.
(* NB we use positive (Z) because without them we will
spend stupendous amounts
of time just reading our input *)
Section Nock.
Open Scope N.
Inductive noun : Set :=
| atom : N -> noun
| cell : noun -> noun -> noun
.
Inductive instr : Set :=
| slice : instr
| const : instr
| nock : instr
| isatom : instr
| succ : instr
| equ : instr
.
Definition toinstr (n : N) : sum instr N :=
match n with
| 0 => inl slice
| 1 => inl const
| 2 => inl nock
| 3 => inl isatom
| 4 => inl succ
| 5 => inl equ
| _ => inr n
end
.
(* Indexing a noun as a tree ("slot")
Notice how big of a bullet we dodged here.
*)
Fixpoint slice_rec (n : positive) (nn : noun) : noun :=
match n with
| xH => nn
| xO n' =>
match slice_rec n' nn with
| cell a1 a2 => a1
| _ => nn
end
| xI n' =>
match slice_rec n' nn with
| cell a1 a2 => a2
| _ => nn
end
end.
Coercion atom : N >-> noun.
Definition slice' (nn b : noun) : noun :=
match nn with
| atom (Npos n) => slice_rec n b
| _ => nn
end.
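(* Sanity checks (illustrative): slot 2 of a cell is its head, slot 3 its tail. *)
Eval compute in (slice' (atom 2) (cell (atom 4) (atom 5))). (* = atom 4 *)
Eval compute in (slice' (atom 3) (cell (atom 4) (atom 5))). (* = atom 5 *)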
Definition isatom' (n : noun) : noun :=
match n with
| cell _ _ => atom 0
| atom n => atom 1
end.
Definition succ' (n : noun) : noun :=
match n with
| atom n => atom (n + 1)
| cell _ _ => n
end.
Definition eq' (n : noun) : noun :=
match n with
| cell (atom a) (atom b) =>
if N.eqb a b
then atom 0
else atom 1
| _ => n
end.
Fixpoint nock' (fuel : nat) (subj : noun) (form : noun) : option noun :=
match fuel with
| O => None
| S fuel =>
match form with
| cell (atom n) arg =>
match (toinstr n) with
| inl i => match i with
| slice => Some $ slice' arg subj
| const => Some arg
| nock =>
match arg with
| cell b c =>
match nock' fuel subj b, nock' fuel subj c with
| Some l, Some r => nock' fuel l r
| _, _ => None
end
| _ => Some $ cell subj form
end
| isatom =>
match nock' fuel subj arg with
| Some x => Some $ isatom' x
| _ => None
end
| succ =>
match nock' fuel subj arg with
| Some x => Some $ succ' x
| _ => None
end
| equ =>
match nock' fuel subj arg with
| Some x => Some $ eq' x
| _ => None
end
end
| inr _ => Some subj
end
| _ => Some subj
end
end.
End Nock.
(* Inspired by ImpParser, we use strings to represent nouns
to get around a limitation of Coq's parser *)
Section NockParse.
Require Import Coq.Strings.String.
Require Import Coq.Strings.Ascii.
Open Scope string_scope.
(* Utilities, lovingly lifted from Software Foundations (ImpParser.v) *)
Definition isWhite (c : ascii) : bool :=
let n := nat_of_ascii c in
orb (orb (beq_nat n 32) (* space *)
(beq_nat n 9)) (* tab *)
(orb (beq_nat n 10) (* linefeed *)
(beq_nat n 13)). (* Carriage return. *)
SearchAbout (nat -> N).
Definition getDigit (c : ascii) : option N :=
let n := nat_of_ascii c in
if andb (48 <=? n) (n <=? 57)
then Some $ N.of_nat (n - 48)%nat
else None.
Definition isObrac (c : ascii) : bool :=
beq_nat $ nat_of_ascii c $ 91%nat.
Definition isCbrac (c : ascii) : bool :=
beq_nat $ nat_of_ascii c $ 93%nat.
Inductive token :=
| OBRAC : token
| CBRAC : token
| NUM : N -> token
.
(* Lex' takes:
s: remaining string to lex
n: accumulated N so far *)
Require Import Coq.Lists.List.
Import ListNotations.
Open Scope list_scope.
(* log, needed for pretty printer *)
Print Init.Nat.log2.
Print Init.Nat.log2_iter.
Require Import Coq.Arith.PeanoNat.
SearchAbout (nat -> nat -> nat).
(* I'm skeptical about this. *)
Definition log (base : nat) (n : nat) :=
(Nat.log2 n / Nat.log2 base).
(*
Fixpoint log10' (n : nat) (place : nat) (newskip : nat) (skip : nat) : nat :=
match n with
| 0%nat => place
| 1%nat => place
| 2%nat => place
| 3%nat => place
| 4%nat => place
| 5%nat => place
| 6%nat => place
| 7%nat => place
| 8%nat => place
| 9%nat => place
| S n' =>
match skip with
| O => log10' n' (S place) (S place) newskip
| S skip' => log10' n' place (S newskip) skip'
end
end.
*)
(* this is not correct. *)
Definition log10 (n : nat) : nat :=
log 10 n.
Eval compute in (log10 120).
(* Open Scope N *)
Open Scope nat.
Opaque apply.
Fixpoint foopoint (n : nat) :=
match n with
| O => O
| S n' => apply (fun i => foopoint i) n'
end.
Fixpoint lex' (s : string) (n : option N) : list token :=
match s with
| String a s' =>
if
isObrac a then
match n with
| Some d' => NUM d' :: OBRAC :: lex' s' None
| _ => OBRAC :: lex' s' None
end
else if
isCbrac a then
match n with
| Some d' => NUM d' :: CBRAC :: lex' s' None
| _ => CBRAC :: lex' s' None
end
else
match getDigit a with
| Some d =>
match n with
| Some d' => lex' s' (Some $ 10 * d' + d)
| None => lex' s' $ Some d
end
| _ =>
if isWhite a then
match n with
| Some d' => NUM d' :: lex' s' None
| _ => lex' s' None
end
else []
end
| EmptyString =>
match n with
| Some n => NUM n :: nil
| _ => nil
end
end.
Definition lex (s : string) : list token :=
lex' s None.
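(* Example (illustrative): lexing a simple noun yields bracket and number tokens. *)
Eval compute in (lex "[1 2]"). (* = [OBRAC; NUM 1; NUM 2; CBRAC] *)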
Ltac step :=
match goal with
| |- context[?L = _] => eval red in L
end.
Print Init.Nat.log2_iter.
Print Init.Nat.log2.
(* TODO: decanonize, removing as many brackets as possible *)
Fixpoint explode (s : string) : list ascii :=
match s with
| EmptyString => []
| String a s' => a :: explode s'
end.
Definition lascii := list ascii.
Coercion explode : string >-> list.
Fixpoint implode (l : list ascii) : string :=
match l with
| [] => EmptyString
| a :: l' => String a $ implode l'
end.
Eval compute in (ascii_of_N 48).
Definition nat_to_ascii (n : nat) : ascii :=
ascii_of_nat (n + 48)%nat.
Fixpoint nat_to_string' (n : nat) (fuel : nat) : string :=
if beq_nat n 0 then EmptyString
else
match fuel with
| O => EmptyString
| S fuel' =>
String (nat_to_ascii $ Nat.modulo n 10) (nat_to_string' ((n / 10)%nat) fuel')
end.
Definition nat_to_string (n : nat) : string :=
let s := implode $ List.rev (nat_to_string' n n) in
match s with
| EmptyString => "0"%string
| _ => s
end.
Fixpoint pretty (nn : noun) : string :=
match nn with
| atom x => nat_to_string (N.to_nat x)
| cell a b =>
append
"["
(append (pretty a)
(append " "
(append (pretty b) "]")))
end.
(* Applicative, for option only (for now) *)
Definition apure {T : Type} (x : T) : option T := Some x.
Definition aseq {T1 T2 : Type} (fo : option (T1 -> T2)) (xo : option T1) : option T2 :=
match fo, xo with
| None, _ => None
| _, None => None
| Some f, Some x => Some $ f x
end.
Notation "f <*> x" := (aseq f x) (at level 85).
Notation "f <$> x" := (aseq (apure f) x) (at level 84).
(* TODO: Polymorphic coercion?
Maybe this can be simulated with canonical structures *)
Fixpoint nn_insert (n : N) (nn : noun) : noun :=
match nn with
| atom n' => cell (atom n') (atom n)
| cell nl nr => cell nl (nn_insert n nr)
end.
Fixpoint nn_insert_opt (n : N) (o : option noun) : noun :=
match o with
| None => atom n
| Some nn => nn_insert n nn
end.
Definition Niszero (n : nat) : bool :=
match n with
| 0%nat => true
| _ => false
end.
Definition isnil {T : Type} (l : list T) : bool :=
match l with
| [] => true
| _ => false
end.
(* add more braces to make fully explicit *)
Fixpoint canonize' (l : list token) (brackets : nat) {struct l} : list token :=
let bkt := repeat CBRAC brackets in
match l with
| h1 :: lt =>
match lt with
| h2 :: ltt =>
match ltt with
| h3 :: lttt =>
match h1, h2, h3 with
| NUM _, NUM _, CBRAC =>
h1 :: h2 :: bkt ++ canonize' ltt 0
| NUM _, NUM _, _=>
h1 :: OBRAC :: canonize' lt (S brackets)
| CBRAC, _, _ => h1 :: bkt ++ canonize' lt 0
| _, _, _ => h1 :: canonize' lt brackets
end
| _ => l ++ bkt
end
| _ => l ++ bkt
end
| _ => l ++ bkt
end.
Definition canonize (l : list token) : list token :=
canonize' l 0.
Eval compute in (canonize (lex "[1 2 3 4]")).
(* only works on canonized things *)
(* idea: left stores things we've already seen but can't
quite output yet, default stores whether we return none
or a designated cell when we run out of list
*)
Print positive.
SearchAbout list.
Eval compute in (pretty 900).
(* This is getting things backwards sometimes *)
Fixpoint parse'
(l : list token)
(depth : nat)
(lefts : list (option noun))
: option noun :=
match l with
| [] => None
(*if Niszero depth then
match lefts with
| Some lh :: [] => Some lh
| _ => None
end
else None *)
| t :: l' =>
match t with
| OBRAC =>
parse' l' (depth + 1) (None :: lefts)
| CBRAC =>
if negb $ Niszero depth then
match lefts with
| Some lh :: t =>
(* lookahead! *)
if negb $ isnil l' then
match parse' l' (depth - 1) t with
| Some n' => Some (cell lh n')
| _ => None
end
else Some lh
| None :: t => parse' l' (depth - 1) t
| [] => None
end
else None
| NUM n =>
(*
match parse' l' depth left with
| Some n' => Some $ cell n n'
| None => None
end *)
match lefts with
| [] => Some (atom n)
| lefth :: leftt =>
parse' l' depth (Some (nn_insert_opt n lefth) :: leftt)
end
end
end.
Definition parse (s : string) : option noun :=
parse' (canonize (lex s)) 0 [].
Definition ex_noun1 : string := "[1 2]".
Eval compute in (canonize (lex ex_noun1)).
(* TODO: canonize is broken, but I think this is otherwise right *)
Definition ex_noun2 : string := "1".
Definition ex_noun3 : string := "[1 [2 3] 4]".
Eval compute in (canonize (lex "[1 [2 3]]")).
Eval compute in (parse' (lex "[1 [2 3]]") 0 []).
Definition ex_noun4 : string := "[[1 2] 3 4]".
Definition ex_noun5 : string := "[1 2 3 4]".
Eval compute in (parse $ ex_noun1).
Eval compute in (parse $ ex_noun5).
Eval compute in
  (parse' (canonize $ lex ex_noun1) 0 []).
(* Unfinished experiments:
Eval compute in (
Eval compute in (parse'
Definition parse'2 :=
*)
(* do_parse - parse, returning a bogus value on fail *)
(* Coercion do_parse : string >-> noun *)
(* Coercion print : noun >-> string *)
End NockParse.
(*
Definition example1 :=
cell (cell 4 5) (cell 6 (cell 14 15)).
Eval compute in (slice_rec 15 example1).
*)
|
{"author": "mmalvarez", "repo": "vino", "sha": "7d2c9ed84fbe660f791ed70471a464da3ab8ce2d", "save_path": "github-repos/coq/mmalvarez-vino", "path": "github-repos/coq/mmalvarez-vino/vino-7d2c9ed84fbe660f791ed70471a464da3ab8ce2d/src/Nock.v"}
|
import numpy as np
from bokeh.layouts import column, gridplot, row
from bokeh.plotting import figure, output_file, show
N = 1000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]
TOOLS="hover,crosshair,pan,wheel_zoom,zoom_in,zoom_out,box_zoom,undo,redo,reset,tap,save,box_select,poly_select,lasso_select"
def mkplot(xaxis="below", yaxis="left"):
p = figure(width=300, height=300, tools=TOOLS, x_axis_location=xaxis, y_axis_location=yaxis)
p.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
return p
def mkgrid(plots, location):
return gridplot(plots, width=300, height=300, toolbar_location=location)
l_al = mkgrid([[mkplot(), mkplot()], [mkplot(), mkplot()]], "above")
l_ar = mkgrid([[mkplot(), mkplot()], [mkplot(), mkplot()]], "below")
l_bl = mkgrid([[mkplot(), mkplot()], [mkplot(), mkplot()]], "left")
l_br = mkgrid([[mkplot(), mkplot()], [mkplot(), mkplot()]], "right")
layout = column(row(l_al, l_ar), row(l_bl, l_br))
output_file("toolbars2.html", title="toolbars2.py example")
show(layout)
|
{"hexsha": "866095c9628555ae38ef5da788ee28e6ccf95919", "size": 1197, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/models/file/toolbars2.py", "max_stars_repo_name": "g-parki/bokeh", "max_stars_repo_head_hexsha": "664ead5306bba64609e734d4105c8aa8cfb76d81", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15193, "max_stars_repo_stars_event_min_datetime": "2015-01-01T05:11:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:30:20.000Z", "max_issues_repo_path": "examples/models/file/toolbars2.py", "max_issues_repo_name": "g-parki/bokeh", "max_issues_repo_head_hexsha": "664ead5306bba64609e734d4105c8aa8cfb76d81", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9554, "max_issues_repo_issues_event_min_datetime": "2015-01-01T03:16:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:59:39.000Z", "max_forks_repo_path": "examples/models/file/toolbars2.py", "max_forks_repo_name": "g-parki/bokeh", "max_forks_repo_head_hexsha": "664ead5306bba64609e734d4105c8aa8cfb76d81", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4829, "max_forks_repo_forks_event_min_datetime": "2015-01-02T03:35:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T16:40:26.000Z", "avg_line_length": 37.40625, "max_line_length": 125, "alphanum_fraction": 0.6908939014, "include": true, "reason": "import numpy", "num_tokens": 388}
|
import io
import re
import sys
import numpy as np
_INPUT_ = """\
10
6 7 5 18 2
3 8 1 6 3
7 2 8 7 7
6 3 3 4 7
12 8 9 15 9
9 8 6 1 10
12 9 7 8 2
10 3 17 4 10
3 1 3 19 3
3 14 7 13 1
"""
#sys.stdin = io.StringIO(_INPUT_)
# copied from https://atcoder.jp/contests/zone2021/editorial/1197
# added some comments
# refered https://blog.hamayanhamayan.com/entry/2021/05/01/231111
N = int(input())
A = [tuple(map(int, input().split())) for i in range(N)]
def check(x):
s = set()
for a in A:
        # Represent whether each of the 5 stats is >= x as a bitmask like 0b01011:
        # if a[0] >= x, shift 1 left by 0 bits to get 0b001;
        # if a[1] >= x, shift 1 left by 1 bit to get 0b010;
        # then sum these contributions together.
s.add(sum(1 << i for i in range(5) if a[i] >= x))
#print(x, [bin(ss) for ss in s])
    # Loop over all ways to pick 3 masks. Since the masks are deduplicated in a
    # set, there are at most (2^5 choose 3) combinations to try.
    # If some triple ORs to all ones, x is achievable.
for x in s:
for y in s:
for z in s:
if x | y | z == 0b11111:
return True
return False
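# Example (illustrative): with x = 7, the first member (6, 7, 5, 18, 2) maps to
# the mask 0b01010 (only a[1] and a[3] are >= 7); check(7) then asks whether
# three of the collected masks can OR together to 0b11111.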
ok = 0
ng = 10**9 + 1
while ng - ok > 1:
cen = (ok+ng) //2
if check(cen):
ok = cen
else:
ng = cen
print(ok)
|
{"hexsha": "7882028f18db528687449055b6e0f70b5aab09ee", "size": 1110, "ext": "py", "lang": "Python", "max_stars_repo_path": "competitive/AtCoder/zone2021/C_shakyo.py", "max_stars_repo_name": "pn11/benkyokai", "max_stars_repo_head_hexsha": "9ebdc46b529e76b7196add26dbc1e62ad48e72b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "competitive/AtCoder/zone2021/C_shakyo.py", "max_issues_repo_name": "pn11/benkyokai", "max_issues_repo_head_hexsha": "9ebdc46b529e76b7196add26dbc1e62ad48e72b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-03-24T16:24:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T15:51:18.000Z", "max_forks_repo_path": "competitive/AtCoder/zone2021/C_shakyo.py", "max_forks_repo_name": "pn11/benkyokai", "max_forks_repo_head_hexsha": "9ebdc46b529e76b7196add26dbc1e62ad48e72b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.1379310345, "max_line_length": 65, "alphanum_fraction": 0.5504504505, "include": true, "reason": "import numpy", "num_tokens": 523}
|
// Copyright Joseph Dobson 2014
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "modes.hpp"
#include <sstream>
#include <algorithm>
#include <boost/spirit/include/qi.hpp>
#include <boost/fusion/include/std_pair.hpp>
namespace irc {
mode_block::const_iterator mode_block::find(char sym) const {
return std::find_if(modes_.begin(), modes_.end(),
[&](const value_type& v) { return v.first==sym; });
}
mode_block::iterator mode_block::find(char sym) {
return std::find_if(modes_.begin(), modes_.end(),
[&](const value_type& v) { return v.first==sym; });
}
mode_block::const_iterator mode_block::begin() const {
return modes_.begin();
}
mode_block::const_iterator mode_block::end() const {
return modes_.end();
}
void mode_block::set_mode_impl(char sym, const optional_string& param) {
auto it=find(sym);
if(it==modes_.end()) {
modes_.emplace_back(sym, param);
}
else {
it->second=param;
}
}
void mode_block::apply_mode_diff(const prefix& p, const mode_diff& md) {
if(md.change==mode_change::set) {
for(const auto& m : md.modes)
set_mode_impl(m.first, m.second);
}
else {
for(const auto& m : md.modes)
unset_mode_impl(m.first);
}
on_mode_change(p, md);
}
void mode_block::unset_mode_impl(char sym) {
auto it=find(sym);
if(it!=modes_.end()) {
modes_.erase(it);
}
}
bool mode_block::is_mode_set(char sym) const {
	return find(sym)!=modes_.end();
}
optional_string mode_block::try_get_mode_param(char sym) {
auto it=find(sym);
if(it==modes_.end()) {
throw std::runtime_error(
"tring to retreive parater for non existant mode");
}
return it->second;
}
std::ostream& operator<<(std::ostream& os, const mode_block& mb) {
for(const auto& mode : mb) {
os << mode.first;
if(mode.second) os << '(' << *mode.second << ')';
}
return os;
}
std::ostream& operator<<(std::ostream& os, const mode_list& ml) {
for(const auto& m : ml) {
os << m.first;
if(m.second) os << '(' << *m.second << ')';
}
return os;
}
std::ostream& operator<<(std::ostream& os, const mode_diff& md) {
return os << ( md.change == mode_change::set ? '+' : '-' )
<< md.modes;
}
std::string to_string(const mode_block& mb) {
std::ostringstream oss;
oss << mb;
return oss.str();
}
std::string to_string(const mode_list& ml) {
std::ostringstream oss;
oss << ml;
return oss.str();
}
std::string to_string(const mode_diff& md) {
std::ostringstream oss;
oss << md;
return oss.str();
}
} //namespace irc
BOOST_FUSION_ADAPT_STRUCT(
irc::mode_diff,
(irc::mode_change, change)
(irc::mode_list, modes)
)
namespace irc {
namespace qi=boost::spirit::qi;
mode_diff parse_modes(const std::string& entries) {
using iterator=std::string::const_iterator;
qi::symbols<char, mode_change> add_remove;
add_remove.add("+", mode_change::set)("-", mode_change::unset);
//TODO should I really use lexeme?
qi::rule<iterator, mode_block::value_type()> mode=
qi::lexeme[ ~qi::char_(' ') >> -( ' ' >> +qi::char_ )];
qi::rule<iterator, mode_diff(), qi::space_type> modes_diff=
add_remove >> +mode;
mode_diff md;
qi::phrase_parse(entries.begin(), entries.end(),
modes_diff, qi::space, md);
return md;
}
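// Example (illustrative): parse_modes("+it") yields change = mode_change::set
// and modes = { {'i', boost::none}, {'t', boost::none} }.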
} //namespace irc
|
{"hexsha": "0cfa052c4511ef80ff71c22aab7c5a446a3c2d49", "size": 3309, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/modes.cpp", "max_stars_repo_name": "libircpp/libircpp", "max_stars_repo_head_hexsha": "b7df7f3b20881c11c842b81224bc520bc742cdb1", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-02-01T19:57:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T22:59:29.000Z", "max_issues_repo_path": "src/modes.cpp", "max_issues_repo_name": "libircpp/libircpp", "max_issues_repo_head_hexsha": "b7df7f3b20881c11c842b81224bc520bc742cdb1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/modes.cpp", "max_forks_repo_name": "libircpp/libircpp", "max_forks_repo_head_hexsha": "b7df7f3b20881c11c842b81224bc520bc742cdb1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3581081081, "max_line_length": 72, "alphanum_fraction": 0.6687821094, "num_tokens": 912}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/interprocess for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_INTERPROCESS_NAMED_MUTEX_HPP
#define BOOST_INTERPROCESS_NAMED_MUTEX_HPP
#if (defined _MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif
#include <boost/interprocess/detail/config_begin.hpp>
#include <boost/interprocess/detail/workaround.hpp>
#include <boost/interprocess/creation_tags.hpp>
#include <boost/interprocess/exceptions.hpp>
#include <boost/interprocess/detail/interprocess_tester.hpp>
#include <boost/interprocess/detail/posix_time_types_wrk.hpp>
#include <boost/interprocess/permissions.hpp>
#if defined(BOOST_INTERPROCESS_NAMED_MUTEX_USES_POSIX_SEMAPHORES)
#include <boost/interprocess/sync/posix/named_mutex.hpp>
#define BOOST_INTERPROCESS_USE_POSIX_SEMAPHORES
#elif !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION) && defined (BOOST_INTERPROCESS_WINDOWS)
#include <boost/interprocess/sync/windows/named_mutex.hpp>
#define BOOST_INTERPROCESS_USE_WINDOWS
#else
#include <boost/interprocess/sync/shm/named_mutex.hpp>
#endif
//!\file
//!Describes a named mutex class for inter-process synchronization
namespace boost {
namespace interprocess {
class named_condition;
//!A mutex with a global name, so it can be found from different
//!processes. This mutex can't be placed in shared memory, and
//!each process should have its own named_mutex.
class named_mutex
{
/// @cond
//Non-copyable
named_mutex();
named_mutex(const named_mutex &);
named_mutex &operator=(const named_mutex &);
friend class named_condition;
/// @endcond
public:
//!Creates a global interprocess_mutex with a name.
//!Throws interprocess_exception on error.
named_mutex(create_only_t create_only, const char *name, const permissions &perm = permissions());
//!Opens or creates a global mutex with a name.
//!If the mutex is created, this call is equivalent to
//!named_mutex(create_only_t, ... )
//!If the mutex is already created, this call is equivalent
//!named_mutex(open_only_t, ... )
//!Does not throw
named_mutex(open_or_create_t open_or_create, const char *name, const permissions &perm = permissions());
//!Opens a global mutex with a name if that mutex is previously
//!created. If it is not previously created this function throws
//!interprocess_exception.
named_mutex(open_only_t open_only, const char *name);
//!Destroys *this and indicates that the calling process is finished using
//!the resource. The destructor function will deallocate
//!any system resources allocated by the system for use by this process for
//!this resource. The resource can still be opened again calling
//!the open constructor overload. To erase the resource from the system
//!use remove().
~named_mutex();
//!Unlocks a previously locked
//!interprocess_mutex.
void unlock();
//!Locks interprocess_mutex, sleeps when interprocess_mutex is already locked.
//!Throws interprocess_exception if a severe error is found
void lock();
//!Tries to lock the interprocess_mutex, returns false when interprocess_mutex
//!is already locked, returns true when success.
//!Throws interprocess_exception if a severe error is found
bool try_lock();
//!Tries to lock the interprocess_mutex until time abs_time,
//!Returns false when timeout expires, returns true when locks.
//!Throws interprocess_exception if a severe error is found
bool timed_lock(const boost::posix_time::ptime &abs_time);
//!Erases a named mutex from the system.
//!Returns false on error. Never throws.
static bool remove(const char *name);
/// @cond
private:
friend class ipcdetail::interprocess_tester;
void dont_close_on_destruction();
public:
#if defined(BOOST_INTERPROCESS_USE_POSIX_SEMAPHORES)
typedef ipcdetail::posix_named_mutex internal_mutex_type;
#undef BOOST_INTERPROCESS_USE_POSIX_SEMAPHORES
#elif defined(BOOST_INTERPROCESS_USE_WINDOWS)
typedef ipcdetail::windows_named_mutex internal_mutex_type;
#undef BOOST_INTERPROCESS_USE_WINDOWS
#else
typedef ipcdetail::shm_named_mutex internal_mutex_type;
#endif
internal_mutex_type &internal_mutex()
{ return m_mut; }
internal_mutex_type m_mut;
/// @endcond
};
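// Usage sketch (illustrative; scoped_lock comes from
// <boost/interprocess/sync/scoped_lock.hpp>):
//   using namespace boost::interprocess;
//   named_mutex mtx(open_or_create, "my_mutex");
//   { scoped_lock<named_mutex> lock(mtx); /* critical section */ }
//   named_mutex::remove("my_mutex");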
/// @cond
inline named_mutex::named_mutex(create_only_t, const char *name, const permissions &perm)
: m_mut(create_only_t(), name, perm)
{}
inline named_mutex::named_mutex(open_or_create_t, const char *name, const permissions &perm)
: m_mut(open_or_create_t(), name, perm)
{}
inline named_mutex::named_mutex(open_only_t, const char *name)
: m_mut(open_only_t(), name)
{}
inline void named_mutex::dont_close_on_destruction()
{ ipcdetail::interprocess_tester::dont_close_on_destruction(m_mut); }
inline named_mutex::~named_mutex()
{}
inline void named_mutex::lock()
{ m_mut.lock(); }
inline void named_mutex::unlock()
{ m_mut.unlock(); }
inline bool named_mutex::try_lock()
{ return m_mut.try_lock(); }
inline bool named_mutex::timed_lock(const boost::posix_time::ptime &abs_time)
{ return m_mut.timed_lock(abs_time); }
inline bool named_mutex::remove(const char *name)
{ return internal_mutex_type::remove(name); }
/// @endcond
} //namespace interprocess {
} //namespace boost {
#include <boost/interprocess/detail/config_end.hpp>
#endif //BOOST_INTERPROCESS_NAMED_MUTEX_HPP
|
{"hexsha": "7deb400ab10de8a0f7caaa741d21fbfcb5ee566d", "size": 5772, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/boost/interprocess/sync/named_mutex.hpp", "max_stars_repo_name": "randolphwong/mcsema", "max_stars_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 61.0, "max_stars_repo_stars_event_min_datetime": "2015-12-05T19:34:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-25T09:07:09.000Z", "max_issues_repo_path": "boost/boost/interprocess/sync/named_mutex.hpp", "max_issues_repo_name": "randolphwong/mcsema", "max_issues_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 38.0, "max_issues_repo_issues_event_min_datetime": "2015-07-22T07:35:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-14T16:03:06.000Z", "max_forks_repo_path": "boost/boost/interprocess/sync/named_mutex.hpp", "max_forks_repo_name": "randolphwong/mcsema", "max_forks_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 39.0, "max_forks_repo_forks_event_min_datetime": "2015-01-07T02:03:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-15T00:41:50.000Z", "avg_line_length": 33.5581395349, "max_line_length": 107, "alphanum_fraction": 0.7311157311, "num_tokens": 1333}
|
from MeArmKinematics import MeArmKinematics
#from MeArm import MeArm
from DQ import *
import numpy as np
import time
import sys
if len(sys.argv) == 1:
print("Input the desired coordinates")
quit()
else:
position = np.array([float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3])])
kinematics = MeArmKinematics()
#mearm = MeArm(baseServoPin = 4, rightServoPin = 17, leftServoPin = 27, handServoPin = 22)
theta = np.array([0, 0, 0])
theta1 = np.arctan2(position[1], position[0])
r = DQ([np.cos(theta1/2), 0, 0, np.sin(theta1/2), 0, 0, 0, 0])
p = DQ([0, position[0], position[1], position[2] , 0, 0, 0, 0])
xd = r + (1/2) * DQ.E_ * p * r
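# xd is the desired pose as a unit dual quaternion: the real part r holds the
# rotation and the dual part (1/2)*E_*p*r holds the translation p (standard
# dual-quaternion pose composition).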
error = 1
epsilon = 0.1
dt = 0.1
while np.linalg.norm(error) > epsilon:
x = kinematics.fkm(theta)
J = kinematics.jacobian(theta)
error = vec8(xd - x)
theta = theta + np.dot(np.linalg.pinv(J), error) * dt
# mearm.setTheta(theta * 180/np.pi)
time.sleep(0.0001)
print(theta * 180 / np.pi)
#mearm.setHand(95)
input()  # wait for a key press before exiting
#mearm.closeConn()
|
{"hexsha": "7a1083fa38e124aa25f40948c9c043c62b65ad4b", "size": 1026, "ext": "py", "lang": "Python", "max_stars_repo_path": "position_control.py", "max_stars_repo_name": "glauberrleite/mearm-experience", "max_stars_repo_head_hexsha": "cde04a32929c082f8aa93f8cb8cb2368c13661b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "position_control.py", "max_issues_repo_name": "glauberrleite/mearm-experience", "max_issues_repo_head_hexsha": "cde04a32929c082f8aa93f8cb8cb2368c13661b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "position_control.py", "max_forks_repo_name": "glauberrleite/mearm-experience", "max_forks_repo_head_hexsha": "cde04a32929c082f8aa93f8cb8cb2368c13661b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8, "max_line_length": 90, "alphanum_fraction": 0.6442495127, "include": true, "reason": "import numpy", "num_tokens": 372}
|
"""
Module to train a new models to create user's profiles
This is a quick & dirty script for testing. The proper wenet data will be used by using proper API
"""
import pickle
import re
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta
from functools import partial
from glob import glob
import numpy as np
import pandas as pd
from progress.bar import Bar
from sklearn.decomposition import LatentDirichletAllocation
from personal_context_builder.wenet_algo import (
estimate_stay_points,
estimate_stay_regions,
labelize_stay_region,
)
from personal_context_builder.wenet_analysis import BagOfWordsVectorizer
from personal_context_builder.wenet_analysis_models import BaseModelWrapper
from personal_context_builder.wenet_models import LocationPoint, UserPlaceTimeOnly
from personal_context_builder.wenet_tools import time_difference_ms
def get_locations_from_df_without_time(df):
locations = []
for index, row in df.iterrows():
try:
accuracy = row["accuracy"]
pts_t = None
location = LocationPoint(pts_t, row["latitude"], row["longitude"], accuracy)
locations.append(location)
except ValueError:
locations.append(None)
return locations
def get_locations_from_df(df):
locations = []
for index, row in df.iterrows():
try:
accuracy = row["accuracy"]
pts_t = datetime.fromtimestamp(row["timestamp"])
location = LocationPoint(pts_t, row["latitude"], row["longitude"], accuracy)
locations.append(location)
except ValueError:
locations.append(None)
return locations
def get_labelled_stay_regions(df, stay_regions, user, stay_points):
"""TODO use stay_regions_set instead of stay regions"""
user_places = []
for index, row in df.iterrows():
pts_t = datetime.strptime(row["timestamp"], "%Y-%m-%d %H:%M:%S")
if row["place_type"] == "personal":
place = row["place_id_name"]
else:
place = row["place_type"]
user_place_time_only = UserPlaceTimeOnly(pts_t, place, user)
user_place = user_place_time_only.to_user_place_from_stay_points(
stay_points, max_delta_time_ms=1000 * 60 * 3
)
if user_place is not None:
user_places.append(user_place)
labelled_stay_regions = labelize_stay_region(stay_regions, user_places)
stay_regions_set = set(stay_regions) - labelled_stay_regions
return labelled_stay_regions
def create_df_all():
locations_glob_expr = "/idiap/temp/wdroz/locations/*.csv"
all_location_files = glob(locations_glob_expr)
user_regex = re.compile(r"\/([^/\\_]+)_location\.csv")
df_list = []
users_list = []
bar = Bar("processing location files", max=len(all_location_files))
for location_file in all_location_files:
bar.next()
df = pd.read_csv(location_file)
df["date"] = pd.to_datetime(df["timestamp"] + df["timezone"], unit="s")
df = df.set_index("date")
df = df[~df.index.duplicated(keep="first")]
current_user = re.search(user_regex, location_file).group(1)
users_list.append(current_user)
df_list.append(df)
bar.finish()
df_all = pd.concat(df_list)
return df_all, users_list
def get_users_stay_regions(users_list, df_all, df_ambiance):
users_labelled_stay_regions = dict()
users_stay_regions = dict()
for user in users_list:
df_user_locations = df_all[df_all["userid"] == user]
user_locations = get_locations_from_df(df_user_locations)
stay_points = estimate_stay_points(user_locations)
if len(stay_points) < 1:
continue
df_user_ambiance = df_ambiance[df_ambiance["user"] == user]
stay_regions = estimate_stay_regions(stay_points, distance_threshold_m=20)
labelled_stay_regions = get_labelled_stay_regions(
df_user_ambiance, stay_regions, user, stay_points
)
users_stay_regions[user] = stay_regions
users_labelled_stay_regions[user] = labelled_stay_regions
return users_stay_regions, users_labelled_stay_regions
def create_user_night_activities(df_all, users_labelled_stay_regions):
user_night_activities = defaultdict(dict)
users_vectorizer = dict()
for name, grouped in df_all.groupby("night"):
for user in users_labelled_stay_regions.keys():
df_user_night = grouped[grouped["userid"] == user]
night = str(name)
year = night[:4]
month = night[4:6]
days = night[6:]
start_date = datetime.strptime(
f"{year}-{month}-{days} 20:00:00", "%Y-%m-%d %H:%M:%S"
)
end_date = start_date + timedelta(hours=8)
df_median = df_user_night.resample("30T").median()
df_user_activity = df_median.reindex(
pd.date_range(start=start_date, end=end_date, freq="30T")
)
if user not in users_vectorizer:
labelled_stay_regions = users_labelled_stay_regions[user]
stay_regions = users_stay_regions[user]
bow_user = BagOfWordsVectorizer(
labelled_stay_regions, stay_regions, "yn/yn_regions_mapping.json"
)
users_vectorizer[user] = bow_user
locations = get_locations_from_df_without_time(df_user_activity)
activity_vector = users_vectorizer[user].vectorize(locations)
user_night_activities[user][str(night)] = activity_vector
return user_night_activities
if __name__ == "__main__":
df_ambiance = pd.read_csv(
"/idiap/temp/wdroz/wenet/surveys/ambiance_survey.csv",
sep=",",
encoding="ISO-8859-1",
)
df_all, users_list = create_df_all()
print(f"number of elements in df_all : {len(df_all)}")
print(f"number of elements in users_list : {len(users_list)}")
users_stay_regions, users_labelled_stay_regions = get_users_stay_regions(
users_list, df_all, df_ambiance
)
print(f"number of elements in users_stay_regions : {len(users_stay_regions)}")
print(
f"number of elements in users_labelled_stay_regions : {len(users_labelled_stay_regions)}"
)
user_night_activities = create_user_night_activities(
df_all, users_labelled_stay_regions
)
file_user_night_activities = "/idiap/temp/wdroz/wenet/yn_user_night_activities.p"
print("saving user_night_activities")
with open(file_user_night_activities, "wb") as f:
pickle.dump(user_night_activities, f)
print(f"number of elements in user_night_activities : {len(user_night_activities)}")
X = [
v
for user, nights in user_night_activities.items()
for night, v in nights.items()
]
my_lda = partial(
LatentDirichletAllocation, n_components=15, random_state=0, n_jobs=-1
)
my_model = BaseModelWrapper(my_lda, "lda YN")
print(f"training model {my_model._name}")
my_model.fit(X)
model_name = "yn_model.p"
print(f"saving model {model_name}")
my_model.save(model_name)
|
{"hexsha": "acbed7c54fbf47e39c92424c8bfce09ca7474f9b", "size": 7206, "ext": "py", "lang": "Python", "max_stars_repo_path": "yn/yn_train.py", "max_stars_repo_name": "InternetOfUs/personal-context-builder", "max_stars_repo_head_hexsha": "89e7388d622bc0efbf708542566fdcdca667a4e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yn/yn_train.py", "max_issues_repo_name": "InternetOfUs/personal-context-builder", "max_issues_repo_head_hexsha": "89e7388d622bc0efbf708542566fdcdca667a4e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "yn/yn_train.py", "max_forks_repo_name": "InternetOfUs/personal-context-builder", "max_forks_repo_head_hexsha": "89e7388d622bc0efbf708542566fdcdca667a4e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5347593583, "max_line_length": 98, "alphanum_fraction": 0.6809603109, "include": true, "reason": "import numpy", "num_tokens": 1656}
|
"""
CUB-200-2011 classification dataset.
"""
import os
import numpy as np
import pandas as pd
from PIL import Image
import torch.utils.data as data
from .imagenet1k_cls_dataset import ImageNet1KMetaInfo
class CUB200_2011(data.Dataset):
"""
CUB-200-2011 fine-grained classification dataset.
Parameters
----------
root : str, default '~/.torch/datasets/CUB_200_2011'
Path to the folder stored the dataset.
mode : str, default 'train'
'train', 'val', or 'test'.
transform : function, default None
A function that takes data and transforms it.
target_transform : function, default None
A function that takes label and transforms it.
"""
def __init__(self,
root=os.path.join("~", ".torch", "datasets", "CUB_200_2011"),
mode="train",
transform=None,
target_transform=None):
super(CUB200_2011, self).__init__()
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
images_file_name = "images.txt"
images_file_path = os.path.join(root_dir_path, images_file_name)
if not os.path.exists(images_file_path):
raise Exception("Images file doesn't exist: {}".format(images_file_name))
class_file_name = "image_class_labels.txt"
class_file_path = os.path.join(root_dir_path, class_file_name)
if not os.path.exists(class_file_path):
raise Exception("Image class file doesn't exist: {}".format(class_file_name))
split_file_name = "train_test_split.txt"
split_file_path = os.path.join(root_dir_path, split_file_name)
if not os.path.exists(split_file_path):
raise Exception("Split file doesn't exist: {}".format(split_file_name))
images_df = pd.read_csv(
images_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "image_path"],
dtype={"image_id": np.int32, "image_path": np.unicode})
class_df = pd.read_csv(
class_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "class_id"],
dtype={"image_id": np.int32, "class_id": np.uint8})
split_df = pd.read_csv(
split_file_path,
sep="\s+",
header=None,
index_col=False,
names=["image_id", "split_flag"],
dtype={"image_id": np.int32, "split_flag": np.uint8})
df = images_df.join(class_df, rsuffix="_class_df").join(split_df, rsuffix="_split_df")
split_flag = 1 if mode == "train" else 0
subset_df = df[df.split_flag == split_flag]
self.image_ids = subset_df["image_id"].values.astype(np.int32)
self.class_ids = subset_df["class_id"].values.astype(np.int32) - 1
self.image_file_names = subset_df["image_path"].values.astype(np.unicode)
images_dir_name = "images"
self.images_dir_path = os.path.join(root_dir_path, images_dir_name)
assert os.path.exists(self.images_dir_path)
self._transform = transform
self._target_transform = target_transform
def __getitem__(self, index):
image_file_name = self.image_file_names[index]
image_file_path = os.path.join(self.images_dir_path, image_file_name)
img = Image.open(image_file_path).convert("RGB")
label = int(self.class_ids[index])
if self._transform is not None:
img = self._transform(img)
if self._target_transform is not None:
label = self._target_transform(label)
return img, label
def __len__(self):
return len(self.image_ids)
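# Minimal usage sketch (illustrative; assumes the CUB_200_2011 archive is
# already unpacked under the default root):
#   from torchvision import transforms
#   dataset = CUB200_2011(mode="train", transform=transforms.ToTensor())
#   img, label = dataset[0]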
class CUB200MetaInfo(ImageNet1KMetaInfo):
def __init__(self):
super(CUB200MetaInfo, self).__init__()
self.label = "CUB200_2011"
self.short_label = "cub"
self.root_dir_name = "CUB_200_2011"
self.dataset_class = CUB200_2011
self.num_training_samples = None
self.num_classes = 200
self.train_metric_capts = ["Train.Err"]
self.train_metric_names = ["Top1Error"]
self.train_metric_extra_kwargs = [{"name": "err"}]
self.val_metric_capts = ["Val.Err"]
self.val_metric_names = ["Top1Error"]
self.val_metric_extra_kwargs = [{"name": "err"}]
self.saver_acc_ind = 0
self.net_extra_kwargs = {"aux": False}
self.load_ignore_extra = True
def add_dataset_parser_arguments(self,
parser,
work_dir_path):
super(CUB200MetaInfo, self).add_dataset_parser_arguments(parser, work_dir_path)
parser.add_argument(
"--no-aux",
dest="no_aux",
action="store_true",
help="no `aux` mode in model")
def update(self,
args):
"""
Update CUB-200-2011 dataset metainfo after user customizing.
Parameters:
----------
args : ArgumentParser
Main script arguments.
"""
super(CUB200MetaInfo, self).update(args)
if args.no_aux:
self.net_extra_kwargs = None
self.load_ignore_extra = False
|
{"hexsha": "c7c96f8b4dd854375674df45ab3156d5e2c0ee1d", "size": 5319, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch/datasets/cub200_2011_cls_dataset.py", "max_stars_repo_name": "oliviaweng/imgclsmob", "max_stars_repo_head_hexsha": "80fffbb46f986614b162c725b21f3d208597ac77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-14T08:40:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T09:30:41.000Z", "max_issues_repo_path": "pytorch/datasets/cub200_2011_cls_dataset.py", "max_issues_repo_name": "ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision", "max_issues_repo_head_hexsha": "a1f1f52eecbb841fa878bff4d3c311b79864835d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch/datasets/cub200_2011_cls_dataset.py", "max_forks_repo_name": "ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision", "max_forks_repo_head_hexsha": "a1f1f52eecbb841fa878bff4d3c311b79864835d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-01T12:22:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-24T22:02:35.000Z", "avg_line_length": 35.6979865772, "max_line_length": 94, "alphanum_fraction": 0.610641098, "include": true, "reason": "import numpy", "num_tokens": 1197}
|
import numpy as np
import matplotlib.pyplot as plt
from project_utilities import *
import time
init_mpl(150,mat_settings = True)
from IPython.display import clear_output
import pygame
import numba
from numba import prange
@numba.njit()
def set_bnd(N,b,x):
if b == 0:
for i in prange(N+2):
x[0,i] = x[1,i]
x[i,0] = x[i,1]
x[N+1,i] = x[N,i]
x[i,N+1] = x[i,N]
elif b == 1:
for i in prange(N+2):
x[0,i] = -x[1,i]
x[N+1,i] = -x[N,i]
x[i,0] = x[i,1]
x[i,N+1] = x[i,N]
elif b == 2:
for i in prange(N+2):
x[0,i] = x[1,i]
x[N+1,i] = x[N,i]
x[i,0] = -x[i,1]
x[i,N+1] = -x[i,N]
x[0,0] = 1/2*(x[1,0]+x[0,1])
x[0,N+1] = 1/2*(x[1,N+1]+x[0,N])
x[N+1,0] = 1/2*(x[N,0]+x[N+1,1])
x[N+1,N+1] = 1/2*(x[N,N+1]+x[N+1,N])
@numba.njit()
def add_source(x,s,dt):
x += dt*s
@numba.njit()
def diffuse(N,b,x,x0,diff,dt):
a = dt*diff*N**2
for k in range(20):
for i in range(1,N+1):
for j in range(1,N+1):
x[i,j] = (x0[i,j] + a*(x[i-1,j]+x[i+1,j]+x[i,j-1]+x[i,j+1]))/(1+4*a)
set_bnd(N,b,x)
@numba.njit()
def advect(N,b,d,d0,u,v,dt):
dt0 = N*dt
for i in prange(1,N+1):
for j in prange(1,N+1):
x = i-dt0*u[i,j]
y = j-dt0*v[i,j]
if x < 0.5:
x = 0.5
elif x > N + 0.5:
x = N + 0.5
if y < 0.5:
y = 0.5
elif y > N + 0.5:
y = N + 0.5
i0 = int(np.floor(x))
i1 = i0+1
j0 = int(np.floor(y))
j1 = j0+1
s1 = x -i0
s0 = 1 - s1
t1 = y-j0
t0= 1-t1
d[i,j] = s0*(t0*d0[i0,j0]+t1*d0[i0,j1]) + s1*(t0*d0[i1,j0] + t1*d0[i1,j1])
set_bnd(N,b,d)
@numba.njit()
def dens_step(N,x,x0,u,v,diff,dt,s):
add_source(x,s,dt)
diffuse(N,0,x0,x,diff,dt)
advect(N,0,x,x0,u,v,dt)
@numba.njit()
def project(N,u,v,p,div):
h = 1/N
for i in prange(1,N+1):
for j in prange(1,N+1):
div[i,j] = -0.5*h*(u[i+1,j]-u[i-1,j] + v[i,j+1]-v[i,j-1])
p[i,j] = 0
set_bnd(N,0,div)
set_bnd(N,0,p)
for k in range(20):
for i in range(1,N+1):
for j in range(1,N+1):
p[i,j] = (div[i,j]+p[i-1,j]+p[i+1,j]+p[i,j-1]+p[i,j+1])/4
set_bnd(N,0,p)
for i in prange(1,N+1):
for j in prange(1,N+1):
u[i,j] -= 0.5*(p[i+1,j]-p[i-1,j])/h
v[i,j] -= 0.5*(p[i,j+1]-p[i,j-1])/h
set_bnd(N,1,u)
set_bnd(N,2,v)
@numba.njit()
def vel_step(N,u,v,u0,v0,visc,dt,su,sv):
add_source(u,su,dt)
add_source(v,sv,dt)
diffuse(N,1,u0,u,visc,dt)
diffuse(N,1,v0,v,visc,dt)
project(N,u0,v0,u,v)
advect(N,1,u,u0,u0,v0,dt)
advect(N,2,v,v0,u0,v0,dt)
project(N,u,v,u0,v0)
import matplotlib as mpl
from matplotlib import cm
class MplColorHelper:
def __init__(self, cmap_name, start_val, stop_val):
self.cmap_name = cmap_name
self.cmap = plt.get_cmap(cmap_name)
self.norm = mpl.colors.Normalize(vmin=start_val, vmax=stop_val)
self.scalarMap = cm.ScalarMappable(norm=self.norm, cmap=self.cmap)
def get_rgb(self, val):
return self.scalarMap.to_rgba(val)
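# --- Illustrative driver (added sketch, not part of the original file) ---
# A minimal loop exercising the solver above; the grid size, time step, and
# source placement are arbitrary example values, not project settings.
def demo(N=64, steps=100, diff=1e-4, visc=1e-4, dt=0.01):
    shape = (N + 2, N + 2)
    u, v, u0, v0 = (np.zeros(shape) for _ in range(4))
    dens, dens0 = np.zeros(shape), np.zeros(shape)
    s = np.zeros(shape)
    s[N // 2, N // 2] = 100.0      # steady density source at the centre
    su, sv = np.zeros(shape), np.zeros(shape)
    sv[N // 2, N // 2] = 5.0       # small constant force at the source
    for _ in range(steps):
        vel_step(N, u, v, u0, v0, visc, dt, su, sv)
        dens_step(N, dens, dens0, u, v, diff, dt, s)
    return dens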
|
{"hexsha": "9a1cc80184dd1e4640c67902386cce44b3a677b1", "size": 3333, "ext": "py", "lang": "Python", "max_stars_repo_path": "methods.py", "max_stars_repo_name": "tobyvg/Jos-Stam-Fluid", "max_stars_repo_head_hexsha": "035f9d9525078dc99be6eec3adb5c621a6d18c19", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "methods.py", "max_issues_repo_name": "tobyvg/Jos-Stam-Fluid", "max_issues_repo_head_hexsha": "035f9d9525078dc99be6eec3adb5c621a6d18c19", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "methods.py", "max_forks_repo_name": "tobyvg/Jos-Stam-Fluid", "max_forks_repo_head_hexsha": "035f9d9525078dc99be6eec3adb5c621a6d18c19", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0390625, "max_line_length": 86, "alphanum_fraction": 0.4704470447, "include": true, "reason": "import numpy,from numba", "num_tokens": 1340}
|
open import FRP.JS.RSet using ( ⟦_⟧ )
open import FRP.JS.Behaviour using ( Beh )
open import FRP.JS.DOM using ( DOM )
module FRP.JS.Main where
postulate
Main : Set
reactimate : ⟦ Beh DOM ⟧ → Main
{-# COMPILED_JS reactimate require("agda.frp").reactimate #-}
|
{"hexsha": "5e722121e2272374451f9aa4a4fb5a0076e9213c", "size": 265, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/agda/FRP/JS/Main.agda", "max_stars_repo_name": "agda/agda-frp-js", "max_stars_repo_head_hexsha": "c7ccaca624cb1fa1c982d8a8310c313fb9a7fa72", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2015-04-20T21:47:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T09:46:14.000Z", "max_issues_repo_path": "src/agda/FRP/JS/Main.agda", "max_issues_repo_name": "agda/agda-frp-js", "max_issues_repo_head_hexsha": "c7ccaca624cb1fa1c982d8a8310c313fb9a7fa72", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/agda/FRP/JS/Main.agda", "max_forks_repo_name": "agda/agda-frp-js", "max_forks_repo_head_hexsha": "c7ccaca624cb1fa1c982d8a8310c313fb9a7fa72", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2016-11-07T21:50:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:39:38.000Z", "avg_line_length": 22.0833333333, "max_line_length": 61, "alphanum_fraction": 0.6943396226, "num_tokens": 86}
|
import argparse
import sys
from pathlib import Path
import joblib
import numpy as np
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
def get_parser():
parser = argparse.ArgumentParser(description="Fit scalers")
parser.add_argument("utt_list", type=str, help="utternace list")
parser.add_argument("in_dir", type=str, help="in directory")
parser.add_argument("out_path", type=str, help="Output path")
parser.add_argument("--external_scaler", type=str, help="External scaler")
return parser
if __name__ == "__main__":
args = get_parser().parse_args(sys.argv[1:])
in_dir = Path(args.in_dir)
if args.external_scaler is not None:
scaler = joblib.load(args.external_scaler)
else:
scaler = StandardScaler()
with open(args.utt_list) as f:
for utt_id in tqdm(f):
c = np.load(in_dir / f"{utt_id.strip()}-feats.npy")
scaler.partial_fit(c)
joblib.dump(scaler, args.out_path)
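# Illustrative invocation (added sketch; the paths are hypothetical):
#   python fit_scaler.py data/utt_list.txt dump/org out/scaler.joblib
# Passing --external_scaler updates a previously fitted scaler with more
# data instead of starting from a fresh StandardScaler:
#   python fit_scaler.py data/utt_list.txt dump/org out/scaler.joblib \
#       --external_scaler out/scaler.joblib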
|
{"hexsha": "03dec7e3f766e18ff21b05bed65094b33cc6f1f9", "size": 990, "ext": "py", "lang": "Python", "max_stars_repo_path": "recipes/common/fit_scaler.py", "max_stars_repo_name": "kunosato-mado/ttslearn", "max_stars_repo_head_hexsha": "1230ce8d5256a7438c485a337968ce086620a88e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 146, "max_stars_repo_stars_event_min_datetime": "2021-08-11T16:08:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T23:15:07.000Z", "max_issues_repo_path": "recipes/common/fit_scaler.py", "max_issues_repo_name": "kunosato-mado/ttslearn", "max_issues_repo_head_hexsha": "1230ce8d5256a7438c485a337968ce086620a88e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-08-16T01:13:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T01:18:26.000Z", "max_forks_repo_path": "recipes/common/fit_scaler.py", "max_forks_repo_name": "kunosato-mado/ttslearn", "max_forks_repo_head_hexsha": "1230ce8d5256a7438c485a337968ce086620a88e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2021-08-11T16:10:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T14:39:31.000Z", "avg_line_length": 30.9375, "max_line_length": 78, "alphanum_fraction": 0.6919191919, "include": true, "reason": "import numpy", "num_tokens": 234}
|
\paragraph{print\_level:} Output verbosity level. $\;$ \\
Sets the default verbosity level for console
output. The larger this value, the more detailed
the output. The valid range for this integer option is
$0 \le {\tt print\_level } \le 11$
and its default value is $4$.
\paragraph{print\_user\_options:} Print all options set by the user. $\;$ \\
If selected, the algorithm will print the list of
all options set by the user including their
values and whether they have been used.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: don't print options
\item yes: print options
\end{itemize}
\paragraph{print\_options\_documentation:} Switch to print all algorithmic options. $\;$ \\
If selected, the algorithm will print the list of
all available algorithmic options with some
documentation before solving the optimization
problem.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: don't print list
\item yes: print list
\end{itemize}
\paragraph{output\_file:} File name of desired output file (leave unset for no file output). $\;$ \\
NOTE: This option only works when read from the
ipopt.opt options file! An output file with this
name will be written (leave unset for no file
output). The verbosity level is by default set
to "print\_level", but can be overridden with
"file\_print\_level". The file name is changed
to use only small letters.
The default value for this string option is "".
\\
Possible values:
\begin{itemize}
\item *: Any acceptable standard file name
\end{itemize}
\paragraph{file\_print\_level:} Verbosity level for output file. $\;$ \\
NOTE: This option only works when read from the
ipopt.opt options file! Determines the verbosity
level for the file specified by "output\_file".
By default it is the same as "print\_level". The valid range for this integer option is
$0 \le {\tt file\_print\_level } \le 11$
and its default value is $4$.
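For illustration, a hypothetical {\tt ipopt.opt} fragment combining the
output options above (values are examples only):
\begin{verbatim}
print_level        3
output_file        ipopt_run.log
file_print_level   8
\end{verbatim}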
\paragraph{tol:} Desired convergence tolerance (relative). $\;$ \\
Determines the convergence tolerance for the
algorithm. The algorithm terminates
successfully, if the (scaled) NLP error becomes
smaller than this value, and if the (absolute)
criteria according to "dual\_inf\_tol",
"primal\_inf\_tol", and "cmpl\_inf\_tol" are met.
(This is epsilon\_tol in Eqn. (6) in
implementation paper). See also
"acceptable\_tol" as a second termination
criterion. Note, some other algorithmic features
also use this quantity to determine thresholds
etc. The valid range for this real option is
$0 < {\tt tol } < {\tt +inf}$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{max\_iter:} Maximum number of iterations. $\;$ \\
The algorithm terminates with an error message if
the number of iterations exceeded this number. The valid range for this integer option is
$0 \le {\tt max\_iter } < {\tt +inf}$
and its default value is $3000$.
\paragraph{compl\_inf\_tol:} Desired threshold for the complementarity conditions. $\;$ \\
Absolute tolerance on the complementarity.
Successful termination requires that the max-norm
of the (unscaled) complementarity is less than
this threshold. The valid range for this real option is
$0 < {\tt compl\_inf\_tol } < {\tt +inf}$
and its default value is $0.0001$.
\paragraph{dual\_inf\_tol:} Desired threshold for the dual infeasibility. $\;$ \\
Absolute tolerance on the dual infeasibility.
Successful termination requires that the max-norm
of the (unscaled) dual infeasibility is less than
this threshold. The valid range for this real option is
$0 < {\tt dual\_inf\_tol } < {\tt +inf}$
and its default value is $0.0001$.
\paragraph{constr\_viol\_tol:} Desired threshold for the constraint violation. $\;$ \\
Absolute tolerance on the constraint violation.
Successful termination requires that the max-norm
of the (unscaled) constraint violation is less
than this threshold. The valid range for this real option is
$0 < {\tt constr\_viol\_tol } < {\tt +inf}$
and its default value is $0.0001$.
\paragraph{acceptable\_tol:} "Acceptable" convergence tolerance (relative). $\;$ \\
Determines which (scaled) overall optimality
error is considered to be "acceptable." There are
two levels of termination criteria. If the usual
"desired" tolerances (see tol, dual\_inf\_tol
etc) are satisfied at an iteration, the algorithm
immediately terminates with a success message.
On the other hand, if the algorithm encounters
"acceptable\_iter" many iterations in a row that
are considered "acceptable", it will terminate
before the desired convergence tolerance is met.
This is useful in cases where the algorithm might
not be able to achieve the "desired" level of
accuracy. The valid range for this real option is
$0 < {\tt acceptable\_tol } < {\tt +inf}$
and its default value is $1 \cdot 10^{-06}$.
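As a sketch, the two-level termination test can be configured with a
fragment such as (example values, not recommendations):
\begin{verbatim}
tol              1e-8
acceptable_tol   1e-6
\end{verbatim}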
\paragraph{acceptable\_compl\_inf\_tol:} "Acceptance" threshold for the complementarity conditions. $\;$ \\
Absolute tolerance on the complementarity.
"Acceptable" termination requires that the
max-norm of the (unscaled) complementarity is
less than this threshold; see also
acceptable\_tol. The valid range for this real option is
$0 < {\tt acceptable\_compl\_inf\_tol } < {\tt +inf}$
and its default value is $0.01$.
\paragraph{acceptable\_constr\_viol\_tol:} "Acceptance" threshold for the constraint violation. $\;$ \\
Absolute tolerance on the constraint violation.
"Acceptable" termination requires that the
max-norm of the (unscaled) constraint violation
is less than this threshold; see also
acceptable\_tol. The valid range for this real option is
$0 < {\tt acceptable\_constr\_viol\_tol } < {\tt +inf}$
and its default value is $0.01$.
\paragraph{acceptable\_dual\_inf\_tol:} "Acceptance" threshold for the dual infeasibility. $\;$ \\
Absolute tolerance on the dual infeasibility.
"Acceptable" termination requires that the
(max-norm of the unscaled) dual infeasibility is
less than this threshold; see also
acceptable\_tol. The valid range for this real option is
$0 < {\tt acceptable\_dual\_inf\_tol } < {\tt +inf}$
and its default value is $0.01$.
\paragraph{diverging\_iterates\_tol:} Threshold for maximal value of primal iterates. $\;$ \\
If any component of the primal iterates exceeded
this value (in absolute terms), the optimization
is aborted with the exit message that the
iterates seem to be diverging. The valid range for this real option is
$0 < {\tt diverging\_iterates\_tol } < {\tt +inf}$
and its default value is $1 \cdot 10^{+20}$.
\paragraph{barrier\_tol\_factor:} Factor for mu in barrier stop test. $\;$ \\
The convergence tolerance for each barrier
problem in the monotone mode is the value of the
barrier parameter times "barrier\_tol\_factor".
This option is also used in the adaptive mu
strategy during the monotone mode. (This is
kappa\_epsilon in implementation paper). The valid range for this real option is
$0 < {\tt barrier\_tol\_factor } < {\tt +inf}$
and its default value is $10$.
\paragraph{obj\_scaling\_factor:} Scaling factor for the objective function. $\;$ \\
This option sets a scaling factor for the
objective function. The scaling is seen
internally by Ipopt but the unscaled objective is
reported in the console output. If additional
scaling parameters are computed (e.g.
user-scaling or gradient-based), both factors are
multiplied. If this value is chosen to be
negative, Ipopt will maximize the objective
function instead of minimizing it. The valid range for this real option is
${\tt -inf} < {\tt obj\_scaling\_factor } < {\tt +inf}$
and its default value is $1$.
\paragraph{nlp\_scaling\_method:} Select the technique used for scaling the NLP. $\;$ \\
Selects the technique used for scaling the
problem internally before it is solved. For
user-scaling, the parameters come from the NLP.
If you are using AMPL, they can be specified
through suffixes ("scaling\_factor").
The default value for this string option is "gradient-based".
\\
Possible values:
\begin{itemize}
\item none: no problem scaling will be performed
\item user-scaling: scaling parameters will come from the user
\item gradient-based: scale the problem so the maximum gradient at
the starting point is scaling\_max\_gradient
\end{itemize}
\paragraph{nlp\_scaling\_max\_gradient:} Maximum gradient after NLP scaling. $\;$ \\
This is the gradient scaling cut-off. If the
maximum gradient is above this value, then
gradient based scaling will be performed. Scaling
parameters are calculated to scale the maximum
gradient back to this value. (This is g\_max in
Section 3.8 of the implementation paper.) Note:
This option is only used if
"nlp\_scaling\_method" is chosen as
"gradient-based". The valid range for this real option is
$0 < {\tt nlp\_scaling\_max\_gradient } < {\tt +inf}$
and its default value is $100$.
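As a sketch of the gradient-based rule (see Section 3.8 of the
implementation paper for the precise statement), the objective is scaled
by roughly
\[
  s_f = \min\left\{1,\; \frac{g_{\max}}{\|\nabla f(x_0)\|_\infty}\right\},
\]
with analogous factors for the constraint gradients, so that no scaled
gradient component at the starting point exceeds $g_{\max}$, the value of
{\tt nlp\_scaling\_max\_gradient}.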
\paragraph{bound\_relax\_factor:} Factor for initial relaxation of the bounds. $\;$ \\
Before start of the optimization, the bounds
given by the user are relaxed. This option sets
the factor for this relaxation. If it is set to
zero, then the bounds relaxation is disabled.
(See Eqn.(35) in implementation paper.) The valid range for this real option is
$0 \le {\tt bound\_relax\_factor } < {\tt +inf}$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{honor\_original\_bounds:} Indicates whether final points should be projected into original bounds. $\;$ \\
Ipopt might relax the bounds during the
optimization (see, e.g., option
"bound\_relax\_factor"). This option determines
whether the final point should be projected back
into the user-provided original bounds after the
optimization.
The default value for this string option is "yes".
\\
Possible values:
\begin{itemize}
\item no: Leave final point unchanged
\item yes: Project final point back into original bounds
\end{itemize}
\paragraph{check\_derivatives\_for\_naninf:} Indicates whether it is desired to check for Nan/Inf in derivative matrices $\;$ \\
Activating this option will cause an error if an
invalid number is detected in the constraint
Jacobians or the Lagrangian Hessian. If this is
not activated, the test is skipped, and the
algorithm might proceed with invalid numbers and
fail.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: Don't check (faster).
\item yes: Check Jacobians and Hessian for Nan and Inf.
\end{itemize}
\paragraph{mu\_strategy:} Update strategy for barrier parameter. $\;$ \\
Determines which barrier parameter update
strategy is to be used.
The default value for this string option is "monotone".
\\
Possible values:
\begin{itemize}
\item monotone: use the monotone (Fiacco-McCormick) strategy
\item adaptive: use the adaptive update strategy
\end{itemize}
\paragraph{mu\_oracle:} Oracle for a new barrier parameter in the adaptive strategy. $\;$ \\
Determines how a new barrier parameter is
computed in each "free-mode" iteration of the
adaptive barrier parameter strategy. (Only
considered if "adaptive" is selected for option
"mu\_strategy").
The default value for this string option is "quality-function".
\\
Possible values:
\begin{itemize}
\item probing: Mehrotra's probing heuristic
\item loqo: LOQO's centrality rule
\item quality-function: minimize a quality function
\end{itemize}
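A hypothetical fragment selecting the adaptive strategy together with its
default oracle (example only):
\begin{verbatim}
mu_strategy   adaptive
mu_oracle     quality-function
\end{verbatim}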
\paragraph{quality\_function\_max\_section\_steps:} Maximum number of search steps during direct search procedure determining the optimal centering parameter. $\;$ \\
The golden section search is performed for the
quality function based mu oracle. (Only used if
option "mu\_oracle" is set to "quality-function".) The valid range for this integer option is
$0 \le {\tt quality\_function\_max\_section\_steps } < {\tt +inf}$
and its default value is $8$.
\paragraph{fixed\_mu\_oracle:} Oracle for the barrier parameter when switching to fixed mode. $\;$ \\
Determines how the first value of the barrier
parameter should be computed when switching to
the "monotone mode" in the adaptive strategy.
(Only considered if "adaptive" is selected for
option "mu\_strategy".)
The default value for this string option is "average\_compl".
\\
Possible values:
\begin{itemize}
\item probing: Mehrotra's probing heuristic
\item loqo: LOQO's centrality rule
\item quality-function: minimize a quality function
\item average\_compl: base on current average complementarity
\end{itemize}
\paragraph{mu\_init:} Initial value for the barrier parameter. $\;$ \\
This option determines the initial value for the
barrier parameter (mu). It is only relevant in
the monotone, Fiacco-McCormick version of the
algorithm. (i.e., if "mu\_strategy" is chosen as
"monotone") The valid range for this real option is
$0 < {\tt mu\_init } < {\tt +inf}$
and its default value is $0.1$.
\paragraph{mu\_max\_fact:} Factor for initialization of maximum value for barrier parameter. $\;$ \\
This option determines the upper bound on the
barrier parameter. This upper bound is computed
as the average complementarity at the initial
point times the value of this option. (Only used
if option "mu\_strategy" is chosen as "adaptive".) The valid range for this real option is
$0 < {\tt mu\_max\_fact } < {\tt +inf}$
and its default value is $1000$.
\paragraph{mu\_max:} Maximum value for barrier parameter. $\;$ \\
This option specifies an upper bound on the
barrier parameter in the adaptive mu selection
mode. If this option is set, it overwrites the
effect of mu\_max\_fact. (Only used if option
"mu\_strategy" is chosen as "adaptive".) The valid range for this real option is
$0 < {\tt mu\_max } < {\tt +inf}$
and its default value is $100000$.
\paragraph{mu\_min:} Minimum value for barrier parameter. $\;$ \\
This option specifies the lower bound on the
barrier parameter in the adaptive mu selection
mode. By default, it is set to
min("tol","compl\_inf\_tol")/("barrier\_tol\_fact-
or"+1), which should be a reasonable value. (Only
used if option "mu\_strategy" is chosen as
"adaptive".) The valid range for this real option is
$0 < {\tt mu\_min } < {\tt +inf}$
and its default value is $1 \cdot 10^{-09}$.
\paragraph{mu\_linear\_decrease\_factor:} Determines linear decrease rate of barrier parameter. $\;$ \\
For the Fiacco-McCormick update procedure the new
barrier parameter mu is obtained by taking the
minimum of mu*"mu\_linear\_decrease\_factor" and
mu\^"superlinear\_decrease\_power". (This is
kappa\_mu in implementation paper.) This option
is also used in the adaptive mu strategy during
the monotone mode. The valid range for this real option is
$0 < {\tt mu\_linear\_decrease\_factor } < 1$
and its default value is $0.2$.
\paragraph{mu\_superlinear\_decrease\_power:} Determines superlinear decrease rate of barrier parameter. $\;$ \\
For the Fiacco-McCormick update procedure the new
barrier parameter mu is obtained by taking the
minimum of mu*"mu\_linear\_decrease\_factor" and
mu\^"superlinear\_decrease\_power". (This is
theta\_mu in implementation paper.) This option
is also used in the adaptive mu strategy during
the monotone mode. The valid range for this real option is
$1 < {\tt mu\_superlinear\_decrease\_power } < 2$
and its default value is $1.5$.
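Combining the last two options, the monotone (Fiacco-McCormick) update
sketched in the implementation paper takes roughly the form
\[
  \mu_{k+1} = \max\left\{\frac{\epsilon_{\rm tol}}{10},\;
      \min\left\{\kappa_\mu\,\mu_k,\; \mu_k^{\theta_\mu}\right\}\right\},
\]
where $\kappa_\mu$ is {\tt mu\_linear\_decrease\_factor} and $\theta_\mu$
is {\tt mu\_superlinear\_decrease\_power}.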
\paragraph{bound\_frac:} Desired minimum relative distance from the initial point to bound. $\;$ \\
Determines how much the initial point might have
to be modified in order to be sufficiently inside
the bounds (together with "bound\_push"). (This
is kappa\_2 in Section 3.6 of implementation
paper.) The valid range for this real option is
$0 < {\tt bound\_frac } \le 0.5$
and its default value is $0.01$.
\paragraph{bound\_push:} Desired minimum absolute distance from the initial point to bound. $\;$ \\
Determines how much the initial point might have
to be modified in order to be sufficiently inside
the bounds (together with "bound\_frac"). (This
is kappa\_1 in Section 3.6 of implementation
paper.) The valid range for this real option is
$0 < {\tt bound\_push } < {\tt +inf}$
and its default value is $0.01$.
\paragraph{bound\_mult\_init\_val:} Initial value for the bound multipliers. $\;$ \\
All dual variables corresponding to bound
constraints are initialized to this value. The valid range for this real option is
$0 < {\tt bound\_mult\_init\_val } < {\tt +inf}$
and its default value is $1$.
\paragraph{constr\_mult\_init\_max:} Maximum allowed least-square guess of constraint multipliers. $\;$ \\
Determines how large the initial least-square
guesses of the constraint multipliers are allowed
to be (in max-norm). If the guess is larger than
this value, it is discarded and all constraint
multipliers are set to zero. This option is
also used when initializing the restoration
phase. By default,
"resto.constr\_mult\_init\_max" (the one used in
RestoIterateInitializer) is set to zero. The valid range for this real option is
$0 \le {\tt constr\_mult\_init\_max } < {\tt +inf}$
and its default value is $1000$.
\paragraph{warm\_start\_init\_point:} Warm-start for initial point $\;$ \\
Indicates whether this optimization should use a
warm start initialization, where values of primal
and dual variables are given (e.g., from a
previous optimization of a related problem.)
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: do not use the warm start initialization
\item yes: use the warm start initialization
\end{itemize}
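A hypothetical warm-start fragment (values are illustrative):
\begin{verbatim}
warm_start_init_point        yes
warm_start_bound_push        1e-6
warm_start_bound_frac        1e-6
warm_start_mult_bound_push   1e-6
\end{verbatim}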
\paragraph{warm\_start\_bound\_push:} same as bound\_push for the regular initializer. $\;$ \\
The valid range for this real option is
$0 < {\tt warm\_start\_bound\_push } < {\tt +inf}$
and its default value is $0.001$.
\paragraph{warm\_start\_bound\_frac:} same as bound\_frac for the regular initializer. $\;$ \\
The valid range for this real option is
$0 < {\tt warm\_start\_bound\_frac } \le 0.5$
and its default value is $0.001$.
\paragraph{warm\_start\_mult\_bound\_push:} same as mult\_bound\_push for the regular initializer. $\;$ \\
The valid range for this real option is
$0 < {\tt warm\_start\_mult\_bound\_push } < {\tt +inf}$
and its default value is $0.001$.
\paragraph{warm\_start\_mult\_init\_max:} Maximum initial value for the equality multipliers. $\;$ \\
The valid range for this real option is
${\tt -inf} < {\tt warm\_start\_mult\_init\_max } < {\tt +inf}$
and its default value is $1 \cdot 10^{+06}$.
\paragraph{alpha\_for\_y:} Method to determine the step size for constraint multipliers. $\;$ \\
This option determines how the step size
(alpha\_y) will be calculated when updating the
constraint multipliers.
The default value for this string option is "primal".
\\
Possible values:
\begin{itemize}
\item primal: use primal step size
\item bound\_mult: use step size for the bound multipliers (good
for LPs)
\item min: use the min of primal and bound multipliers
\item max: use the max of primal and bound multipliers
\item full: take a full step of size one
\item min\_dual\_infeas: choose step size minimizing new dual
infeasibility
\item safe\_min\_dual\_infeas: like "min\_dual\_infeas", but safeguarded by
"min" and "max"
\end{itemize}
\paragraph{recalc\_y:} Tells the algorithm to recalculate the equality and inequality multipliers as least square estimates. $\;$ \\
This asks the algorithm to recompute the
multipliers, whenever the current infeasibility
is less than recalc\_y\_feas\_tol. Choosing yes
might be helpful when a quasi-Newton option is used.
However, each recalculation requires an extra
factorization of the linear system. If a limited
memory quasi-Newton option is chosen, this is
used by default.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: use the Newton step to update the multipliers
\item yes: use least-square multiplier estimates
\end{itemize}
\paragraph{recalc\_y\_feas\_tol:} Feasibility threshold for recomputation of multipliers. $\;$ \\
If recalc\_y is chosen and the current
infeasibility is less than this value, then the
multipliers are recomputed. The valid range for this real option is
$0 < {\tt recalc\_y\_feas\_tol } < {\tt +inf}$
and its default value is $1 \cdot 10^{-06}$.
\paragraph{max\_soc:} Maximum number of second order correction trial steps at each iteration. $\;$ \\
Choosing 0 disables the second order corrections.
(This is p\^{max} of Step A-5.9 of Algorithm A in
implementation paper.) The valid range for this integer option is
$0 \le {\tt max\_soc } < {\tt +inf}$
and its default value is $4$.
\paragraph{watchdog\_shortened\_iter\_trigger:} Number of shortened iterations that trigger the watchdog. $\;$ \\
If the number of successive iterations in which
the backtracking line search did not accept the
first trial point exceeds this number, the
watchdog procedure is activated. Choosing "0"
here disables the watchdog procedure. The valid range for this integer option is
$0 \le {\tt watchdog\_shortened\_iter\_trigger } < {\tt +inf}$
and its default value is $10$.
\paragraph{watchdog\_trial\_iter\_max:} Maximum number of watchdog iterations. $\;$ \\
This option determines the number of trial
iterations allowed before the watchdog procedure
is aborted and the algorithm returns to the
stored point. The valid range for this integer option is
$1 \le {\tt watchdog\_trial\_iter\_max } < {\tt +inf}$
and its default value is $3$.
\paragraph{expect\_infeasible\_problem:} Enable heuristics to quickly detect an infeasible problem. $\;$ \\
This option is meant to activate heuristics that
may speed up the infeasibility determination if
you expect that there is a good chance for the
problem to be infeasible. In the filter line
search procedure, the restoration phase is called
more quickly than usual, and more reduction in
the constraint violation is enforced before the
restoration phase is left. If the problem is
square, this option is enabled automatically.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: the problem is probably feasible
\item yes: the problem has a good chance to be infeasible
\end{itemize}
\paragraph{expect\_infeasible\_problem\_ctol:} Threshold for disabling "expect\_infeasible\_problem" option. $\;$ \\
If the constraint violation becomes smaller than
this threshold, the "expect\_infeasible\_problem"
heuristics in the filter line search are
disabled. If the problem is square, this option
is set to 0. The valid range for this real option is
$0 \le {\tt expect\_infeasible\_problem\_ctol } < {\tt +inf}$
and its default value is $0.001$.
\paragraph{start\_with\_resto:} Tells algorithm to switch to restoration phase in first iteration. $\;$ \\
Setting this option to "yes" forces the algorithm
to switch to the feasibility restoration phase in
the first iteration. If the initial point is
feasible, the algorithm will abort with a failure.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: don't force start in restoration phase
\item yes: force start in restoration phase
\end{itemize}
\paragraph{soft\_resto\_pderror\_reduction\_factor:} Required reduction in primal-dual error in the soft restoration phase. $\;$ \\
The soft restoration phase attempts to reduce the
primal-dual error with regular steps. If the
damped primal-dual step (damped only to satisfy
the fraction-to-the-boundary rule) is not
decreasing the primal-dual error by at least this
factor, then the regular restoration phase is
called. Choosing "0" here disables the soft
restoration phase. The valid range for this real option is
$0 \le {\tt soft\_resto\_pderror\_reduction\_factor } < {\tt +inf}$
and its default value is $0.9999$.
\paragraph{required\_infeasibility\_reduction:} Required reduction of infeasibility before leaving restoration phase. $\;$ \\
The restoration phase algorithm is performed,
until a point is found that is acceptable to the
filter and the infeasibility has been reduced by
at least the fraction given by this option. The valid range for this real option is
$0 \le {\tt required\_infeasibility\_reduction } < 1$
and its default value is $0.9$.
\paragraph{bound\_mult\_reset\_threshold:} Threshold for resetting bound multipliers after the restoration phase. $\;$ \\
After returning from the restoration phase, the
bound multipliers are updated with a Newton step
for complementarity. Here, the change in the
primal variables during the entire restoration
phase is taken to be the corresponding primal
Newton step. However, if after the update the
largest bound multiplier exceeds the threshold
specified by this option, the multipliers are all
reset to 1. The valid range for this real option is
$0 \le {\tt bound\_mult\_reset\_threshold } < {\tt +inf}$
and its default value is $1000$.
\paragraph{constr\_mult\_reset\_threshold:} Threshold for resetting equality and inequality multipliers after restoration phase. $\;$ \\
After returning from the restoration phase, the
constraint multipliers are recomputed by a least
square estimate. This option triggers when those
least-square estimates should be ignored. The valid range for this real option is
$0 \le {\tt constr\_mult\_reset\_threshold } < {\tt +inf}$
and its default value is $0$.
\paragraph{evaluate\_orig\_obj\_at\_resto\_trial:} Determines if the original objective function should be evaluated at restoration phase trial points. $\;$ \\
Setting this option to "yes" makes the
restoration phase algorithm evaluate the
objective function of the original problem at
every trial point encountered during the
restoration phase, even if this value is not
required. In this way, it is guaranteed that the
original objective function can be evaluated
without error at all accepted iterates; otherwise
the algorithm might fail at a point where the
restoration phase accepts an iterate that is good
for the restoration phase problem, but not the
original problem. On the other hand, if the
evaluation of the original objective is
expensive, this might be costly.
The default value for this string option is "yes".
\\
Possible values:
\begin{itemize}
\item no: skip evaluation
\item yes: evaluate at every trial point
\end{itemize}
\paragraph{linear\_solver:} Linear solver used for step computations. $\;$ \\
Determines which linear algebra package is to be
used for the solution of the augmented linear
system (for obtaining the search directions).
Note, the code must have been compiled with the
linear solver you want to choose. Depending on
your Ipopt installation, not all options are
available.
The default value for this string option is "ma27".
\\
Possible values:
\begin{itemize}
\item ma27: use the Harwell routine MA27
\item ma57: use the Harwell routine MA57
\item pardiso: use the Pardiso package
\item wsmp: use WSMP package
\item taucs: use TAUCS package (not yet working)
\item mumps: use MUMPS package (not yet working)
\end{itemize}
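For example, a hypothetical fragment selecting MA57 with a tighter pivot
tolerance (assuming Ipopt was compiled with MA57; values illustrative):
\begin{verbatim}
linear_solver   ma57
ma57_pivtol     1e-6
\end{verbatim}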
\paragraph{linear\_system\_scaling:} Method for scaling the linear system. $\;$ \\
Determines the method used to compute symmetric
scaling factors for the augmented system (see
also the "linear\_scaling\_on\_demand" option).
This scaling is independent of the NLP problem
scaling. By default, MC19 is only used if MA27
or MA57 are selected as linear solvers. This
option is only available if Ipopt has been
compiled with MC19.
The default value for this string option is "mc19".
\\
Possible values:
\begin{itemize}
\item none: no scaling will be performed
\item mc19: use the Harwell routine MC19
\end{itemize}
\paragraph{linear\_scaling\_on\_demand:} Flag indicating that linear scaling is only done if it seems required. $\;$ \\
This option is only important if a linear scaling
method (e.g., mc19) is used. If you choose "no",
then the scaling factors are computed for every
linear system from the start. This can be quite
expensive. Choosing "yes" means that the
algorithm will start the scaling method only when
the solutions to the linear system do not seem good,
and then use it until the end.
The default value for this string option is "yes".
\\
Possible values:
\begin{itemize}
\item no: Always scale the linear system.
\item yes: Start using linear system scaling if solutions
do not seem good.
\end{itemize}
\paragraph{max\_refinement\_steps:} Maximum number of iterative refinement steps per linear system solve. $\;$ \\
Iterative refinement (on the full unsymmetric
system) is performed for each right hand side.
This option determines the maximum number of
iterative refinement steps. The valid range for this integer option is
$0 \le {\tt max\_refinement\_steps } < {\tt +inf}$
and its default value is $10$.
\paragraph{min\_refinement\_steps:} Minimum number of iterative refinement steps per linear system solve. $\;$ \\
Iterative refinement (on the full unsymmetric
system) is performed for each right hand side.
This option determines the minimum number of
iterative refinements (i.e. at least
"min\_refinement\_steps" iterative refinement
steps are enforced per right hand side.) The valid range for this integer option is
$0 \le {\tt min\_refinement\_steps } < {\tt +inf}$
and its default value is $1$.
\paragraph{max\_hessian\_perturbation:} Maximum value of regularization parameter for handling negative curvature. $\;$ \\
In order to guarantee that the search directions
are indeed proper descent directions, Ipopt
requires that the inertia of the (augmented)
linear system for the step computation has the
correct number of negative and positive
eigenvalues. The idea is that this guides the
algorithm away from maximizers and makes Ipopt
more likely to converge to first-order optimal
points that are minimizers. If the inertia is not
correct, a multiple of the identity matrix is
added to the Hessian of the Lagrangian in the
augmented system. This parameter gives the
maximum value of the regularization parameter. If
a regularization of that size is not enough, the
algorithm skips this iteration and goes to the
restoration phase. (This is delta\_w\^max in the
implementation paper.) The valid range for this real option is
$0 < {\tt max\_hessian\_perturbation } < {\tt +inf}$
and its default value is $1 \cdot 10^{+20}$.
\paragraph{min\_hessian\_perturbation:} Smallest perturbation of the Hessian block. $\;$ \\
The size of the perturbation of the Hessian block
is never selected smaller than this value, unless
no perturbation is necessary. (This is
delta\_w\^min in implementation paper.) The valid range for this real option is
$0 \le {\tt min\_hessian\_perturbation } < {\tt +inf}$
and its default value is $1 \cdot 10^{-20}$.
\paragraph{first\_hessian\_perturbation:} Size of first x-s perturbation tried. $\;$ \\
The first value tried for the x-s perturbation in
the inertia correction scheme. (This is delta\_0
in the implementation paper.) The valid range for this real option is
$0 < {\tt first\_hessian\_perturbation } < {\tt +inf}$
and its default value is $0.0001$.
\paragraph{perturb\_inc\_fact\_first:} Increase factor for x-s perturbation for very first perturbation. $\;$ \\
The factor by which the perturbation is increased
when a trial value was not sufficient - this
value is used for the computation of the very
first perturbation and allows a different value
for the first perturbation than that used for
the remaining perturbations. (This is
bar\_kappa\_w\^+ in the implementation paper.) The valid range for this real option is
$1 < {\tt perturb\_inc\_fact\_first } < {\tt +inf}$
and its default value is $100$.
\paragraph{perturb\_inc\_fact:} Increase factor for x-s perturbation. $\;$ \\
The factor by which the perturbation is increased
when a trial value was not sufficient - this
value is used for the computation of all
perturbations except for the first. (This is
kappa\_w\^+ in the implementation paper.) The valid range for this real option is
$1 < {\tt perturb\_inc\_fact } < {\tt +inf}$
and its default value is $8$.
\paragraph{perturb\_dec\_fact:} Decrease factor for x-s perturbation. $\;$ \\
The factor by which the perturbation is decreased
when a trial value is deduced from the size of
the most recent successful perturbation. (This is
kappa\_w\^- in the implementation paper.) The valid range for this real option is
$0 < {\tt perturb\_dec\_fact } < 1$
and its default value is $0.333333$.
\paragraph{jacobian\_regularization\_value:} Size of the regularization for rank-deficient constraint Jacobians. $\;$ \\
(This is bar delta\_c in the implementation
paper.) The valid range for this real option is
$0 \le {\tt jacobian\_regularization\_value } < {\tt +inf}$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{hessian\_approximation:} Indicates what Hessian information is to be used. $\;$ \\
This determines which kind of information for the
Hessian of the Lagrangian function is used by the
algorithm.
The default value for this string option is "exact".
\\
Possible values:
\begin{itemize}
\item exact: Use second derivatives provided by the NLP.
\item limited-memory: Perform a limited-memory quasi-Newton
approximation
\end{itemize}
\paragraph{limited\_memory\_max\_history:} Maximum size of the history for the limited quasi-Newton Hessian approximation. $\;$ \\
This option determines the number of most recent
iterations that are taken into account for the
limited-memory quasi-Newton approximation. The valid range for this integer option is
$0 \le {\tt limited\_memory\_max\_history } < {\tt +inf}$
and its default value is $6$.
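A hypothetical fragment enabling the quasi-Newton approximation (the
history length shown simply repeats the default for clarity):
\begin{verbatim}
hessian_approximation        limited-memory
limited_memory_max_history   6
\end{verbatim}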
\paragraph{limited\_memory\_max\_skipping:} Threshold for successive iterations where update is skipped. $\;$ \\
If the update is skipped more than this number of
successive iterations, the quasi-Newton
approximation is reset. The valid range for this integer option is
$1 \le {\tt limited\_memory\_max\_skipping } < {\tt +inf}$
and its default value is $2$.
\paragraph{derivative\_test:} Enable derivative checker $\;$ \\
If this option is enabled, a (slow) derivative
test will be performed before the optimization.
The test is performed at the user-provided
starting point and marks derivative values that
seem suspicious.
The default value for this string option is "none".
\\
Possible values:
\begin{itemize}
\item none: do not perform derivative test
\item first-order: perform test of first derivatives at starting
point
\item second-order: perform test of first and second derivatives at
starting point
\end{itemize}
\paragraph{derivative\_test\_perturbation:} Size of the finite difference perturbation in derivative test. $\;$ \\
This determines the relative perturbation of the
variable entries. The valid range for this real option is
$0 < {\tt derivative\_test\_perturbation } < {\tt +inf}$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{derivative\_test\_tol:} Threshold for indicating wrong derivative. $\;$ \\
If the relative deviation of the estimated
derivative from the given one is larger than this
value, the corresponding derivative is marked as
wrong. The valid range for this real option is
$0 < {\tt derivative\_test\_tol } < {\tt +inf}$
and its default value is $0.0001$.
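A hypothetical fragment enabling the checker for first derivatives
(values illustrative):
\begin{verbatim}
derivative_test               first-order
derivative_test_perturbation  1e-8
derivative_test_tol           1e-4
\end{verbatim}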
\paragraph{derivative\_test\_print\_all:} Indicates whether information for all estimated derivatives should be printed. $\;$ \\
Determines verbosity of derivative checker.
The default value for this string option is "no".
\\
Possible values:
\begin{itemize}
\item no: Print only suspect derivatives
\item yes: Print all derivatives
\end{itemize}
\paragraph{ma27\_pivtol:} Pivot tolerance for the linear solver MA27. $\;$ \\
A smaller number pivots for sparsity, a larger
number pivots for stability. This option is only
available if Ipopt has been compiled with MA27. The valid range for this real option is
$0 < {\tt ma27\_pivtol } < 1$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{ma27\_pivtolmax:} Maximum pivot tolerance for the linear solver MA27. $\;$ \\
Ipopt may increase pivtol as high as pivtolmax to
get a more accurate solution to the linear
system. This option is only available if Ipopt
has been compiled with MA27. The valid range for this real option is
$0 < {\tt ma27\_pivtolmax } < 1$
and its default value is $0.0001$.
\paragraph{ma27\_liw\_init\_factor:} Integer workspace memory for MA27. $\;$ \\
The initial integer workspace memory =
liw\_init\_factor * memory required by unfactored
system. Ipopt will increase the workspace size by
meminc\_factor if required. This option is only
available if Ipopt has been compiled with MA27. The valid range for this real option is
$1 \le {\tt ma27\_liw\_init\_factor } < {\tt +inf}$
and its default value is $5$.
\paragraph{ma27\_la\_init\_factor:} Real workspace memory for MA27. $\;$ \\
The initial real workspace memory =
la\_init\_factor * memory required by unfactored
system. Ipopt will increase the workspace size by
meminc\_factor if required. This option is only
available if Ipopt has been compiled with MA27. The valid range for this real option is
$1 \le {\tt ma27\_la\_init\_factor } < {\tt +inf}$
and its default value is $5$.
\paragraph{ma27\_meminc\_factor:} Increment factor for workspace size for MA27. $\;$ \\
If the integer or real workspace is not large
enough, Ipopt will increase its size by this
factor. This option is only available if Ipopt
has been compiled with MA27. The valid range for this real option is
$1 \le {\tt ma27\_meminc\_factor } < {\tt +inf}$
and its default value is $10$.
\paragraph{ma57\_pivtol:} Pivot tolerance for the linear solver MA57. $\;$ \\
A smaller number pivots for sparsity, a larger
number pivots for stability. This option is only
available if Ipopt has been compiled with MA57. The valid range for this real option is
$0 < {\tt ma57\_pivtol } < 1$
and its default value is $1 \cdot 10^{-08}$.
\paragraph{ma57\_pivtolmax:} Maximum pivot tolerance for the linear solver MA57. $\;$ \\
Ipopt may increase pivtol as high as
ma57\_pivtolmax to get a more accurate solution
to the linear system. This option is only
available if Ipopt has been compiled with MA57. The valid range for this real option is
$0 < {\tt ma57\_pivtolmax } < 1$
and its default value is $0.0001$.
\paragraph{ma57\_pre\_alloc:} Safety factor for work space memory allocation for the linear solver MA57. $\;$ \\
If 1 is chosen, the suggested amount of work
space is used. However, choosing a larger number
might avoid reallocation if the suggested values do
not suffice. This option is only available if
Ipopt has been compiled with MA57. The valid range for this real option is
$1 \le {\tt ma57\_pre\_alloc } < {\tt +inf}$
and its default value is $3$.
\paragraph{pardiso\_matching\_strategy:} Matching strategy to be used by Pardiso $\;$ \\
This is IPAR(13) in the Pardiso manual. This option
is only available if Ipopt has been compiled with
Pardiso.
The default value for this string option is "complete+2x2".
\\
Possible values:
\begin{itemize}
\item complete: Match complete (IPAR(13)=1)
\item complete+2x2: Match complete+2x2 (IPAR(13)=2)
\item constraints: Match constraints (IPAR(13)=3)
\end{itemize}
\paragraph{pardiso\_out\_of\_core\_power:} Enables out-of-core variant of Pardiso $\;$ \\
Setting this option to a positive integer k makes
Pardiso work in the out-of-core variant where the
factor is split into 2\^k subdomains. This is
IPARM(50) in the Pardiso manual. This option is
only available if Ipopt has been compiled with
Pardiso. The valid range for this integer option is
$0 \le {\tt pardiso\_out\_of\_core\_power } < {\tt +inf}$
and its default value is $0$.
\paragraph{wsmp\_num\_threads:} Number of threads to be used in WSMP $\;$ \\
This determines how many processors WSMP is
running on. This option is only available if
Ipopt has been compiled with WSMP. The valid range for this integer option is
$1 \le {\tt wsmp\_num\_threads } < {\tt +inf}$
and its default value is $1$.
\paragraph{wsmp\_ordering\_option:} Determines how ordering is done in WSMP $\;$ \\
This corresponds to the value of WSSMP's
IPARM(16). This option is only available if
Ipopt has been compiled with WSMP. The valid range for this integer option is
$-2 \le {\tt wsmp\_ordering\_option } \le 3$
and its default value is $1$.
\paragraph{wsmp\_pivtol:} Pivot tolerance for the linear solver WSMP. $\;$ \\
A smaller number pivots for sparsity, a larger
number pivots for stability. This option is only
available if Ipopt has been compiled with WSMP. The valid range for this real option is
$0 < {\tt wsmp\_pivtol } < 1$
and its default value is $0.0001$.
\paragraph{wsmp\_pivtolmax:} Maximum pivot tolerance for the linear solver WSMP. $\;$ \\
Ipopt may increase pivtol as high as pivtolmax to
get a more accurate solution to the linear
system. This option is only available if Ipopt
has been compiled with WSMP. The valid range for this real option is
$0 < {\tt wsmp\_pivtolmax } < 1$
and its default value is $0.1$.
\paragraph{wsmp\_scaling:} Determines how the matrix is scaled by WSMP. $\;$ \\
This corresponds to the value of WSSMP's
IPARM(10). This option is only available if Ipopt
has been compiled with WSMP. The valid range for this integer option is
$0 \le {\tt wsmp\_scaling } \le 3$
and its default value is $0$.
|
{"hexsha": "3596af91bee8f22220beeaa462be51d5a6a7e60a", "size": 41640, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Ipopt-3.2.1/Ipopt/doc/options.tex", "max_stars_repo_name": "FredericLiu/CarND-MPC-P5", "max_stars_repo_head_hexsha": "e4c68920edd0468ae73357864dde6d61bc1c4205", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Ipopt-3.2.1/Ipopt/doc/options.tex", "max_issues_repo_name": "FredericLiu/CarND-MPC-P5", "max_issues_repo_head_hexsha": "e4c68920edd0468ae73357864dde6d61bc1c4205", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Ipopt-3.2.1/Ipopt/doc/options.tex", "max_forks_repo_name": "FredericLiu/CarND-MPC-P5", "max_forks_repo_head_hexsha": "e4c68920edd0468ae73357864dde6d61bc1c4205", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4741035857, "max_line_length": 166, "alphanum_fraction": 0.754610951, "num_tokens": 10689}
|
from microsetta_public_api.repo._alpha_repo import AlphaRepo
from unittest.mock import patch, PropertyMock
from microsetta_public_api.api.diversity.alpha import (
available_metrics_alpha, get_alpha, alpha_group, exists_single,
exists_group,
available_metrics_alpha_alt,
get_alpha_alt,
alpha_group_alt,
exists_single_alt,
exists_group_alt,
)
from microsetta_public_api.config import DictElement, AlphaElement
from microsetta_public_api.exceptions import (
UnknownResource, UnknownID, IncompatibleOptions,
)
from microsetta_public_api.repo._metadata_repo import MetadataRepo
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import json
from math import sqrt
from microsetta_public_api.utils.testing import (
MockMetadataElement,
MockedJsonifyTestCase,
TrivialVisitor,
)
class AlphaDiversityImplementationTests(MockedJsonifyTestCase):
# need to choose where jsonify is being loaded from
# see https://stackoverflow.com/a/46465025
jsonify_to_patch = [
'microsetta_public_api.api.diversity.alpha.jsonify',
'microsetta_public_api.utils._utils.jsonify',
]
@classmethod
def setUpClass(cls):
cls.post_body = {'sample_ids': ['sample-foo-bar',
'sample-baz-bat']
}
    def test_alpha_diversity_available_metrics(self):
with patch('microsetta_public_api.repo._alpha_repo.AlphaRepo'
'.resources', new_callable=PropertyMock
) as mock_resources:
mock_resources.return_value = {
'faith_pd': '/some/path', 'chao1': '/some/other/path',
}
exp_metrics = ['faith_pd', 'chao1']
response, code = available_metrics_alpha()
obs = json.loads(response)
self.assertIn('alpha_metrics', obs)
self.assertListEqual(exp_metrics, obs['alpha_metrics'])
    def test_alpha_diversity_exists_single(self):
with patch('microsetta_public_api.repo._alpha_repo.AlphaRepo'
'.resources', new_callable=PropertyMock
) as mock_resources, \
patch.object(AlphaRepo, 'exists') as mock_exists:
mock_resources.return_value = {
'faith_pd': '/some/path', 'chao1': '/some/other/path',
}
mock_exists.return_value = True
response, code = exists_single(alpha_metric='faith_pd',
sample_id='sample_1')
obs = json.loads(response)
self.assertTrue(obs)
self.assertEqual(200, code)
    def test_alpha_diversity_exists_single_404(self):
with patch('microsetta_public_api.repo._alpha_repo.AlphaRepo'
'.resources', new_callable=PropertyMock
) as mock_resources, \
patch.object(AlphaRepo, 'exists') as mock_exists:
mock_resources.return_value = {
'faith_pd': '/some/path', 'chao1': '/some/other/path',
}
mock_exists.side_effect = [True]
with self.assertRaises(UnknownResource):
exists_single(alpha_metric='other-metric',
sample_id='sample_1')
    def test_alpha_diversity_exists_group(self):
with patch('microsetta_public_api.repo._alpha_repo.AlphaRepo'
'.resources', new_callable=PropertyMock
) as mock_resources, \
patch.object(AlphaRepo, 'exists') as mock_exists:
mock_resources.return_value = {
'faith_pd': '/some/path', 'chao1': '/some/other/path',
}
mock_exists.return_value = [True, False, True]
response, code = exists_group(alpha_metric='faith_pd',
body=['s1', 's2', 's3'])
obs = json.loads(response)
self.assertListEqual(obs, [True, False, True])
self.assertEqual(200, code)
    def test_alpha_diversity_exists_group_404(self):
with patch('microsetta_public_api.repo._alpha_repo.AlphaRepo'
'.resources', new_callable=PropertyMock
) as mock_resources, \
patch.object(AlphaRepo, 'exists') as mock_exists:
mock_resources.return_value = {
'faith_pd': '/some/path', 'chao1': '/some/other/path',
}
mock_exists.side_effect = [True, False, True]
with self.assertRaises(UnknownResource):
exists_group(alpha_metric='other-metric',
body=['s1', 's2', 's3'])
def test_alpha_diversity_single_sample(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists:
mock_exists.return_value = [True]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25}, name='observed_otus')
response, code = get_alpha('sample-foo-bar', 'observed_otus')
exp = {
'sample_id': 'sample-foo-bar',
'alpha_metric': 'observed_otus',
'data': 8.25,
}
obs = json.loads(response)
self.assertDictEqual(exp, obs)
self.assertEqual(code, 200)
def test_alpha_diversity_unknown_id(self):
with patch.object(AlphaRepo, 'exists') as mock_exists:
mock_exists.return_value = [False]
with self.assertRaises(UnknownID):
get_alpha('sample-foo-bar', 'observed-otus')
def test_alpha_diversity_improper_parameters(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.return_value = [True, True]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25, 'sample-baz-bat': 9.01},
name='observed_otus'
)
metric = 'observed_otus'
with self.assertRaises(IncompatibleOptions):
alpha_group(self.post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=False)
def test_alpha_diversity_group_return_raw_only(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.return_value = [True, True]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25, 'sample-baz-bat': 9.01},
name='observed_otus'
)
metric = 'observed_otus'
response, code = alpha_group(self.post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=True)
exp = {
'alpha_metric': 'observed_otus',
'alpha_diversity': {'sample-foo-bar': 8.25,
'sample-baz-bat': 9.01,
}
}
obs = json.loads(response.data)
self.assertDictEqual(exp, obs)
self.assertEqual(code, 200)
def test_alpha_diversity_group_return_raw_only_metadata_query_OR(self):
post_body = {
'sample_ids': [
'sample-foo-bar',
'sample-baz-bat',
],
'metadata_query': {
'some query': 'value'
},
'condition': "OR",
}
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo,
'available_metrics') as mock_metrics, \
patch.object(MetadataRepo,
'sample_id_matches') as mock_matches:
mock_metrics.return_value = ['observed_otus']
            # the first two values are used when checking the requested ids,
            # the other two when checking ids that match the metadata query
mock_exists.side_effect = [True, True, True, False]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25, 'sample-baz-bat': 9.01},
name='observed_otus'
)
mock_matches.return_value = ['sample-3', 'sample-4']
metric = 'observed_otus'
response, code = alpha_group(post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=True)
self.assertEqual(code, 200)
args, kwargs = mock_method.call_args
self.assertCountEqual(args[0], ['sample-foo-bar', 'sample-baz-bat',
'sample-3'])
def test_alpha_diversity_group_return_raw_only_metadata_query_AND(self):
post_body = {
'sample_ids': [
'sample-foo-bar',
'sample-baz-bat',
],
'metadata_query': {
'some query': 'value'
},
'condition': "AND",
}
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo,
'available_metrics') as mock_metrics, \
patch.object(MetadataRepo,
'sample_id_matches') as mock_matches:
mock_metrics.return_value = ['observed_otus']
            # the first two values are used when checking the requested ids,
            # the other two when checking ids that match the metadata query
mock_exists.side_effect = [True, True, True, True]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25, 'sample-baz-bat': 9.01},
name='observed_otus'
)
mock_matches.return_value = ['sample-foo-bar', 'sample-4']
metric = 'observed_otus'
response, code = alpha_group(post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=True)
self.assertEqual(code, 200)
args, kwargs = mock_method.call_args
self.assertCountEqual(args[0], ['sample-foo-bar'])
def test_alpha_diversity_group_return_raw_only_metadata_query_only(self):
post_body = {
'metadata_query': {
'some query': 'value'
},
}
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo,
'available_metrics') as mock_metrics, \
patch.object(MetadataRepo,
'sample_id_matches') as mock_matches:
mock_metrics.return_value = ['observed_otus']
            # the first two values are used when checking the requested ids,
            # the other two when checking ids that match the metadata query
mock_exists.side_effect = [True, True, True, True]
mock_method.return_value = pd.Series({
'sample-foo-bar': 8.25, 'sample-baz-bat': 9.01},
name='observed_otus'
)
mock_matches.return_value = ['sample-foo-bar', 'sample-4']
metric = 'observed_otus'
response, code = alpha_group(post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=True)
self.assertEqual(code, 200)
args, kwargs = mock_method.call_args
self.assertCountEqual(args[0], ['sample-foo-bar', 'sample-4'])
def test_alpha_diversity_group_return_summary_and_raw(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.return_value = [True, True]
mock_method.return_value = pd.Series(
{
'sample-foo-bar': 7,
'sample-baz-bat': 9.5,
'sample-qux-quux': 7.5,
'sample-4': 8,
},
name='observed_otus'
)
metric = 'observed_otus'
response, code = alpha_group(
body={
'sample_ids': [
'sample-foo-bar',
'sample-baz-bat',
'sample-qux-quux',
'sample-4',
]
},
alpha_metric=metric,
summary_statistics=True,
percentiles=[100, 0, 50, 20],
return_raw=True,
)
exp = {
'alpha_metric': 'observed_otus',
'alpha_diversity': {
'sample-foo-bar': 7,
'sample-baz-bat': 9.5,
'sample-qux-quux': 7.5,
'sample-4': 8,
},
'group_summary': {
'mean': 8,
'median': 7.75,
'std': sqrt(0.875),
'group_size': 4,
'percentile': [100, 0, 50, 20],
'percentile_values': [9.5, 7, 7.75, 7.3]
}
}
self.assertEqual(code, 200)
obs = json.loads(response.data)
self.assertCountEqual(exp.keys(), obs.keys())
self.assertCountEqual(exp['alpha_metric'], obs['alpha_metric'])
self.assertDictEqual(exp['alpha_diversity'],
obs['alpha_diversity'])
self.assertCountEqual(exp['group_summary'].keys(),
obs['group_summary'].keys()
)
gs_exp = exp['group_summary']
gs_obs = obs['group_summary']
npt.assert_array_almost_equal(gs_exp.pop('percentile'),
gs_obs.pop('percentile'))
npt.assert_array_almost_equal(gs_exp.pop('percentile_values'),
gs_obs.pop('percentile_values'))
# check that the numerical parts of the expected and observed results
# are almost the same
pdt.assert_series_equal(pd.Series(gs_exp), pd.Series(gs_obs),
check_exact=False)
def test_alpha_diversity_group_return_summary_only(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.return_value = [True, True]
mock_method.return_value = pd.Series(
{
'sample-foo-bar': 7,
'sample-baz-bat': 9.5,
'sample-qux-quux': 7.5,
'sample-4': 8,
},
name='observed_otus'
)
metric = 'observed_otus'
response, code = alpha_group(
body={
'sample_ids': [
'sample-foo-bar',
'sample-baz-bat',
'sample-qux-quux',
'sample-4',
]
},
alpha_metric=metric,
summary_statistics=True,
percentiles=[100, 0, 50, 20],
return_raw=False,
)
exp = {
'alpha_metric': 'observed_otus',
'group_summary': {
'mean': 8,
'median': 7.75,
'std': sqrt(0.875),
'group_size': 4,
'percentile': [100, 0, 50, 20],
'percentile_values': [9.5, 7, 7.75, 7.3]
}
}
self.assertEqual(code, 200)
obs = json.loads(response.data)
self.assertCountEqual(exp.keys(), obs.keys())
self.assertCountEqual(exp['alpha_metric'], obs['alpha_metric'])
self.assertCountEqual(exp['group_summary'].keys(),
obs['group_summary'].keys()
)
gs_exp = exp['group_summary']
gs_obs = obs['group_summary']
npt.assert_array_almost_equal(gs_exp.pop('percentile'),
gs_obs.pop('percentile'))
npt.assert_array_almost_equal(gs_exp.pop('percentile_values'),
gs_obs.pop('percentile_values'))
# check that the numerical parts of the expected and observed results
# are almost the same
pdt.assert_series_equal(pd.Series(gs_exp), pd.Series(gs_obs),
check_exact=False)
def test_alpha_diversity_group_percentiles_none(self):
with patch.object(AlphaRepo, 'get_alpha_diversity') as mock_method, \
patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.return_value = [True, True]
mock_method.return_value = pd.Series(
{
'sample-foo-bar': 7,
'sample-baz-bat': 9.5,
'sample-qux-quux': 7.5,
'sample-4': 8,
},
name='observed_otus'
)
metric = 'observed_otus'
response, code = alpha_group(
body={
'sample_ids': [
'sample-foo-bar',
'sample-baz-bat',
'sample-qux-quux',
'sample-4',
]
},
alpha_metric=metric,
summary_statistics=True,
percentiles=None,
return_raw=False,
)
self.assertEqual(code, 200)
obs = json.loads(response.data)
self.assertIn('group_summary', obs)
summary = obs['group_summary']
self.assertIn('percentile', summary)
perc = summary['percentile']
# check default value of percentiles is returned
npt.assert_array_almost_equal(perc, list(range(10, 91, 10)))
def test_alpha_diversity_group_unknown_metric(self):
with patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['metric-a', 'metric-b']
metric = 'observed_otus'
with self.assertRaises(UnknownResource):
alpha_group(self.post_body,
alpha_metric=metric,
summary_statistics=False,
return_raw=True)
def test_alpha_diversity_group_unknown_sample(self):
# One ID not found (out of two)
with patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.side_effect = [True, False]
with self.assertRaises(UnknownID):
alpha_group(self.post_body, 'observed_otus')
# Multiple IDs do not exist
with patch.object(AlphaRepo, 'exists') as mock_exists, \
patch.object(AlphaRepo, 'available_metrics') as mock_metrics:
mock_metrics.return_value = ['observed_otus']
mock_exists.side_effect = [False, False]
with self.assertRaises(UnknownID):
alpha_group(self.post_body, 'observed_otus')
class AlphaAltTests(MockedJsonifyTestCase):
jsonify_to_patch = [
'microsetta_public_api.api.diversity.alpha.jsonify',
'microsetta_public_api.utils._utils.jsonify',
]
def setUp(self):
super().setUp()
faith_pd_values = [1, 2, 3, 4]
faith_pd_index = ['s01', 's02', 's04', 's05']
shannon_values = [7.24, 9.05, 8.25]
shannon_index = ['s01', 's02', 'sOther']
metadata = MockMetadataElement(pd.DataFrame({
'age_cat': ['30s', '40s', '50s', '30s', '30s'],
'num_var': [3, 4, 5, 6, 7],
}, index=['s01', 's02', 's04', 's05', 'sOther']))
self.resources = DictElement({
'datasets': DictElement({
'dataset1': DictElement({
'__metadata__': metadata,
'__alpha__': AlphaElement({
'faith_pd': pd.Series(faith_pd_values,
index=faith_pd_index),
'shannon': pd.Series(shannon_values,
index=shannon_index),
})
}),
'dataset2': DictElement({
'__metadata__': metadata,
}),
}),
})
self.resources.accept(TrivialVisitor())
self.res_patcher = patch(
'microsetta_public_api.api.diversity.alpha.get_resources')
self.mock_resources = self.res_patcher.start()
self.mock_resources.return_value = self.resources
def tearDown(self):
self.res_patcher.stop()
super().tearDown()
def test_available_metrics_alt(self):
response, code = available_metrics_alpha_alt('dataset1')
avail = json.loads(response)
self.assertEqual(200, code)
self.assertDictEqual(avail, {'alpha_metrics': ['faith_pd', 'shannon']})
with self.assertRaises(UnknownResource):
# check for dataset that exists but has no alpha
available_metrics_alpha_alt('dataset2')
with self.assertRaises(UnknownResource):
# check for dataset that does not exist
available_metrics_alpha_alt('dataset3')
def test_get_alpha_alt(self):
faith_pd, code1 = get_alpha_alt('dataset1', 's01',
'faith_pd')
shannon, code2 = get_alpha_alt('dataset1', 's02', 'shannon')
faith_pd_value = json.loads(faith_pd)
shannon_value = json.loads(shannon)
self.assertEqual(faith_pd_value['data'], 1)
self.assertEqual(200, code1)
self.assertEqual(shannon_value['data'], 9.05)
self.assertEqual(200, code2)
def test_get_alpha_alt_404(self):
with self.assertRaisesRegex(UnknownResource, 'dataset3'):
get_alpha_alt('dataset3', 's03', 'faith_pd')
with self.assertRaisesRegex(UnknownResource, '__alpha__'):
get_alpha_alt('dataset2', 's03', 'faith_pd')
def test_alpha_group_alt(self):
request = {'sample_ids': ['s01', 's04']}
response, code = alpha_group_alt(request, 'dataset1', 'faith_pd',
return_raw=True)
self.assertEqual(code, 200)
obs = json.loads(response)
self.assertDictEqual({'s01': 1, 's04': 3},
obs['alpha_diversity'])
def test_alpha_group_alt_404_sample_id(self):
request = {'sample_ids': ['s01', 'dne']}
with self.assertRaises(UnknownID):
alpha_group_alt(request, 'dataset1', 'faith_pd',
return_raw=True)
def test_alpha_group_alt_404_metric(self):
request = {'sample_ids': ['s01', 's04']}
with self.assertRaises(UnknownResource):
alpha_group_alt(request, 'dataset1', 'bad-metric',
return_raw=True)
def test_alpha_group_alt_filter_metadata_OR(self):
post_body = {
'sample_ids': [
's04',
],
'metadata_query': {
"condition": "AND",
"rules": [
{
"id": "age_cat",
"field": "age_cat",
"operator": "equal",
"value": "30s",
},
],
},
'condition': "OR",
}
response, code = alpha_group_alt(post_body, 'dataset1', 'faith_pd',
return_raw=True)
obs = json.loads(response)
self.assertEqual(code, 200)
sample_ids = obs['alpha_diversity'].keys()
self.assertCountEqual(['s01', 's04', 's05'], sample_ids)
def test_alpha_exists_single_alt(self):
response, code = exists_single_alt('dataset1', 'faith_pd', 's01')
self.assertEqual(code, 200)
obs = json.loads(response)
self.assertTrue(obs)
response, code = exists_single_alt('dataset1', 'faith_pd', 's-dne')
self.assertEqual(code, 200)
obs = json.loads(response)
self.assertFalse(obs)
response, code = exists_single_alt('dataset1', 'shannon', 's03')
self.assertEqual(code, 200)
obs = json.loads(response)
self.assertFalse(obs)
def test_alpha_exists_single_alt_errors(self):
with self.assertRaises(UnknownResource):
exists_single_alt('dataset2', 'shannon', 's03')
with self.assertRaises(UnknownResource):
exists_single_alt('dataset1', 'dne-metric', 's03')
def test_alpha_exists_group_alt(self):
body = ['s01', 's03', 's04']
response, code = exists_group_alt(body, 'dataset1', 'faith_pd')
self.assertEqual(code, 200)
obs = json.loads(response)
self.assertListEqual(obs, [True, False, True])
def test_alpha_exists_group_alt_errors(self):
body = ['s01', 's03', 's04']
with self.assertRaises(UnknownResource):
exists_group_alt(body, 'dataset2', 'faith_pd')
with self.assertRaises(UnknownResource):
exists_group_alt(body, 'dataset1', 'dne-metric')
|
{"hexsha": "3a2597a6cf569e1173ceb5e6381af56eb2f006c5", "size": 27124, "ext": "py", "lang": "Python", "max_stars_repo_path": "microsetta_public_api/api/diversity/tests/test_alpha.py", "max_stars_repo_name": "gwarmstrong/microsetta-public-api", "max_stars_repo_head_hexsha": "53fe464aef6df13edb48a781bad6fe6f42f7251b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-18T01:27:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T15:49:11.000Z", "max_issues_repo_path": "microsetta_public_api/api/diversity/tests/test_alpha.py", "max_issues_repo_name": "biocore/microsetta-public-api", "max_issues_repo_head_hexsha": "1ec4c31e11127a8f480e4921b71ad36aa7d39c76", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2020-04-14T02:29:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-11T22:43:05.000Z", "max_forks_repo_path": "microsetta_public_api/api/diversity/tests/test_alpha.py", "max_forks_repo_name": "gwarmstrong/microsetta-public-api", "max_forks_repo_head_hexsha": "53fe464aef6df13edb48a781bad6fe6f42f7251b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-10T21:43:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-23T17:12:03.000Z", "avg_line_length": 41.9876160991, "max_line_length": 79, "alphanum_fraction": 0.533807698, "include": true, "reason": "import numpy", "num_tokens": 5595}
|
from tflite_runtime.interpreter import Interpreter
import pathlib
import os
import numpy as np
import count_insects_coral.init as init
from datetime import datetime
interpreter=None
height=None
width=None
input_details=None
output_details=None
def init_interpreter_tf(model_tflite_file_path):
global input_details
global output_details
global interpreter
global height
global width
starttime=datetime.now()
interpreter = Interpreter(
model_path=model_tflite_file_path, num_threads=None)
init.my_logs.info(f'Load interpreter model {model_tflite_file_path}')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
endtime=datetime.now()
init.my_logs.info(f'Initialization time {(endtime - starttime).total_seconds() * 1e6:.0f} microseconds')
def interpreter_evaluation_tf(image):
""" Image 240x240"""
global input_details
global output_details
global interpreter
starttime=datetime.now()
# quantize the normalized image using the input tensor's scale and zero point
scale, zero_point = input_details[0]['quantization']
input_data = (np.float32(image) / 255.0) / scale + zero_point
#input_data =np.array((np.float32(image)/255)).astype(input_details[0]["dtype"])
input_data = np.expand_dims(input_data, axis=0).astype(input_details[0]["dtype"])  # add batch dimension
input_data = np.expand_dims(input_data, axis=3).astype(input_details[0]["dtype"])  # add channel dimension
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data1 = interpreter.get_tensor(output_details[0]['index'])
# dequantize the raw output back to a float prediction
scale, zero_point = output_details[0]['quantization']
output_data = scale * (output_data1 - zero_point)
predictions = np.floor(np.array(output_data).item(0))
predictions_round = int(np.around(np.array(output_data).item(0)))
endtime=datetime.now()
init.my_logs.info(f'Interpreter time {(endtime - starttime).total_seconds() * 1e6:.0f} microseconds')
print(f'output_data1 {output_data1}')
print(f'output_data {output_data}')
print(f'predictions {predictions}')
print(f'predictions_round {predictions_round}')
return predictions_round
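# Example usage (a sketch; 'model.tflite' and the 240x240 single-channel
# `image` array are placeholders for the caller's real inputs):
#   init_interpreter_tf('model.tflite')
#   insect_count = interpreter_evaluation_tf(image)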
|
{"hexsha": "6377fcf79fa906d528474cc8bcb3940c02258cad", "size": 2145, "ext": "py", "lang": "Python", "max_stars_repo_path": "Coral_mini_dev/count_insects_coral/interpreter_tf.py", "max_stars_repo_name": "Gsarant/Edge-computing", "max_stars_repo_head_hexsha": "cc54da3e7cc35d7956cbef3dc8402e5331ec646e", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Coral_mini_dev/count_insects_coral/interpreter_tf.py", "max_issues_repo_name": "Gsarant/Edge-computing", "max_issues_repo_head_hexsha": "cc54da3e7cc35d7956cbef3dc8402e5331ec646e", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Coral_mini_dev/count_insects_coral/interpreter_tf.py", "max_forks_repo_name": "Gsarant/Edge-computing", "max_forks_repo_head_hexsha": "cc54da3e7cc35d7956cbef3dc8402e5331ec646e", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0869565217, "max_line_length": 92, "alphanum_fraction": 0.765034965, "include": true, "reason": "import numpy", "num_tokens": 515}
|
function tests = test_spm_dcm_fmri_check
% Unit Tests for spm_dcm_fmri_check
%__________________________________________________________________________
% Copyright (C) 2016 Wellcome Trust Centre for Neuroimaging
% $Id: test_spm_dcm_fmri_check.m 6790 2016-04-28 14:30:27Z guillaume $
tests = functiontests(localfunctions);
% -------------------------------------------------------------------------
function test_DCM(testCase)
% Simply tests the function doesn't crash with one DCM
import matlab.unittest.constraints.*
data_path = get_data_path();
dcm_file = fullfile(data_path,'DCM_s1_m1.mat');
DCM = spm_dcm_fmri_check(dcm_file, true);
testCase.fatalAssertThat(DCM, HasField('diagnostics'));
testCase.verifyThat(DCM.diagnostics, HasElementCount(3));
% -------------------------------------------------------------------------
function test_GCM(testCase)
% Simply tests the function doesn't crash with a GCM array
import matlab.unittest.constraints.*
data_path = get_data_path();
gcm_file = fullfile(data_path,'GCM_simulated.mat');
GCM = spm_dcm_fmri_check(gcm_file, true);
testCase.verifyThat(GCM, IsOfClass('cell'));
testCase.fatalAssertThat(GCM{1}, HasField('diagnostics'));
testCase.verifyThat(GCM{1}.diagnostics, HasElementCount(3));
% -------------------------------------------------------------------------
function data_path = get_data_path()
data_path = fullfile( spm('Dir'), 'tests', ...
'data', 'fMRI', 'simulated_2region', 'models');
|
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/tests/test_spm_dcm_fmri_check.m"}
|
"""Perform inference/compression on a pre-trained mean-scale hyperprior model.
Implement iterative inference with STE (A2 in Table 1 of the paper), from
Yibo Yang, Robert Bamler, Stephan Mandt:
"Improving Inference for Neural Image Compression", NeurIPS 2020
https://arxiv.org/pdf/2006.04240.pdf
"""
import os
import numpy as np
import tensorflow.compat.v1 as tf
from absl import app
from tensorflow_compression.python.ops import math_ops
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
import tensorflow_compression as tfc
from nn_models import AnalysisTransform, SynthesisTransform, HyperAnalysisTransform
from nn_models import MBT2018HyperSynthesisTransform as HyperSynthesisTransform
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64
likelihood_lowerbound = 1e-9
variance_upperbound = 2e1
from configs import save_opt_record
stop_early = True
def compress(args):
"""Compresses an image, or a batch of images of the same shape in npy format."""
from configs import get_eval_batch_size
if args.input_file.endswith('.npy'):
# .npy file should contain N images of the same shapes, in the form of an array of shape [N, H, W, 3]
X = np.load(args.input_file)
else:
# Load input image and add batch dimension.
from PIL import Image
x = np.asarray(Image.open(args.input_file).convert('RGB'))
X = x[None, ...]
num_images = int(X.shape[0])
img_num_pixels = int(np.prod(X.shape[1:-1]))
X = X.astype('float32')
X /= 255.
eval_batch_size = get_eval_batch_size(img_num_pixels)
dataset = tf.data.Dataset.from_tensor_slices(X)
dataset = dataset.batch(batch_size=eval_batch_size)
# https://www.tensorflow.org/api_docs/python/tf/compat/v1/data/Iterator
# Importantly, each sess.run(op) call will consume a new batch, where op is any operation that depends on
# x. Therefore if multiple ops need to be evaluated on the same batch of data, they have to be grouped like
# sess.run([op1, op2, ...]).
# x = dataset.make_one_shot_iterator().get_next()
x_next = dataset.make_one_shot_iterator().get_next()
x_ph = x = tf.placeholder('float32', (None, *X.shape[1:])) # keep a reference around for feed_dict
#### BEGIN build compression graph ####
# Instantiate model.
analysis_transform = AnalysisTransform(args.num_filters)
synthesis_transform = SynthesisTransform(args.num_filters)
hyper_analysis_transform = HyperAnalysisTransform(args.num_filters)
hyper_synthesis_transform = HyperSynthesisTransform(args.num_filters, num_output_filters=2 * args.num_filters)
entropy_bottleneck = tfc.EntropyBottleneck()
# Initial values for optimization
y_init = analysis_transform(x)
z_init = hyper_analysis_transform(y_init)
y = tf.placeholder('float32', y_init.shape)
from utils import round_with_identity_STE as round_with_STE
y_tilde = round_with_STE(y)
x_tilde = synthesis_transform(y_tilde)
x_shape = tf.shape(x)
x_tilde = x_tilde[:, :x_shape[1], :x_shape[2], :] # crop reconstruction to have the same shape as input
# # sample z_tilde from q(z_tilde|x) = q(z_tilde|h_a(g_a(x))), and compute the pdf of z_tilde under the flexible prior
# # p(z_tilde) ("z_likelihoods")
# z_tilde, z_likelihoods = entropy_bottleneck(z, training=training)
z = tf.placeholder('float32', z_init.shape)
z_tilde = round_with_STE(z)
_ = entropy_bottleneck(z, training=False) # dummy call to ensure entropy_bottleneck is properly built
z_likelihoods = entropy_bottleneck._likelihood(z_tilde) # p(\tilde z)
if entropy_bottleneck.likelihood_bound > 0:
likelihood_bound = entropy_bottleneck.likelihood_bound
z_likelihoods = math_ops.lower_bound(z_likelihoods, likelihood_bound)
# compute parameters of p(y_tilde|z_tilde)
mu, sigma = tf.split(hyper_synthesis_transform(z_tilde), num_or_size_splits=2, axis=-1)
sigma = tf.exp(sigma) # make positive
# need to handle images with non-standard sizes during compression; mu/sigma must have the same shape as y
y_shape = tf.shape(y_tilde)
mu = mu[:, :y_shape[1], :y_shape[2], :]
sigma = sigma[:, :y_shape[1], :y_shape[2], :]
scale_table = np.exp(np.linspace(np.log(SCALES_MIN), np.log(SCALES_MAX), SCALES_LEVELS))
conditional_bottleneck = tfc.GaussianConditional(sigma, scale_table, mean=mu)
# compute the pdf of y_tilde under the conditional prior/entropy model p(y_tilde|z_tilde)
# = N(y_tilde|mu, sigma^2) conv U(-0.5, 0.5)
y_likelihoods = conditional_bottleneck._likelihood(y_tilde) # p(\tilde y | \tilde z)
if conditional_bottleneck.likelihood_bound > 0:
likelihood_bound = conditional_bottleneck.likelihood_bound
y_likelihoods = math_ops.lower_bound(y_likelihoods, likelihood_bound)
#### END build compression graph ####
# graph = build_graph(args, x, training=False)
# Total number of bits divided by number of pixels.
# - log p(\tilde y | \tilde z) - log p(\tilde z) - - log q(\tilde z | \tilde y)
axes_except_batch = list(range(1, len(x.shape))) # should be [1,2,3]
y_bpp = tf.reduce_sum(-tf.log(y_likelihoods), axis=axes_except_batch) / (np.log(2) * img_num_pixels)
z_bpp = tf.reduce_sum(-tf.log(z_likelihoods), axis=axes_except_batch) / (np.log(2) * img_num_pixels)
eval_bpp = y_bpp + z_bpp # shape (N,)
train_bpp = tf.reduce_mean(eval_bpp)
# Mean squared error across pixels.
train_mse = tf.reduce_mean(tf.squared_difference(x, x_tilde))
# Multiply by 255^2 to correct for rescaling.
# float_train_mse = train_mse
# psnr = - 10 * (tf.log(float_train_mse) / np.log(10)) # float MSE computed on float images
train_mse *= 255 ** 2
# The rate-distortion cost.
if args.lmbda < 0:
args.lmbda = float(args.runname.split('lmbda=')[1].split('-')[0]) # re-use the lmbda as used for training
print('Defaulting lmbda (mse coefficient) to %g as used in model training.' % args.lmbda)
if args.lmbda > 0:
rd_loss = args.lmbda * train_mse + train_bpp
else:
rd_loss = train_bpp
rd_gradients = tf.gradients(rd_loss, [y, z])
# Bring both images back to 0..255 range, for evaluation only.
x *= 255
x_tilde = tf.clip_by_value(x_tilde, 0, 1)
x_tilde = tf.round(x_tilde * 255)
mse = tf.reduce_mean(tf.squared_difference(x, x_tilde), axis=axes_except_batch) # shape (N,)
psnr = tf.image.psnr(x_tilde, x, 255) # shape (N,)
msssim = tf.image.ssim_multiscale(x_tilde, x, 255) # shape (N,)
msssim_db = -10 * tf.log(1 - msssim) / np.log(10) # shape (N,)
with tf.Session() as sess:
# Load the latest model checkpoint, get compression stats
save_dir = os.path.join(args.checkpoint_dir, args.runname)
latest = tf.train.latest_checkpoint(checkpoint_dir=save_dir)
tf.train.Saver().restore(sess, save_path=latest)
eval_fields = ['mse', 'psnr', 'msssim', 'msssim_db', 'est_bpp', 'est_y_bpp', 'est_z_bpp']
eval_tensors = [mse, psnr, msssim, msssim_db, eval_bpp, y_bpp, z_bpp]
all_results_arrs = {key: [] for key in eval_fields} # append across all batches
log_itv = 100
if save_opt_record or stop_early:
log_itv = 10
rd_lr = 0.0001
rd_opt_its = 2000
from adam import Adam
batch_idx = 0
while True:
try:
x_val = sess.run(x_next)
x_feed_dict = {x_ph: x_val}
# 1. Perform R-D optimization conditioned on ground truth x
print('----RD Optimization----')
y_cur, z_cur = sess.run([y_init, z_init], feed_dict=x_feed_dict) # np arrays
adam_optimizer = Adam(lr=rd_lr)
if stop_early:
obj_prev = np.inf
y_prev, z_prev = None, None
opt_record = {'its': [], 'rd_loss': [], 'rd_loss_after_rounding': []}
for it in range(rd_opt_its):
grads, obj, mse_, train_bpp_, psnr_ = sess.run([rd_gradients, rd_loss, train_mse, train_bpp, psnr],
feed_dict={y: y_cur, z: z_cur, **x_feed_dict})
y_cur, z_cur = adam_optimizer.update([y_cur, z_cur], grads)
if it % log_itv == 0 or it + 1 == rd_opt_its:
psnr_ = psnr_.mean()
print('it=%d, rd_loss=%.4f mse=%.3f bpp=%.4f psnr=%.4f' % (it, obj, mse_, train_bpp_, psnr_))
if stop_early:
if obj >= obj_prev: # no longer improving
y_cur, z_cur = y_prev, z_prev
break
else:
obj_prev = obj
y_prev, z_prev = y_cur, z_cur
opt_record['its'].append(it)
opt_record['rd_loss'].append(obj)
# with STE the forward pass already uses rounded latents, so the
# R-D loss coincides with the loss after rounding
opt_record['rd_loss_after_rounding'].append(obj)
print()
y_tilde_cur = np.round(y_cur)  # these are the latents we end up transmitting
z_tilde_cur = np.round(z_cur)
# If requested, transform the quantized image back and measure performance.
eval_arrs = sess.run(eval_tensors,
feed_dict={y_tilde: y_tilde_cur, z_tilde: z_tilde_cur, **x_feed_dict})
for field, arr in zip(eval_fields, eval_arrs):
all_results_arrs[field] += arr.tolist()
batch_idx += 1
except tf.errors.OutOfRangeError:
break
for field in eval_fields:
all_results_arrs[field] = np.asarray(all_results_arrs[field])
input_file = os.path.basename(args.input_file)
results_dict = all_results_arrs
trained_script_name = args.runname.split('-')[0]
script_name = os.path.splitext(os.path.basename(__file__))[0] # current script name, without extension
# save RD evaluation results
prefix = 'rd'
save_file = '%s-%s-input=%s.npz' % (prefix, args.runname, input_file)
if script_name != trained_script_name:
save_file = '%s-%s-lmbda=%g+%s-input=%s.npz' % (
prefix, script_name, args.lmbda, args.runname, input_file)
np.savez(os.path.join(args.results_dir, save_file), **results_dict)
if save_opt_record:
# save optimization record
prefix = 'opt'
save_file = '%s-%s-input=%s.npz' % (prefix, args.runname, input_file)
if script_name != trained_script_name:
save_file = '%s-%s-lmbda=%g+%s-input=%s.npz' % (
prefix, script_name, args.lmbda, args.runname, input_file)
np.savez(os.path.join(args.results_dir, save_file), **opt_record)
for field in eval_fields:
arr = all_results_arrs[field]
print('Avg {}: {:0.4f}'.format(field, arr.mean()))
from tf_boilerplate import parse_args
def main(args):
# Invoke subcommand.
assert args.command == "compress", 'Only compression is supported.'
compress(args)
if __name__ == "__main__":
app.run(main, flags_parser=parse_args)
|
{"hexsha": "92ed6287badd45c4574bbcb00b43283570c44a5c", "size": 11326, "ext": "py", "lang": "Python", "max_stars_repo_path": "ste.py", "max_stars_repo_name": "mdong151/improving-inference-for-neural-image-compression", "max_stars_repo_head_hexsha": "8b876ff84e1d075d8058cb23314e71166fc25074", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2020-10-09T13:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T21:12:57.000Z", "max_issues_repo_path": "ste.py", "max_issues_repo_name": "mdong151/improving-inference-for-neural-image-compression", "max_issues_repo_head_hexsha": "8b876ff84e1d075d8058cb23314e71166fc25074", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-12T01:49:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-09T13:55:17.000Z", "max_forks_repo_path": "ste.py", "max_forks_repo_name": "mdong151/improving-inference-for-neural-image-compression", "max_forks_repo_head_hexsha": "8b876ff84e1d075d8058cb23314e71166fc25074", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-10-21T22:18:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-08T22:27:47.000Z", "avg_line_length": 44.2421875, "max_line_length": 122, "alphanum_fraction": 0.6398552004, "include": true, "reason": "import numpy", "num_tokens": 2947}
|
import numpy as np
from collections import defaultdict
from agents.policy.montage_workflow_policy_factory import MontageWorkflowPolicyFactory
import sys
class MonteCarlo:
@staticmethod
def mc_prediction(policy, env, num_episodes, discount_factor=1.0):
"""
Monte Carlo prediction algorithm. Calculates the value function
for a given policy using sampling.
Args:
policy: A function that maps an observation to action probabilities.
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
discount_factor: Gamma discount factor.
Returns:
A dictionary that maps from state -> value.
The state is a tuple and the value is a float.
"""
# Keeps track of sum and count of returns for each state
# to calculate an average. We could use an array to save all
# returns (like in the book) but that's memory inefficient.
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# The final value function
V = defaultdict(float)
for i_episode in range(1, num_episodes + 1):
# Print out which episode we're on, useful for debugging.
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# Generate an episode.
# An episode is an array of (state, action, reward) tuples
episode = []
state = env.reset()
for t in range(100):
action = policy(state)
next_state, reward, done, _ = env.step(action)
episode.append((state, action, reward))
if done:
break
state = next_state
# Find all states that we've visited in this episode
# We convert each state to a tuple so that we can use it as a dict key
states_in_episode = set([tuple(x[0]) for x in episode])
for state in states_in_episode:
# Find the first occurrence of the state in the episode
first_occurence_idx = next(i for i, x in enumerate(episode) if x[0] == state)
# Sum up all rewards since the first occurrence
G = sum([x[2] * (discount_factor ** i) for i, x in enumerate(episode[first_occurence_idx:])])
# Calculate average return for this state over all sampled episodes
returns_sum[state] += G
returns_count[state] += 1.0
V[state] = returns_sum[state] / returns_count[state]
return V
@staticmethod
def mc_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1):
"""
Monte Carlo Control using Epsilon-Greedy policies.
Finds an optimal epsilon-greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
discount_factor: Gamma discount factor.
epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities
"""
# Keeps track of sum and count of returns for each state
# to calculate an average. We could use an array to save all
# returns (like in the book) but that's memory inefficient.
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# The policy we're following
policy = MontageWorkflowPolicyFactory().make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
# policy = MontageWorkflowPolicyFactory().create_random_policy(env.action_space.n)
for i_episode in range(1, num_episodes + 1):
# Print out which episode we're on, useful for debugging.
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# Generate an episode.
# An episode is an array of (state, action, reward) tuples
episode = []
state = env.reset()
for t in range(100):
probs = policy(state)
action = np.random.choice(np.arange(len(probs)), p=probs)
next_state, reward, done, _ = env.step(action)
episode.append((state, action, reward))
# print("Episode: %s" % i_episode)
# env.render()
# print("Policy: %s" % probs)
# print("Reward: %s \n\n" % reward)
if done:
break
state = next_state
# Find all (state, action) pairs we've visited in this episode
# We convert each state to a tuple so that we can use it as a dict key
sa_in_episode = set([(tuple(x[0]), x[1]) for x in episode])
for state, action in sa_in_episode:
sa_pair = (state, action)
# Find the first occurrence of the (state, action) pair in the episode
first_occurence_idx = next(i for i, x in enumerate(episode) if x[0] == state and x[1] == action)
# Sum up all rewards since the first occurrence
G = sum([x[2] * (discount_factor ** i) for i, x in enumerate(episode[first_occurence_idx:])])
# Calculate average return for this state over all sampled episodes
returns_sum[sa_pair] += G
returns_count[sa_pair] += 1.0
Q[state][action] = returns_sum[sa_pair] / returns_count[sa_pair]
# The policy is improved implicitly by changing the Q dictionary
# print(returns_count)
return Q, policy
@staticmethod
def mc_control_importance_sampling(env, num_episodes, behavior_policy, discount_factor=1.0):
"""
Monte Carlo Off-Policy Control using Weighted Importance Sampling.
Finds an optimal greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
behavior_policy: The behavior to follow while generating episodes.
A function that given an observation returns a vector of probabilities for each action.
discount_factor: Gamma discount factor.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities. This is the optimal greedy policy.
"""
# The final action-value function.
# A dictionary that maps state -> action values
Q = defaultdict(lambda: np.zeros(env.action_space.n))
# The cumulative denominator of the weighted importance sampling formula
# (across all episodes)
C = defaultdict(lambda: np.zeros(env.action_space.n))
# The greedy policy we want to learn
target_policy = MontageWorkflowPolicyFactory().create_greedy_policy(Q)
for i_episode in range(1, num_episodes + 1):
# Print out which episode we're on, useful for debugging.
if i_episode % 1000 == 0:
print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
sys.stdout.flush()
# Generate an episode.
# An episode is an array of (state, action, reward) tuples
episode = []
state = env.reset()
for t in range(100):
# Sample an action from our policy
probs = behavior_policy(state)
action = np.random.choice(np.arange(len(probs)), p=probs)
next_state, reward, done, _ = env.step(action)
episode.append((state, action, reward))
if done:
break
state = next_state
# Sum of discounted returns
G = 0.0
# The importance sampling ratio (the weights of the returns)
W = 1.0
# For each step in the episode, backwards
for t in range(len(episode))[::-1]:
state, action, reward = episode[t]
# Update the total reward since step t
G = discount_factor * G + reward
# Update weighted importance sampling formula denominator
C[state][action] += W
# Update the action-value function using the incremental update formula (5.7)
# This also improves our target policy which holds a reference to Q
Q[state][action] += (W / C[state][action]) * (G - Q[state][action])
# If the action taken by the behavior policy is not the action
# taken by the target policy the probability will be 0 and we can break
if action != np.argmax(target_policy(state)):
break
W = W * 1. / behavior_policy(state)[action]
return Q, target_policy
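# Example usage (a sketch -- assumes an OpenAI-gym-style environment `env`
# with a discrete action space):
#   Q, policy = MonteCarlo.mc_control_epsilon_greedy(env, num_episodes=10000)
#   probs = policy(env.reset())                      # action probabilities
#   action = np.random.choice(len(probs), p=probs)   # sample an action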
|
{"hexsha": "16c318229ce2763982d5a4fb125b05c6b7611936", "size": 7920, "ext": "py", "lang": "Python", "max_stars_repo_path": "agents/strategy/monte_carlo.py", "max_stars_repo_name": "rayson1223/gym-workflow", "max_stars_repo_head_hexsha": "877b3f17951b9a85ef10b83e7d70a09edc07fd2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "agents/strategy/monte_carlo.py", "max_issues_repo_name": "rayson1223/gym-workflow", "max_issues_repo_head_hexsha": "877b3f17951b9a85ef10b83e7d70a09edc07fd2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agents/strategy/monte_carlo.py", "max_forks_repo_name": "rayson1223/gym-workflow", "max_forks_repo_head_hexsha": "877b3f17951b9a85ef10b83e7d70a09edc07fd2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.358490566, "max_line_length": 100, "alphanum_fraction": 0.7032828283, "include": true, "reason": "import numpy", "num_tokens": 2049}
|
% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass[11pt]{article}
\usepackage[T1]{fontenc}
% Nicer default font (+ math font) than Computer Modern for most use cases
\usepackage{mathpazo}
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts  syntax from Markdown).
\usepackage{graphicx}
% We will generate all images so they have a width \maxwidth. This means
% that they will get their normal width if they fit onto the page, but
% are scaled down if they would overflow the margins.
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
\else\Gin@nat@width\fi}
\makeatother
\let\Oldincludegraphics\includegraphics
% Set max figure width to be 80% of text width, for now hardcoded.
\renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionLabelFormat{nolabel}{}
\captionsetup{labelformat=nolabel}
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
\usepackage{mathrsfs}
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
\definecolor{ansi-default-inverse-fg}{HTML}{FFFFFF}
\definecolor{ansi-default-inverse-bg}{HTML}{000000}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% Math Jax compatibility definitions
\def\gt{>}
\def\lt{<}
\let\Oldtex\TeX
\let\Oldlatex\LaTeX
\renewcommand{\TeX}{\textrm{\Oldtex}}
\renewcommand{\LaTeX}{\textrm{\Oldlatex}}
% Document parameters
% Document title
\title{Fractals \& Rendering Techniques}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\newpage
\tableofcontents
\newpage
\hypertarget{introduction}{%
\section{Introduction}\label{introduction}}
This document is an attempt at explaining different types of fractals
and techniques to create beautiful renderings.
First we start by explaining what shaders are and how you can program
them in GLSL. Then we will do a quick refresh on the math that is
required for complex numbers. If you think that math is not your thing,
the functions to do the complex arithmetic in GLSL are implemented in
this document and you can simply copy them. However, after the
Mandelbrot section there is no more escaping from the math, and basic
knowledge of algebra, vectors, and matrices will help tremendously.
Armed with the knowledge of basic GLSL programming and complex numbers,
we will start to render the Mandelbrot set. The core ideas of rendering
fractals are explained here. When we have a rendering of the set, we
will explore a technique for coloring. The coloring technique is
developed by \emph{Inigo Quilez}, and I think it is so simple and
powerful that any programmer can benefit from it. To complete the
rendering program, we will also implement supersampling. With
supersampling we `measure' multiple points within each pixel and
average them out to get the final color. This gives an anti-aliasing
effect, where jagged edges become smoother (a less pixelized look).
\hypertarget{shaders-and-glsl}{%
\section{Shaders and GLSL}\label{shaders-and-glsl}}
A good starting point for learning about shaders and GLSL is \emph{The
Book of Shaders}: \url{https://thebookofshaders.com/01/}.
\hypertarget{complex-numbers}{%
\section{Complex Numbers}\label{complex-numbers}}
A complex number \(z\) is defined as a number with two components,
meaning that it is two dimensional. An example is \(z=3+2i\),
which can be generalized to \(z=a + bi\). In a complex number, \(a\) is
called the \emph{real} component, and \(b\) is called the
\emph{imaginary} component. Like normal arithmetic, the rules for
addition and multiplication have been defined for complex numbers.
However, for complex numbers the arithmetic works differently from what
you are used to with real numbers. How to do the arithmetic with complex
numbers is explained in a later section. Another notation that is used
in complex numbers is \(\textrm{Re}(z)\) to refer to the real component,
and \(\textrm{Im}(z)\) to refer to the imaginary component. This means
that a complex number can also be written as
\(z=\textrm{Re}(z) + i\ \textrm{Im}(z)\).
\begin{quote}
Open a Python REPL, and define a complex number with
\texttt{complex(a,\ b)}. This is an easy way to play with complex
arithmetic.
\end{quote}
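For example, the real and imaginary components can be read back directly
from Python's built-in \texttt{complex} type:

\begin{verbatim}
>>> z = complex(3, 2)   # z = 3 + 2i
>>> z.real              # Re(z)
3.0
>>> z.imag              # Im(z)
2.0
\end{verbatim}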
\hypertarget{geometric-interpretation-of-complex-numbers}{%
\subsection{Geometric Interpretation of Complex
Numbers}\label{geometric-interpretation-of-complex-numbers}}
Another way to think about complex numbers is with \(x\) and \(y\)
coordinates. In this view, the real component is the \(x\) value, and the
imaginary component is the \(y\) value. For example, \(z=3+2i\) can be
plotted on the complex plane:
\begin{figure}[h]
\centering
\includegraphics{img/complex-plane1.png}
\caption{\(3+2i\) in the complex plane}
\end{figure}
This gives us a nice way to think about complex numbers. Namely, a
complex number can be thought of as a point in the \(xy\)-plane.
\hypertarget{rules-of-complex-arithmetic}{%
\subsection{Rules of Complex
Arithmetic}\label{rules-of-complex-arithmetic}}
Complex arithmetic is defined a bit differently from what you are used
to with real numbers, because complex numbers are two dimensional. If we
have two complex numbers \(a + bi\) and \(c + di\), then:
\begin{itemize}
\tightlist
\item
Addition: \((a + bi) + (c + di) = a + c + i(b + d)\).
\begin{itemize}
\tightlist
\item
Example: \((3 + 2i) + (1 - i) = 4 + i\).
\end{itemize}
\item
Multiplication:
\((a + bi)(c + di) = ac + adi + bci + bdi^2 = ac - bd + i(ad + bc)\),
notice that \(i^2 = -1\), thus \(bdi^2 = - bd\).
\begin{itemize}
\tightlist
\item
Example: \((3 + 2i)(1 - i) = 3 - 3i + 2i - 2i^2 = 5 - i\).
\end{itemize}
\item
Exponentiation:
\((a + bi)^2 = a^2 + 2abi + b^2i^2 = a^2 - b^2 + 2abi\).
\begin{itemize}
\tightlist
\item
Example: \((3 + 2i)^2 = 9 + 6i + 6i + 4i^2 = 5 + 12i\).
\end{itemize}
\end{itemize}
\begin{quote}
Open a Python REPL and verify the examples to get a feel for it. It's
easier than you think.
\end{quote}
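For example, checking the addition, multiplication, and exponentiation
examples above in a Python REPL:

\begin{verbatim}
>>> u = complex(3, 2)   # 3 + 2i
>>> v = complex(1, -1)  # 1 - i
>>> u + v
(4+1j)
>>> u * v
(5-1j)
>>> u ** 2
(5+12j)
\end{verbatim}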
\hypertarget{complex-arithmetic-in-glsl}{%
\subsection{Complex Arithmetic in
GLSL}\label{complex-arithmetic-in-glsl}}
In GLSL we represent complex numbers with a vector of two components. If
we can encode the complex arithmetic in terms of matrix operations, the
calculations can be accelerated by the hardware of the GPU. In this
section, we will derive these matrix operations. For example, if we have
two complex numbers \(a + bi\) and \(c + di\), we can write these as the
vectors \(u = [a, b]\) and \(v = [c, d]\).
\hypertarget{addition}{%
\subsubsection{Addition}\label{addition}}
The first case we will consider is addition. It is achieved trivially
with vector addition: the components are added elementwise, so the
operation is simply \(u+v\).
\hypertarget{multiplication}{%
\subsubsection{Multiplication}\label{multiplication}}
The second case is multiplication. First we create a matrix
\(A = \begin{bmatrix} a & -b \\ b & a \end{bmatrix}\) from the vector
\(u\). For the complex multiplication \(u\cdot v\), we replace \(u\)
with the matrix \(A\). When we work out the matrix multiplication:
\[ A\cdot v = \begin{bmatrix} a & -b \\ b & a \end{bmatrix} \cdot \begin{bmatrix}c \\ d \end{bmatrix} = \begin{bmatrix} ac-bd \\ ad+bc \end{bmatrix},\]
the result is the same as the multiplication rule:
\((a + bi)(c + di) = ac - bd + i(ad + bc)\).
\hypertarget{exponentiation}{%
\subsubsection{Exponentiation}\label{exponentiation}}
The last case is exponentiation. The case \(u\cdot u\) is a simplified
version of the multiplication case, since we can just multiply the
complex number by itself. Here we replace the first \(u\) with \(A\),
and work out \(A\cdot u\) to get:
\[ A\cdot u = \begin{bmatrix} a & -b \\ b & a \end{bmatrix} \cdot \begin{bmatrix}a \\ b \end{bmatrix} = \begin{bmatrix} a^2-b^2 \\ 2ab \end{bmatrix},\]
which is the same as the exponentiation rule:
\((a + bi)^2 = a^2 - b^2 + 2abi\).
\hypertarget{implementation}{%
\subsubsection{Implementation}\label{implementation}}
The implementation in GLSL of the operations follows easily from the
formulas.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{u + v }\CommentTok{// addition u+v (vector addition)}
\DataTypeTok{mat2}\NormalTok{(u, -u.}\FunctionTok{y}\NormalTok{, u.}\FunctionTok{x}\NormalTok{) * v }\CommentTok{// multiplication u*v (matrix multiplication)}
\DataTypeTok{mat2}\NormalTok{(u, -u.}\FunctionTok{y}\NormalTok{, u.}\FunctionTok{x}\NormalTok{) * u }\CommentTok{// exponentiation u^2 (matrix multiplication)}
\end{Highlighting}
\end{Shaded}
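If these operations are needed in several places, it can be convenient
to wrap them in small helper functions. A minimal sketch (the names
\texttt{cmul} and \texttt{csqr} are our own):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{vec2 cmul(vec2 u, vec2 v) \{ return mat2(u, -u.y, u.x) * v; \} // complex product u*v}
\NormalTok{vec2 csqr(vec2 u)         \{ return mat2(u, -u.y, u.x) * u; \} // complex square u^2}
\end{Highlighting}
\end{Shaded}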
\hypertarget{the-mandelbrot-set}{%
\newpage
\section{The Mandelbrot Set}\label{the-mandelbrot-set}}
Pick a complex number \(c\), for example \(c = 0.3 + 0.1i\), and
\(z = 0 + 0i\). For the first iteration we calculate \(z^2 + c\), which
is \((0 + 0i)^2 + 0.3 + 0.1i = 0.3 + 0.1i\). For the second iteration we
set \(z = 0.3+0.1i\), and then we find that \(z^2 + c = 0.38 + 0.16i\).
If we repeat this process and plot the points we get after each
iteration:
\begin{figure}[h]
\centering
\includegraphics{img/orbit-converging.png}
\caption{Converging orbit}
\end{figure}
As we can see the points are spiraling inwards to a point. The path that
the complex number takes when iterating is called the \emph{orbit}. It
is clear that the orbit is \emph{converging} towards a point. The other
case is that the orbit quickly grows after each iteration and soon
shoots off to infinity.
\begin{figure}[h]
\centering
\includegraphics{img/orbit-diverging.png}
\caption{Diverging orbit}
\end{figure}
If the point shoots off to infinity, the point is not in the Mandelbrot set.
We can check this while iterating: it happens when \(|z| > B\). Based
on how fast the point escapes, and on how many iterations we are
doing, a color is determined. We divide the iteration count \(i\) by the
maximum number of iterations \(N\) to get a value in \([0, 1]\). This
scalar value is used to map to a color. If the point stayed within the
bound \(B\) during all iterations, it is in the Mandelbrot set and we
color it black.
The process we described above is perfectly suited for the GPU. Each
pixel on the screen can be mapped, for example, to the range
\([-2, 2]\) on the complex plane, and then we follow the orbit of that
complex number when we iterate with the Mandelbrot formula.
The Mandelbrot set is given by the iteration:
\[ z_{n+1} = z_n^2 + c \]
Classically we pick a bound \(B=2\), which gives the disk that contains
the Mandelbrot set. To avoid rendering the boundary of this disk, we
increase \(B\) to \(4\). On each iteration we calculate \(z = z^2 + c\),
and check if \(|z|\) is greater than \(B\), meaning that the orbit of
\(z\) has escaped to infinity.
This happens when \(\sqrt{a^2 + b^2} > B\), which we can square to get
\(a^2 + b^2 > B^2\). The next thing we can do is to rewrite
\(a^2 + b^2\) as the dot product \(z\cdot z\). This leads to the
following test that we can use to check whether \(z\) has escaped to
infinity: \(z\cdot z > B^2\). Rewriting it algebraically removes the
need for the square root operation, which improves the speed of the
algorithm.
The following GLSL code is a simple and fast implementation for
rendering the Mandelbrot set.
\begin{Shaded}
\begin{Highlighting}[]
\PreprocessorTok{#define N 64.}
\PreprocessorTok{#define B 4.}
\DataTypeTok{void} \FunctionTok{mainImage}\NormalTok{( }\DataTypeTok{out} \DataTypeTok{vec4}\NormalTok{ fragColor, }\DataTypeTok{in} \DataTypeTok{vec2}\NormalTok{ fragCoord ) \{}
\DataTypeTok{vec2}\NormalTok{ R = iResolution.}\FunctionTok{xy}\NormalTok{;}
\DataTypeTok{vec2}\NormalTok{ uv = (}\FloatTok{2.}\NormalTok{ * fragCoord - R - }\FloatTok{1.}\NormalTok{) / R.}\FunctionTok{y}\NormalTok{;}
\DataTypeTok{vec2}\NormalTok{ z = }\DataTypeTok{vec2}\NormalTok{(}\DecValTok{0}\NormalTok{), c = uv;}
\DataTypeTok{float}\NormalTok{ i;}
\KeywordTok{for}\NormalTok{(i=}\FloatTok{0.}\NormalTok{; i < N; i++) \{}
\NormalTok{ z = }\DataTypeTok{mat2}\NormalTok{(z, -z.}\FunctionTok{y}\NormalTok{, z.}\FunctionTok{x}\NormalTok{) * z + c;}
\KeywordTok{if}\NormalTok{(}\BuiltInTok{dot}\NormalTok{(z, z) > B*B) }\KeywordTok{break}\NormalTok{;}
\NormalTok{ \}}
\KeywordTok{if}\NormalTok{(i==N) \{ i = }\FloatTok{0.}\NormalTok{; \} }\CommentTok{// mark interior black}
\NormalTok{ fragColor = }\DataTypeTok{vec4}\NormalTok{(}\DataTypeTok{vec3}\NormalTok{(i/N), }\FloatTok{1.}\NormalTok{);}
\NormalTok{\}}
\end{Highlighting}
\end{Shaded}
It will render the following image:
\begin{figure}[h]
\centering
\includegraphics{img/mandelbrot-first.png}
\caption{Simple Mandelbrot}
\end{figure}
The code can be refactored a little by moving the iteration into a new
function \texttt{iterate(vec2\ p)}. We can also scale the image by
multiplying \texttt{uv} by \(1.2\). Finally, to center the image we
subtract \([0.4, 0.0]\) from \texttt{uv}. This results in the final code
for this section:
\begin{Shaded}
\begin{Highlighting}[]
\PreprocessorTok{#define N 64.}
\PreprocessorTok{#define B 4.}
\DataTypeTok{float} \FunctionTok{iterate}\NormalTok{(}\DataTypeTok{vec2}\NormalTok{ p) \{}
\DataTypeTok{vec2}\NormalTok{ z = }\DataTypeTok{vec2}\NormalTok{(}\DecValTok{0}\NormalTok{), c = p;}
\DataTypeTok{float}\NormalTok{ i;}
\KeywordTok{for}\NormalTok{(i=}\FloatTok{0.}\NormalTok{; i < N; i++) \{}
\NormalTok{ z = }\DataTypeTok{mat2}\NormalTok{(z, -z.}\FunctionTok{y}\NormalTok{, z.}\FunctionTok{x}\NormalTok{) * z + c;}
\KeywordTok{if}\NormalTok{(}\BuiltInTok{dot}\NormalTok{(z, z) > B*B) }\KeywordTok{break}\NormalTok{;}
\NormalTok{ \}}
\KeywordTok{return}\NormalTok{ i; }
\NormalTok{\}}
\DataTypeTok{void} \FunctionTok{mainImage}\NormalTok{( }\DataTypeTok{out} \DataTypeTok{vec4}\NormalTok{ fragColor, }\DataTypeTok{in} \DataTypeTok{vec2}\NormalTok{ fragCoord ) \{}
\DataTypeTok{vec2}\NormalTok{ R = iResolution.}\FunctionTok{xy}\NormalTok{;}
\DataTypeTok{vec2}\NormalTok{ uv = }\FloatTok{1.2}\NormalTok{ * (}\FloatTok{2.}\NormalTok{ * fragCoord - R - }\FloatTok{1.}\NormalTok{) / R.}\FunctionTok{y}\NormalTok{ - }\DataTypeTok{vec2}\NormalTok{(.}\FunctionTok{4}\NormalTok{, }\FloatTok{0.}\NormalTok{);}
\DataTypeTok{float}\NormalTok{ n = }\FunctionTok{iterate}\NormalTok{(uv) / N;}
\KeywordTok{if}\NormalTok{(n==}\FloatTok{1.}\NormalTok{) n = }\FloatTok{0.}\NormalTok{;}
\NormalTok{ fragColor = }\DataTypeTok{vec4}\NormalTok{(}\DataTypeTok{vec3}\NormalTok{(n), }\FloatTok{1.0}\NormalTok{);}
\NormalTok{\}}
\end{Highlighting}
\end{Shaded}
The end result should look like this:
\begin{figure}[h]
\centering
\includegraphics{img/mandelbrot-centered.png}
\caption{Centered Mandelbrot}
\end{figure}
So far we have rendered the Mandelbrot set itself, but the grayscale
coloring is quite plain. In the next section we will explore a technique
that allows us to create color gradients with a surprisingly simple formula.
\hypertarget{colorful-palettes}{%
\section{Colorful Palettes}\label{colorful-palettes}}
When we calculated the Mandelbrot set, we applied a colorization based
on the value of \(n\), which has been normalized to \([0,1]\). The
idea in this section is to develop a function of a parameter \(t\)
ranging over \([0,1]\) that returns a color from a gradient. The
gradient can be composed of many different colors, and is also called a
\emph{palette}.
\hypertarget{procedural-color-palette}{%
\subsection{Procedural Color Palette}\label{procedural-color-palette}}
A simple way to create a procedural color palette was described by
\emph{Inigo Quilez}
(\url{https://iquilezles.org/www/articles/palettes/palettes.htm}); it is
the following cosine expression:
\[ \textrm{color}(t) = a + b \cdot \cos [2\pi(c\cdot t+d)] \]
As \(t\) runs from \(0\) to \(1\), the cosine oscillates \(c\) times
with a phase of \(d\). The result is scaled by \(b\) and biased by
\(a\) to meet the desired contrast and brightness. The parameters
\(a, b, c\) and \(d\) are vectors with three components (r, g, b). We
can also think of \(a\) as the \emph{offset}, \(b\) as the
\emph{amplitude}, \(c\) as the \emph{frequency}, and \(d\) as the
\emph{phase}, for each r, g, b component respectively.
For example, if we pick values for \(a, b, c\) and \(d\):
\[ a = \begin{bmatrix} 0.65 \\ 0.5 \\ 0.31 \end{bmatrix} \quad b = \begin{bmatrix} -0.65 \\ 0.5 \\ 0.6 \end{bmatrix} \quad c = \begin{bmatrix} 0.333 \\ 0.278 \\ 0.278 \end{bmatrix} \quad d = \begin{bmatrix} 0.66 \\ 0 \\ 0.667 \end{bmatrix} \]
we can create a plot with the
\href{http://dev.thi.ng/gradients/}{\emph{cosine gradient generator}}.
This gives a nice visualization of what is going on and how this
procedural color palette works.
\begin{figure}[h]
\centering
\includegraphics{img/color-palette.png}
\caption{Color palette}
\end{figure}
We can see that each of the r, g, b components sits on a cosine wave and
they are mixed together as \(t\) slides over \([0,1]\). This set
of values gives a nice yellow, green and blueish gradient.
\hypertarget{gradient-examples}{%
\subsection{Gradient Examples}\label{gradient-examples}}
This section contains a table with different palettes that can be used
as a color map.
\begin{longtable}[]{@{}lllll@{}}
\toprule
\begin{minipage}[b]{0.17\columnwidth}\raggedright
a\strut
\end{minipage} & \begin{minipage}[b]{0.17\columnwidth}\raggedright
b\strut
\end{minipage} & \begin{minipage}[b]{0.17\columnwidth}\raggedright
c\strut
\end{minipage} & \begin{minipage}[b]{0.17\columnwidth}\raggedright
d\strut
\end{minipage} & \begin{minipage}[b]{0.17\columnwidth}\raggedright
palette\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ \ 0.5\ \ 0.33{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ \ 0.5\ \ 0.66{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette1.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.938\ 0.328\ 0.718{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.659\ 0.438\ 0.328{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.388\ 0.388\ 0.296{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}2.538\ 2.478\ 0.168{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette2.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.66\ 0.56\ 0.68{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.718\ 0.438\ 0.72\ {]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.52\ 0.8\ \ 0.52{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}-0.43\ \ -0.397\ -0.083{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette3.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.8\ 0.8\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ 0.2\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette4.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.821\ 0.328\ 0.242{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.659\ 0.481\ 0.896{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.612\ 0.34\ \ 0.296{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}\ 2.82\ \ \ 3.026\ -0.273{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette5.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}1.\ 1.\ 1.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ \ 0.33\ 0.67{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette6.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}1.\ 1.\ 1.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ 0.1\ 0.2{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette7.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}1.\ 1.\ 1.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.3\ 0.2\ 0.2{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette8.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}1.\ 1.\ 1.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.8\ 0.9\ 0.3{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette9.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}1.\ \ 0.7\ 0.4{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ \ 0.15\ 0.2\ {]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette10.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}2.\ 1.\ 0.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ \ 0.2\ \ 0.25{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette11.png}\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.5\ 0.5\ 0.5{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}2.\ 1.\ 1.{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\texttt{{[}0.\ \ \ 0.25\ 0.25{]}}\strut
\end{minipage} & \begin{minipage}[t]{0.17\columnwidth}\raggedright
\includegraphics{palettes/palette12.png}\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
These palettes were created with the \emph{cosine gradient generator} at
\url{http://dev.thi.ng/gradients} and by \emph{Inigo Quilez}. More
gradients can be created with the same tool.
\hypertarget{implementation-palette}{%
\subsection{Implementation}\label{implementation-palette}}
The code follows easily from the formula described above:
\begin{Shaded}
\begin{Highlighting}[]
\DataTypeTok{vec3} \FunctionTok{palette}\NormalTok{( }\DataTypeTok{in} \DataTypeTok{float}\NormalTok{ t, }\DataTypeTok{in} \DataTypeTok{vec3}\NormalTok{ a, }\DataTypeTok{in} \DataTypeTok{vec3}\NormalTok{ b, }\DataTypeTok{in} \DataTypeTok{vec3}\NormalTok{ c, }\DataTypeTok{in} \DataTypeTok{vec3}\NormalTok{ d )}
\NormalTok{\{}
\KeywordTok{return}\NormalTok{ a + b*}\BuiltInTok{cos}\NormalTok{( }\FloatTok{6.28318}\NormalTok{*(c*t+d) );}
\NormalTok{\}}
\end{Highlighting}
\end{Shaded}
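For example, the gradient from the previous section can be sampled by
passing the values of \(a, b, c\) and \(d\) listed there:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{vec3 col = palette(t, vec3(.65, .5, .31), vec3(-.65, .5, .6),}
\NormalTok{                      vec3(.333, .278, .278), vec3(.66, 0., .667));}
\end{Highlighting}
\end{Shaded}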
\hypertarget{smooth-iteration-count}{%
\section{Smooth Iteration Count}\label{smooth-iteration-count}}
As you have probably noticed, the color changes in discrete steps, which
creates visible color bands. This happens because \(n\), the number of
iterations, is an integer. To solve this problem,
\href{https://iquilezles.org/www/articles/mset_smooth/mset_smooth.htm}{\emph{Inigo
Quilez} explains a method} that determines a fractional correction to
\(n\). We subtract this fractional part from \(n\) to get \(sn\), which
varies smoothly.
The fractional part of the smooth iteration count can be calculated
with:
\[ sn = n - \dfrac{\ln \dfrac{\ln |z_n|}{\ln B}}{\ln d} \]
where \(B\) is the escape threshold for \(|z|\), and \(d\) is the
degree of the polynomial under iteration. In the case where \(d=2\),
such as in the Mandelbrot set, an optimized variant is available:
\[ sn = n - \log_2\log_2 |z_n|^2 + k \]
To implement the non-optimized version, replace the following line in
the \texttt{iterate} function:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{return}\NormalTok{ i;}
\end{Highlighting}
\end{Shaded}
with the formula we described above, which in GLSL is:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{return}\NormalTok{ i - }\BuiltInTok{log}\NormalTok{(}\BuiltInTok{log}\NormalTok{(}\BuiltInTok{dot}\NormalTok{(z, z)) / }\BuiltInTok{log}\NormalTok{(B)) / }\BuiltInTok{log}\NormalTok{(}\FloatTok{2.}\NormalTok{); }
\end{Highlighting}
\end{Shaded}
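The optimized variant for \(d=2\) could be implemented along the same
lines. A sketch, where \(k\) is a constant offset that only shifts the
resulting values:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{return i - log2(log2(dot(z, z))) + k; // optimized variant for d = 2}
\end{Highlighting}
\end{Shaded}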
The next image compares both methods: the smooth iteration count is
shown in the top half of the rendering, and the integer iteration count
in the bottom half.
\begin{figure}[h]
\centering
\includegraphics{img/smooth-iteration-count.png}
\caption{Mandelbrot Smooth Iteration Count}
\end{figure}
\hypertarget{supersampling}{%
\section{Supersampling}\label{supersampling}}
\hypertarget{burning-ship-fractal}{%
\section{Burning Ship Fractal}\label{burning-ship-fractal}}
\hypertarget{julia-sets}{%
\section{Julia Sets}\label{julia-sets}}
\hypertarget{animation}{%
\section{Animation}\label{animation}}
\hypertarget{rotation-over-time}{%
\subsection{Rotation over time}\label{rotation-over-time}}
\hypertarget{zoom-over-time}{%
\subsection{Zoom over time}\label{zoom-over-time}}
\hypertarget{polynomials}{%
\section{Polynomials}\label{polynomials}}
\begin{figure}[h]
\centering
\includegraphics{img/poly1.png}
\caption{Polynomial - Spiral}
\end{figure}
\hypertarget{distance-rendering}{%
\section{Distance Rendering}\label{distance-rendering}}
\url{https://iquilezles.org/www/articles/distancefractals/distancefractals.htm}
\hypertarget{geometric-orbit-traps}{%
\section{Geometric Orbit Traps}\label{geometric-orbit-traps}}
\url{https://iquilezles.org/www/articles/ftrapsgeometric/ftrapsgeometric.htm}
\hypertarget{iterated-functions-systems}{%
\section{Iterated Functions Systems}\label{iterated-functions-systems}}
\url{https://iquilezles.org/www/articles/ifsfractals/ifsfractals.htm}
\hypertarget{references}{%
\section{References}\label{references}}
% Add a bibliography block to the postdoc
\end{document}
|
{"hexsha": "05b18c2f303af2d20edce196bf8fb7941c4607cf", "size": 42376, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/Fractals & Rendering Techniques.tex", "max_stars_repo_name": "darkeclipz/fractals", "max_stars_repo_head_hexsha": "8647eea9b3c4a63bfeea30a98e9f2edf15bf9587", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-29T21:13:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-29T21:13:31.000Z", "max_issues_repo_path": "paper/Fractals & Rendering Techniques.tex", "max_issues_repo_name": "darkeclipz/fractals", "max_issues_repo_head_hexsha": "8647eea9b3c4a63bfeea30a98e9f2edf15bf9587", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/Fractals & Rendering Techniques.tex", "max_forks_repo_name": "darkeclipz/fractals", "max_forks_repo_head_hexsha": "8647eea9b3c4a63bfeea30a98e9f2edf15bf9587", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-18T06:33:52.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-18T06:33:52.000Z", "avg_line_length": 45.5655913978, "max_line_length": 310, "alphanum_fraction": 0.693057391, "num_tokens": 15647}
|
import numpy as np
def chol_params_to_lower_triangular_matrix(params):
dim = number_of_triangular_elements_to_dimension(len(params))
mat = np.zeros((dim, dim))
mat[np.tril_indices(dim)] = params
return mat
def cov_params_to_matrix(cov_params):
"""Build covariance matrix from 1d array with its lower triangular elements.
Args:
cov_params (np.array): 1d array with the lower triangular elements of a
covariance matrix (in C-order)
Returns:
cov (np.array): a covariance matrix
"""
lower = chol_params_to_lower_triangular_matrix(cov_params)
cov = lower + np.tril(lower, k=-1).T
return cov
def cov_matrix_to_params(cov):
return cov[np.tril_indices(len(cov))]
def sdcorr_params_to_sds_and_corr(sdcorr_params):
dim = number_of_triangular_elements_to_dimension(len(sdcorr_params))
sds = np.array(sdcorr_params[:dim])
corr = np.eye(dim)
corr[np.tril_indices(dim, k=-1)] = sdcorr_params[dim:]
corr += np.tril(corr, k=-1).T
return sds, corr
def sds_and_corr_to_cov(sds, corr):
diag = np.diag(sds)
return diag @ corr @ diag
def cov_to_sds_and_corr(cov):
sds = np.sqrt(np.diagonal(cov))
diag = np.diag(1 / sds)
corr = diag @ cov @ diag
return sds, corr
def sdcorr_params_to_matrix(sdcorr_params):
"""Build covariance matrix out of variances and correlations.
Args:
sdcorr_params (np.array): 1d array with parameters. The dimensions of the
covariance matrix are inferred automatically. The first dim parameters
are assumed to be the variances. The remainder are the lower triangular
elements (excluding the diagonal) of a correlation matrix.
Returns:
cov (np.array): a covariance matrix
"""
return sds_and_corr_to_cov(*sdcorr_params_to_sds_and_corr(sdcorr_params))
def cov_matrix_to_sdcorr_params(cov):
dim = len(cov)
sds, corr = cov_to_sds_and_corr(cov)
correlations = corr[np.tril_indices(dim, k=-1)]
return np.hstack([sds, correlations])
def number_of_triangular_elements_to_dimension(num):
"""Calculate the dimension of a square matrix from number of triangular elements.
Args:
num (int): The number of upper or lower triangular elements in the matrix.
Examples:
>>> number_of_triangular_elements_to_dimension(6)
3
>>> number_of_triangular_elements_to_dimension(10)
4
"""
return int(np.sqrt(8 * num + 1) / 2 - 0.5)
def dimension_to_number_of_triangular_elements(dim):
"""Calculate number of triangular elements from the dimension of a square matrix.
Args:
dim (int): Dimension of a square matrix.
"""
return int(dim * (dim + 1) / 2)
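if __name__ == "__main__":
    # A minimal round-trip sketch (hypothetical values): flatten a covariance
    # matrix to its lower-triangular parameters and rebuild it.
    params = np.array([1.0, 0.2, 2.0, 0.1, 0.3, 3.0])
    cov = cov_params_to_matrix(params)
    assert np.allclose(cov_matrix_to_params(cov), params)
    sds, corr = cov_to_sds_and_corr(cov)
    assert np.allclose(sds_and_corr_to_cov(sds, corr), cov)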
|
{"hexsha": "931dc61adcf2f07b5c05db01f88a0774d368b115", "size": 2741, "ext": "py", "lang": "Python", "max_stars_repo_path": "utilities.py", "max_stars_repo_name": "janosg/derivatives", "max_stars_repo_head_hexsha": "ee4640baa273093a04ef6bd7a482ba485b753bd2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utilities.py", "max_issues_repo_name": "janosg/derivatives", "max_issues_repo_head_hexsha": "ee4640baa273093a04ef6bd7a482ba485b753bd2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utilities.py", "max_forks_repo_name": "janosg/derivatives", "max_forks_repo_head_hexsha": "ee4640baa273093a04ef6bd7a482ba485b753bd2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.797752809, "max_line_length": 85, "alphanum_fraction": 0.692812842, "include": true, "reason": "import numpy", "num_tokens": 678}
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from music21 import midi
import pypianoroll
from pypianoroll import Multitrack
from texttable import Texttable
import os
from pprint import pprint
def play_midi(input_midi):
    '''Takes the path to an input MIDI file and plays it in the notebook cell
    :param input_midi: Path to the MIDI file
:return:
'''
midi_object = midi.MidiFile()
midi_object.open(input_midi)
midi_object.read()
midi_object.close()
show_midi = midi.translate.midiFileToStream(midi_object)
show_midi.show('midi')
def find_files_by_extensions(root, exts=[]):
def _has_ext(name):
if not exts:
return True
name = name.lower()
for ext in exts:
if name.endswith(ext):
return True
return False
for path, _, files in os.walk(root):
for name in files:
if _has_ext(name):
yield os.path.join(path, name)
def print_sample_array(split, parent_dir="data/jsb_chorales_numpy"):
"""
Prints a randomly sampled numpy array from the parent_dir
"""
midi_files = [
os.path.join(parent_dir, split, midi)
for midi in os.listdir(os.path.join(parent_dir, split))
]
selection = np.random.choice(midi_files)
pprint(np.load(selection))
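if __name__ == "__main__":
    # Usage sketch; "data/jsb_chorales_midi" is a hypothetical directory layout.
    for midi_path in find_files_by_extensions("data/jsb_chorales_midi", exts=[".mid", ".midi"]):
        print(midi_path)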
|
{"hexsha": "56e076cdb4c2f4ca2c73d1f9e7653861b9dd741d", "size": 1416, "ext": "py", "lang": "Python", "max_stars_repo_path": "transformer-xl/utils/midi_utils.py", "max_stars_repo_name": "froggie901/aws-deepcomposer-samples", "max_stars_repo_head_hexsha": "142b98b130efbb4ed91f22b54919d71877146c73", "max_stars_repo_licenses": ["MIT", "MIT-0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-07T13:28:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T13:28:17.000Z", "max_issues_repo_path": "notebooks/AWS DeepComposer/transformer-xl/utils/midi_utils.py", "max_issues_repo_name": "jesussantana/AWS-Machine-Learning-Foundations", "max_issues_repo_head_hexsha": "526eddb486fe8398cafcc30184c4ecce49df5816", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/AWS DeepComposer/transformer-xl/utils/midi_utils.py", "max_forks_repo_name": "jesussantana/AWS-Machine-Learning-Foundations", "max_forks_repo_head_hexsha": "526eddb486fe8398cafcc30184c4ecce49df5816", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-27T13:01:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T21:36:49.000Z", "avg_line_length": 28.32, "max_line_length": 74, "alphanum_fraction": 0.6574858757, "include": true, "reason": "import numpy", "num_tokens": 335}
|
% This file adds a coastline from an existing data set
global coastline faults mainfault main
report_this_filefun(mfilename('fullpath'));
%aa = a;
[file1,path1] = uigetfile( '*.mat',' Earthquake Datafile'); %disabled window position
loadpath = [path1 file1];
new_data = load(loadpath);
loaded=false;
if isfield(new_data,'coastline')
if ~isempty(new_data.coastline)
loaded=true;
coastline=new_data.coastline;
end
end
if isfield(new_data,'faults')
if ~isempty(new_data.faults)
loaded=true;
faults=new_data.faults;
end
end
if isfield(new_data,'mainfault')
if ~isempty(new_data.mainfault)
loaded=true;
mainfault=new_data.mainfault;
end
end
if isfield(new_data,'main')
if ~isempty(new_data.main)
loaded=true;
main=new_data.main;
end
end
if ~loaded
    disp('Error loading data! Is it in the right *.mat format?')
end
whos
%a = aa;
%clear aa
update(mainmap())
|
{"author": "CelsoReyes", "repo": "zmap7", "sha": "3895fcb3ca3073608abe22ca71960eb082fd0d9a", "save_path": "github-repos/MATLAB/CelsoReyes-zmap7", "path": "github-repos/MATLAB/CelsoReyes-zmap7/zmap7-3895fcb3ca3073608abe22ca71960eb082fd0d9a/zmap_deprecated/addcoast.m"}
|
import sys
import time
import numpy as np
import pandas as pa
import matplotlib.pyplot as plt
from sklearn.metrics import pairwise_distances
from scipy.sparse import csr_matrix
class Kmeans:
def __init__(self,data,k,geneNames,cellNames,cluster_label=None,seed=None):
self.data=data
self.k=k
self.geneNames=geneNames
self.cellNames=cellNames
self.seed=seed
self.centroids=None
self.cluster_assignment=None
self.cluster_label=cluster_label
self.heterogeneity=0.0
self.get_initial_centroids()
self.heterogeneities=None
def getCentroids(self):
return self.centroids
def getCluster_assignment(self):
return self.cluster_assignment
def getHeterogenity(self):
return self.heterogeneity
def getHetrogenities(self):
return self.heterogeneities
def get_initial_centroids(self):
'''Randomly choose k data points as initial centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
n = self.data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, self.k)
        # Keep centroids in dense format, as many entries will be nonzero due to averaging.
        # As long as at least one data point in a cluster has a nonzero entry for a
        # feature, that feature will carry a nonzero weight in the centroid.
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids
def smart_initialize(self):
'''Use k-means++ to initialize a good set of centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
centroids = np.zeros((self.k, self.data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(self.data.shape[0])
centroids[0] = self.data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2
for i in range(1, self.k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
            # Roughly speaking, a new centroid should be as far from the other centroids as possible.
idx = np.random.choice(self.data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = self.data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(self.data, centroids[0:i+1], metric='euclidean')**2,axis=1)
self.centroids=centroids
return centroids
def assign_clusters(self):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = pairwise_distances(self.data,self.centroids,metric='euclidean')
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.apply_along_axis(np.argmin, 1, distances_from_centroids)
self.cluster_assignment=cluster_assignment
return cluster_assignment
def revise_centroids(self):
new_centroids = []
for i in range(self.k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = self.data[self.cluster_assignment==i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
# Convert numpy.matrix type to numpy.ndarray type
centroid = centroid.A1
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
self.centroids=new_centroids
return new_centroids
def kmeans(self, maxiter, record_heterogeneity=None, verbose=False):
'''This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration'''
centroids = self.centroids[:]
prev_cluster_assignment = None
for itr in range(int(maxiter)):
if verbose:
print(itr)
# 1. Make cluster assignments using nearest centroids
# YOUR CODE HERE
cluster_assignment = self.assign_clusters()
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# YOUR CODE HERE
centroids = self.revise_centroids()
# Check for convergence: if none of the assignments changed, stop
if prev_cluster_assignment is not None and \
(prev_cluster_assignment==self.cluster_assignment).all():
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment!=self.cluster_assignment)
if verbose:
print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
                score = self.compute_heterogeneity()
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
self.centroids=centroids
self.cluster_assignment=cluster_assignment
return centroids, cluster_assignment
def kmeans_multiple_runs(self, maxiter, num_runs, seed_list=None, verbose=False):
heterogeneity = {}
min_heterogeneity_achieved = float('inf')
best_seed = None
final_centroids = None
final_cluster_assignment = None
for i in range(num_runs):
# Use UTC time if no seeds are provided
if seed_list is not None:
seed = seed_list[i]
np.random.seed(seed)
else:
seed = int(time.time())
np.random.seed(seed)
# Use k-means++ initialization
self.initial_centroids = self.smart_initialize()
# Run k-means
centroids, cluster_assignment = self.kmeans(maxiter, record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = self.compute_heterogeneity()
if verbose:
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
sys.stdout.flush()
# if current measurement of heterogeneity is lower than previously seen,
# update the minimum record of heterogeneity.
if heterogeneity[seed] < min_heterogeneity_achieved:
min_heterogeneity_achieved = heterogeneity[seed]
best_seed = seed
final_centroids = centroids
final_cluster_assignment = cluster_assignment
self.centroids=final_centroids
self.cluster_assignment=final_cluster_assignment
self.heterogeneities=heterogeneity
return final_centroids, final_cluster_assignment
def clusterEvaluation(self):
clustMaxDist={}
clustMinDist={}
clustMeanDist={}
for i in range(self.k):
binMaxDist=[]
binMinDist=[]
binMeanDist=[]
for j in np.concatenate(np.argwhere(self.cluster_assignment==i)):
dist=pairwise_distances(self.data[np.concatenate(np.argwhere(self.cluster_assignment==i))], self.data[j], metric='euclidean').flatten()
dist=dist**2
binMaxDist.append(np.max(dist))
binMinDist.append(np.min(dist))
binMeanDist.append(np.mean(dist))
clustMaxDist[i]=np.max(binMaxDist)
clustMinDist[i]=np.min(binMinDist)
clustMeanDist[i]=np.mean(binMeanDist)
plt.figure(figsize=(7,4.5))
        plt.plot(list(clustMaxDist.keys()), list(clustMaxDist.values()), linewidth=2, label='Maximum distance within clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMinDist.values()), linewidth=2, label='Minimum distance within clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMeanDist.values()), linewidth=2, label='Average distance within clusters')
plt.xlabel('Cluster number')
plt.ylabel('Eculidean distance')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.show()
        return np.sum(list(clustMeanDist.values()))
def compute_heterogeneity(self):
heterogeneity = 0.0
for i in range(self.k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = self.data[self.cluster_assignment==i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(member_data_points, [self.centroids[i]], metric='euclidean')
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
self.heterogeneity=heterogeneity
return heterogeneity
def plot_k_vs_heterogeneity(self, k_values, heterogeneity_values):
plt.figure(figsize=(7,4))
plt.plot(k_values, heterogeneity_values, linewidth=4)
plt.xlabel('K')
plt.ylabel('Heterogeneity')
plt.title('K vs. Heterogeneity')
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
plt.show()
return None
def get_cluster_data(self, cluster_number):
return self.data[np.in1d(np.array(self.cluster_assignment), cluster_number),:], self.cellNames[np.in1d(np.array(self.cluster_assignment), cluster_number)]
def select_K(self):
cluster_centroids={}
cluster_assignments={}
hetroginity_score=float('inf')
delta_k={}
max_K_value=self.k
hetro_Per_K={}
deltaHetro=None
for i in range(max_K_value):
self.k=i+1
print("going for k=", i+1)
cluster_centroid, cluster_assignment=self.kmeans_multiple_runs(5,100)
hetro=self.compute_heterogeneity()
hetro_Per_K[i+1]=hetro
if hetro<hetroginity_score:
if hetroginity_score==float('inf'):
hetroginity_score=hetro
deltaHetro=0
else:
deltaHetro=hetroginity_score-hetro
hetroginity_score=hetro
cluster_centroids[i+1]=cluster_centroid
cluster_assignments[i+1]=cluster_assignment
delta_k[i+1]=deltaHetro
        delta_vals = np.array(list(delta_k.values()))
        # count how many later deltas exceed half of the average delta
        best_k = int(np.sum(delta_vals[1:] > delta_vals.sum() / (2 * len(delta_vals))))
        print("best k value:", best_k, delta_k)
        self.centroids=cluster_centroids[best_k]
        self.cluster_assignment=cluster_assignments[best_k]
        self.k=best_k
        self.getVisualization(method="tsne")
        self.plot_k_vs_heterogeneity(list(hetro_Per_K.keys()), list(hetro_Per_K.values()))
return self.k
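if __name__ == "__main__":
    # Usage sketch with synthetic data (hypothetical gene/cell names): cluster
    # 100 cells with 20 genes into k=3 clusters using multiple k-means++ runs.
    rng = np.random.RandomState(0)
    data = csr_matrix(rng.rand(100, 20))
    km = Kmeans(data, 3, np.arange(20), np.arange(100), seed=0)
    centroids, assignment = km.kmeans_multiple_runs(maxiter=50, num_runs=5,
                                                    seed_list=[1, 2, 3, 4, 5])
    print("heterogeneity:", km.compute_heterogeneity())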
|
{"hexsha": "8ed0abcc201759c98415acc5106460c56828f45e", "size": 12154, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/MICTI/Kmeans.py", "max_stars_repo_name": "insilicolife/micti", "max_stars_repo_head_hexsha": "100055316014d86963ec191d30bf3d44310f1254", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build/lib/MICTI/Kmeans.py", "max_issues_repo_name": "insilicolife/micti", "max_issues_repo_head_hexsha": "100055316014d86963ec191d30bf3d44310f1254", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/MICTI/Kmeans.py", "max_forks_repo_name": "insilicolife/micti", "max_forks_repo_head_hexsha": "100055316014d86963ec191d30bf3d44310f1254", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.6838235294, "max_line_length": 162, "alphanum_fraction": 0.6279414185, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2539}
|
[STATEMENT]
lemma mod_exE: assumes "h\<Turnstile>(\<exists>\<^sub>Ax. P x)" obtains x where "h\<Turnstile>P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x. h \<Turnstile> P x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
h \<Turnstile> (\<exists>\<^sub>Ax. P x)
goal (1 subgoal):
1. (\<And>x. h \<Turnstile> P x \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: mod_ex_dist)
|
{"llama_tokens": 196, "file": "Van_Emde_Boas_Trees_Separation_Logic_Imperative_HOL_Assertions", "length": 2}
|
#This code reads in the optimally extracted lightcurve and bins it into channels 5 pixels wide, following Berta '12
import numpy as np
#from numpy import *
#from pylab import *
from astropy.io import ascii
from scipy import signal
import os
import time as time_now
from astropy.table import QTable
from tqdm import tqdm
from .lib import plots
from .lib import sort_nicely as sn
from .lib import manageevent as me
from astropy.table import QTable
def run21(eventlabel, workdir, meta=None):
"""
    This function reads in the lc_spec.txt file with the flux as a function of wavelength and bins it into light curves.
"""
print('Starting s21\n')
if meta == None:
meta = me.loadevent(workdir + '/WFC3_' + eventlabel + "_Meta_Save")
    def weighted_mean(data, err):  # weighted mean of data points "data" with standard errors "err"
weights = 1.0/err**2.
mu = np.sum(data*weights)/np.sum(weights)
var = 1.0/np.sum(weights)
        return [mu, np.sqrt(var)]  # returns the weighted mean and its standard error
if meta.use_wvl_list:
print(meta.wvl_edge_list)
wave_edges = np.array(meta.wvl_edge_list)
meta.wvl_bins = int(len(wave_edges)-1)
print('Number of bins:', meta.wvl_bins)
else:
meta.wvl_bins = int(meta.wvl_bins)
wave_edges = np.linspace(meta.wvl_min, meta.wvl_max, meta.wvl_bins+1)*1e4
print('Number of bins:', meta.wvl_bins)
print('chosen bin edges:', wave_edges)
#reads in spectra
if meta.s21_most_recent_s20:
lst_dir = os.listdir(meta.workdir + "/extracted_lc/")
lst_dir = sn.sort_nicely(lst_dir)
spec_dir = lst_dir[-1]
else:
spec_dir = meta.s21_spec_dir_path_s20
print("Chosen directory with the spectroscopic flux files:", spec_dir)
# save the mid bin wavelengths into a new file
table_wvl = QTable(names=('bin', 'wavelengths'))
wavelengths = np.array([(wave_edges[i] + wave_edges[i+1]) / 2. / 1.e4 for i in range(len(wave_edges) - 1)])
d = ascii.read(meta.workdir + "/extracted_lc/" + spec_dir + "/lc_spec.txt")
d = np.array([d[i].data for i in d.colnames])
    nexp = meta.nexp  # number of exposures
    npix = meta.npix  # width of the spectrum in pixels (BEAMA_f - BEAMA_i)
#d = d.reshape(nexp , npix, -1) #reshapes array by exposure
t_mjd, t_bjd = d[0].reshape(nexp, npix), d[1].reshape(nexp, npix)
t_visit, t_orbit = d[2].reshape(nexp, npix), d[3].reshape(nexp, npix)
ivisit, iorbit = d[4].reshape(nexp, npix), d[5].reshape(nexp, npix)
scan = d[6].reshape(nexp, npix)
spec_opt, var_opt = d[7].reshape(nexp, npix), d[8].reshape(nexp, npix)
w = d[9].reshape(nexp, npix) # d[0,:, 4]
#f = d[0, :, 2]
w_min = max(w[:,0])
w_max = min(w[:,-1])
#w_hires = np.linspace(w.min(), w.max(), 10000)
w_hires = np.linspace(w_min, w_max, 10000)
oversample_factor = len(w_hires)/npix*1.0
#print(oversample_factor)
#stores the indices corresponding to the wavelength range in each bin
wave_inds = []
#lo_res_wave_inds = []
for i in range(len(wave_edges)- 1): wave_inds.append((w_hires >= wave_edges[i])&(w_hires <= wave_edges[i+1]))
#for i in range(len(wave_bins)- 1): lo_res_wave_inds.append((w >= wave_bins[i])&(w <= wave_bins[i+1]))
datetime = time_now.strftime('%Y-%m-%d_%H-%M-%S')
dirname = meta.workdir + "/extracted_sp/" + 'bins{0}_'.format(meta.wvl_bins) + datetime
if not os.path.exists(dirname): os.makedirs(dirname)
for i in tqdm(range(len(wave_edges) - 1), desc='***************** Looping over Bins', ascii=True):
wave = (wave_edges[i] + wave_edges[i+1])/2./1.e4
outname = dirname + "/speclc" + "{0:.3f}".format(wave)+".txt"
#outname = "wasp33b_" + "{0:.4f}".format(wave)+".txt"
#outfile = open(outname, 'w')
table = QTable(names=('t_mjd', 't_bjd', 't_visit', 't_orbit', 'ivisit', 'iorbit', 'scan', 'spec_opt', 'var_opt', 'wave'))
#print('#t_mjd', '\t', 't_bjd', '\t', 't_visit', '\t', 't_orbit', '\t', 'ivisit', '\t', 'iorbit', '\t', 'scan', '\t', 'spec_opt', '\t', 'var_opt', '\t','wave', file=outfile)
for j in range(nexp):
t_mjd_i, t_bjd_i = t_mjd[j][0], t_bjd[j][0]
t_visit_i, t_orbit_i = t_visit[j][0], t_orbit[j][0]
ivisit_i, iorbit_i = ivisit[j][0], iorbit[j][0]
scan_i = scan[j][0]
spec_opt_i, var_opt_i = spec_opt[j], var_opt[j]
w_i = w[j]
f_interp = np.interp(w_hires, w_i, spec_opt_i)
variance_interp = np.interp(w_hires, w_i, var_opt_i)
#accounts for decrease in precision when spectrum is oversampled
variance_interp *= oversample_factor
fluxes = f_interp[wave_inds[i]]
errs = np.sqrt(variance_interp[wave_inds[i]])
meanflux, meanerr = weighted_mean(fluxes, errs)
#print(t_mjd, t_bjd, t_visit, t_orbit, ivisit, iorbit, scan, meanflux, meanerr**2, wave, file=outfile)
#print wave, np.sum(d[j, lo_res_wave_inds[i],2])
table.add_row([t_mjd_i, t_bjd_i, t_visit_i, t_orbit_i, ivisit_i, iorbit_i, scan_i, meanflux, meanerr**2, wave])
#print wave, 1.0*sum(wave_inds)/len(w_hires), meanflux, meanerr
ascii.write(table, outname, format='ecsv', overwrite=True)
print('Saved light curve(s) in {0}'.format(dirname))
plots.plot_wvl_bins(w_hires, f_interp, wave_edges, meta.wvl_bins, dirname)
print('Saving Wavelength bin file')
for idx, wavelengths_i in enumerate(wavelengths):
table_wvl.add_row([idx, wavelengths_i])
ascii.write(table_wvl, dirname + '/wvl_table.dat', format='rst', overwrite=True)
print('Saving Metadata')
me.saveevent(meta, meta.workdir + '/WFC3_' + meta.eventlabel + "_Meta_Save", save=[])
print('Finished s21 \n')
return meta
|
{"hexsha": "09c53bbe22458d6f2a305f9c93c3e0fe5e43c375", "size": 5894, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pacman/s21_bin_spectroscopic_lc.py", "max_stars_repo_name": "sebastian-zieba/PACMAN", "max_stars_repo_head_hexsha": "2eb1e4b450c97dc28d5a05b3ebddd80706cfca79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-23T10:26:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T10:26:33.000Z", "max_issues_repo_path": "src/pacman/s21_bin_spectroscopic_lc.py", "max_issues_repo_name": "sebastian-zieba/PACMAN", "max_issues_repo_head_hexsha": "2eb1e4b450c97dc28d5a05b3ebddd80706cfca79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pacman/s21_bin_spectroscopic_lc.py", "max_forks_repo_name": "sebastian-zieba/PACMAN", "max_forks_repo_head_hexsha": "2eb1e4b450c97dc28d5a05b3ebddd80706cfca79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-29T13:37:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T13:37:31.000Z", "avg_line_length": 41.8014184397, "max_line_length": 181, "alphanum_fraction": 0.6350525959, "include": true, "reason": "import numpy,from numpy,from scipy,from astropy", "num_tokens": 1779}
|
import os.path as path
import random
import copy
import numpy as np
from chainer import Variable, optimizers, serializers, Chain
import chainer.functions as F
import chainer.links as L
import chainer.computational_graph as c
import matplotlib.pyplot as plt
class Model(Chain):
def __init__(self):
super(Model, self).__init__(
l1 = L.Linear(64, 100),
l2 = L.Linear(100, 200),
l3 = L.Linear(200, 400),
l4 = L.Linear(400, 64)
)
def __call__(self, x, dropout=True):
h = F.dropout(F.leaky_relu(self.l1(x)), train=dropout, ratio=0.2)
h = F.dropout(F.leaky_relu(self.l2(h)), train=dropout, ratio=0.5)
h = F.dropout(F.leaky_relu(self.l3(h)), train=dropout, ratio=0.5)
return self.l4(h)
def get_loss(self, x, t):
return F.mean_squared_error(x, t)
class DeepQ_learning():
"""
    turn (required parameter): the string "Before" or "After"
    DQN: switch whether to learn or not
    use_ER: if True, use Experience Replay
    use_target_q: if True, use target Q
    name: this player's name
    epsilon: default is 1, it will be used for the epsilon-greedy policy
model_file: the file for loading and saving self.model
optimizer_file: the file for loading and saving self.optimizer
"""
def __init__(self, turn, DQN=True, use_ER=True, name="DeepQ_learning", epsilon=1, model_file="product-model", optimizer_file="product-optimizer"):
self.turn = turn
self.DQN = DQN
self.use_ER = use_ER
self.name = name
self.e = epsilon
self.gamma = 0.95
self.last_state = None
self.last_action = None
# Experience Replay
self.experience_memory = []
self.experience_memory_size = 10000
self.replay_mini_batch_size = 500
# Target Q
self.model = Model()
self.target_model = copy.deepcopy(self.model)
# self.optimizer = optimizers.MomentumSGD(lr=0.001)
self.optimizer = optimizers.Adam()
self.optimizer.setup(self.model)
# files
self.model_file = "data/deep_q_learning/" + model_file
self.optimizer_file = "data/deep_q_learning/" + optimizer_file
self.load_model()
self.load_optimizer()
self.count = 0
# tools for visualizing
# self.plot_y = []
def action(self, game):
if not self.DQN:
return game.play(self.not_learning(game))
s = game.parse()
if self.use_ER:
if len(self.experience_memory) < self.experience_memory_size // 2:
y_x = game.play(self.store_experience(game))
else:
y_x = game.play(self.policy(game))
else:
y_x = game.play(self.policy(game))
a = y_x[0] * 8 + y_x[1]
r = 1 if game.result == self.turn else -1
if game.result == None: r = 0
fs = game.parse()
if self.experience_memory_size < len(self.experience_memory):
temp = np.where(np.array(self.experience_memory).T[2]==0)[0]
if temp.size == 0:
del self.experience_memory[0]
else:
del self.experience_memory[np.random.choice(temp)]
self.experience_memory.append([s, a, r, fs])
return y_x
def not_learning(self, game):
input_data = Variable(np.array([game.parse()], dtype=np.float32))
pred = self.model(input_data)
pred = pred.data[0]
pos = np.argmax(pred)
y = pos // 8
x = pos % 8
clone = game.clone()
if not clone.can_play((y, x)):
y, x = random.choice(game.movable)
return y, x
def store_experience(self, game):
return random.choice(game.movable)
def update_target_model(self):
self.target_model = copy.deepcopy(self.model)
def policy(self, game):
input_data = Variable(np.array([game.parse()], dtype=np.float32))
pred = self.model(input_data)
pos = np.argmax(pred.data, axis=1)
y = pos // 8
x = pos % 8
if random.random() < self.e: y, x = random.choice(game.movable)
if 0.2 < self.e: self.e -= 1e-3
i = 0
clone = game.clone()
while not clone.can_play((y, x)):
self.learn(game.parse(), pos, -1, game.parse(), opt=game.movable)
pred = self.model(input_data)
pos = np.argmax(pred.data[0])
y = pos // 8
x = pos % 8
i += 1
# if 1000 < i:
# print("random")
# y, x = random.choice(game.movable)
clone = game.clone()
pos = y * 8 + x
self.last_state = game.parse()
self.last_action = pos
self.update_target_model()
return y, x
def learn(self, s, a, r, fs, opt=None):
if s is None or a is None or fs is None: return
self.count += 1
if self.count % 50 == 0:
self.count = 0
self.update_target_model()
fs_y = self.target_model(np.array([fs], dtype=np.float32))
max_q = np.max(fs_y.data, axis=1)
s_y = self.model(np.array([s], dtype=np.float32)) # now pred
t = copy.deepcopy(s_y)
t.data[0][a] = r + self.gamma * max_q # fix pred
self.model.cleargrads()
loss = self.model.get_loss(s_y, t)
# if opt is not None:
# temp = []
# for o in opt:
# o = o[0] * 8 + o[1]
# temp.append(s_y.data[0][o])
# print(temp, max_q)
print(loss.data)
# self.plot_y.append(loss.data)
loss.backward()
self.optimizer.update()
def learn_by_minibatch(self, s_batch, a_batch, r_batch, fs_batch):
"""
s_batch : two-dimensional array
a_batch : one-dimensional array
r_batch : one-dimensional array
fs_batch: two-dimensional array
"""
max_qs = np.max(self.target_model(np.array(list(map(np.array, fs_batch)), dtype=np.float32)).data, axis=1)
temp = self.model(np.array(list(map(np.array, s_batch)), dtype=np.float32))
y = F.reshape(copy.deepcopy(temp), (-1, 64))
for i in range(len(max_qs)):
temp.data[i][a_batch[i]] = r_batch[i] + self.gamma * max_qs[i]
t = F.reshape(temp, (-1, 64))
self.model.cleargrads()
loss = self.model.get_loss(y, t)
print(loss.data)
# print(" | ".join(map(str, [y.data[0][a_batch[0]], r_batch[0] + self.gamma * max_qs[0], loss.data])))
loss.backward()
self.optimizer.update()
def ER(self):
"Experience Replay"
if not self.use_ER: return
print("---- learn ER ----")
experience_memory = np.array(self.experience_memory)
perm = np.random.permutation(experience_memory)
for i in range(0, len(perm), self.replay_mini_batch_size):
batch = perm[i:i + self.replay_mini_batch_size].T
s_batch = batch[0]
a_batch = batch[1]
r_batch = batch[2]
fs_batch = batch[3]
settled = False
for k in range(3000):
self.learn_by_minibatch(s_batch, a_batch, r_batch, fs_batch)
self.update_target_model()
def game_finished(self, game):
"when finished one game, run this."
if game.result == None:
self.learn(self.last_state, self.last_action, 0, game.parse())
elif game.result == self.turn:
self.learn(self.last_state, self.last_action, 1, game.parse())
elif game.result != "Draw":
self.learn(self.last_state, self.last_action, -1, game.parse())
else:
self.learn(self.last_state, self.last_action, 0, game.parse())
self.update_target_model()
self.last_state = None
self.last_action = None
def all_game_finished(self):
"when finished all game, run this."
# plot_x = np.arange(len(self.plot_y))
# plt.plot(plot_x + 1, self.plot_y)
# plt.show()
if self.use_ER: self.ER()
self.save_model()
self.save_optimizer()
def load_model(self):
if path.exists(self.model_file):
if path.getsize(self.model_file) == 0: return
serializers.load_npz(self.model_file, self.model)
else:
with open(self.model_file, "w") as f:
f.write("")
def load_optimizer(self):
if path.exists(self.optimizer_file):
if path.getsize(self.optimizer_file) == 0: return
serializers.load_npz(self.optimizer_file, self.optimizer)
else:
with open(self.optimizer_file, "w") as f:
f.write("")
def save_model(self):
serializers.save_npz(self.model_file, self.model)
def save_optimizer(self):
serializers.save_npz(self.optimizer_file, self.optimizer)
|
{"hexsha": "e0820b3b3296a72573891ea2f457d70b8398a32f", "size": 8935, "ext": "py", "lang": "Python", "max_stars_repo_path": "players/deep_q_learning.py", "max_stars_repo_name": "pikatyuu/deep-learning-othello", "max_stars_repo_head_hexsha": "d9f149b01f079f5d021ba9655445cd43a847a628", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "players/deep_q_learning.py", "max_issues_repo_name": "pikatyuu/deep-learning-othello", "max_issues_repo_head_hexsha": "d9f149b01f079f5d021ba9655445cd43a847a628", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "players/deep_q_learning.py", "max_forks_repo_name": "pikatyuu/deep-learning-othello", "max_forks_repo_head_hexsha": "d9f149b01f079f5d021ba9655445cd43a847a628", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5976095618, "max_line_length": 150, "alphanum_fraction": 0.5760492445, "include": true, "reason": "import numpy", "num_tokens": 2212}
|
from typing import Tuple, Dict, Union
from .rnn import RNN
import numpy as np
import tensorflow as tf
class RoemmeleSentences(RNN):
CLASSES = 1
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def _sentence_rnn(self, per_sentence_states: tf.Tensor) -> tf.Tensor:
assert len(per_sentence_states.get_shape()) == 3
assert per_sentence_states.get_shape()[1] == self.TOTAL_SENTENCES - 1
# Create the cell
rnn_cell_sentences = self._create_cell(self.rnn_cell_dim, name='sentence_cell')
inputs = tf.unstack(per_sentence_states, axis=1)
outputs, state = tf.nn.static_rnn(cell=rnn_cell_sentences, inputs=inputs, dtype=tf.float32)
if self.rnn_cell == "LSTM":
state = state[0] # c_state
print("outputs[0]", outputs[0].get_shape())
outputs_lst = [tf.expand_dims(x, axis=1) for x in outputs]
outputs_tensor = tf.concat(outputs_lst, axis=1)
print("outputs_tensor", outputs_tensor.get_shape())
sentence_states = [state]
if self.attention is not None: # with attention
with tf.variable_scope("attention"):
context = self._add_attention(outputs_tensor, cell_output=state, prefix="attention")
print("context", context.get_shape())
sentence_states.append(context)
res = tf.concat(sentence_states, axis=1)
print("sentence_states", res.get_shape())
return res
def _output_fc(self, state: tf.Tensor) -> tf.Tensor:
output = tf.layers.dense(state, self.CLASSES, activation=None, name="output")
print("output", output.get_shape())
return output
def _sentence_states(self) -> tf.Tensor:
return self.batch
def build_model(self) -> Tuple[tf.Tensor, tf.Tensor, tf.Operation]:
# Construct the graph
with self.session.graph.as_default():
with tf.name_scope("split_endings"):
per_sentence_states = self._sentence_states()
sentence_states = per_sentence_states[:, :self.SENTENCES, :]
print("sentence_states", sentence_states.get_shape())
ending1_states = per_sentence_states[:, self.SENTENCES + 0, :]
ending1_states = tf.expand_dims(ending1_states, axis=1)
print("ending1_states", ending1_states.get_shape())
ending2_states = per_sentence_states[:, self.SENTENCES + 1, :]
ending2_states = tf.expand_dims(ending2_states, axis=1)
print("ending2_states", ending2_states.get_shape())
ending1_states = tf.concat([sentence_states, ending1_states], axis=1)
ending2_states = tf.concat([sentence_states, ending2_states], axis=1)
with tf.variable_scope("ending") as ending_scope:
with tf.name_scope("sentence_rnn"):
per_story_states = self._sentence_module(ending1_states)
with tf.name_scope("fc"):
self.ending1_output = self._output_fc(per_story_states)
with tf.variable_scope(ending_scope, reuse=True):
with tf.name_scope("sentence_rnn"):
per_story_states = self._sentence_module(ending2_states)
with tf.name_scope("fc"):
self.ending2_output = self._output_fc(per_story_states)
with tf.name_scope("eval_predictions"):
endings = tf.concat([self.ending1_output, self.ending2_output], axis=1)
eval_predictions = tf.to_int32(tf.argmax(endings, axis=1))
with tf.name_scope("train_predictions"):
self.train_logits = tf.squeeze(self.ending1_output, axis=[1])
self.train_probs = tf.sigmoid(self.train_logits)
self.train_predictions = tf.to_int32(tf.round(self.train_probs))
with tf.name_scope("loss"):
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=self.labels, logits=self.train_logits)
with tf.name_scope("optimizer"):
optimizer = self._optimizer()
gradients = optimizer.compute_gradients(loss)
clipped_gradients = [(tf.clip_by_norm(gradient, self.grad_clip), var) for gradient, var in gradients]
training_step = optimizer.apply_gradients(clipped_gradients, global_step=self.global_step)
variables = tf.trainable_variables()
print("Variables", variables)
return eval_predictions, loss, training_step
def _optimizer(self) -> tf.train.Optimizer:
return tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
def _summaries_and_init(self) -> None:
with tf.name_scope("summaries"):
current_accuracy, update_accuracy = tf.metrics.accuracy(self.labels, self.train_predictions)
current_eval_accuracy, update_eval_accuracy = tf.metrics.accuracy(self.labels, self.predictions)
current_loss, update_loss = tf.metrics.mean(self.loss)
self.reset_metrics = tf.variables_initializer(tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
self.current_metrics = [current_accuracy, current_loss]
self.update_metrics = [update_accuracy, update_loss]
self.current_eval_metrics = [current_eval_accuracy]
with self.summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(50):
self.summaries["train"].extend([
tf.contrib.summary.histogram("train/activations", self.train_probs),
tf.contrib.summary.scalar("train/loss", update_loss),
tf.contrib.summary.scalar("train/accuracy", update_accuracy)
])
with self.summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
eval_histograms = [
tf.contrib.summary.histogram("eval/activations1", tf.sigmoid(self.ending1_output)),
tf.contrib.summary.histogram("eval/activations2", tf.sigmoid(self.ending2_output))
]
self.update_eval_metrics = [update_eval_accuracy] + eval_histograms
for dataset in ["eval", "test"]:
self.summaries[dataset].append(
tf.contrib.summary.scalar(dataset + "/accuracy", current_eval_accuracy))
# Saver
self.saver = tf.train.Saver(max_to_keep=3)
# Initialize variables
self.session.run(tf.global_variables_initializer())
with self.summary_writer.as_default():
tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)
def _placeholders(self) -> None:
self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
# [batch_size, SENTENCES x sentence_id, sentence_embeddings]
self.batch = tf.placeholder(tf.float32, [None, self.TOTAL_SENTENCES, self.sentence_embedding], name="batch")
# [batch_size]
self.labels = tf.placeholder(tf.int32, [None], name="labels")
# [] bool scalar
self.is_training = tf.placeholder_with_default(False, [], name="is_training")
# Useful tensors
self.batch_size = tf.shape(self.batch)[0]
def _build_feed_dict(self, batch: Dict[str, Union[np.ndarray, bool]],
is_training: bool = False) -> Dict[tf.Tensor, Union[np.ndarray, bool]]:
assert is_training == batch['is_training']
feed_dict = {self.batch: batch['batch'], self.is_training: batch['is_training']}
if 'labels' in batch:
feed_dict[self.labels] = batch['labels']
return feed_dict
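def _example_ending_selection():
    # Minimal numpy sketch of the eval-prediction step above: the two ending
    # towers share weights, and the chosen ending is the argmax over their
    # logits (sigmoid is monotone, so argmax over logits == argmax over probs).
    # The logit values here are made up.
    logits1 = np.array([[0.3], [-1.2]])  # [batch, 1] scores for ending 1
    logits2 = np.array([[0.9], [-0.4]])  # [batch, 1] scores for ending 2
    endings = np.concatenate([logits1, logits2], axis=1)  # [batch, 2]
    return np.argmax(endings, axis=1)  # 1 means ending 2 is preferred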
|
{"hexsha": "89955811df716f015b3918b379ad95860e463287", "size": 7800, "ext": "py", "lang": "Python", "max_stars_repo_path": "project2/sct/model/roemmele_sentences.py", "max_stars_repo_name": "oskopek/nlu", "max_stars_repo_head_hexsha": "301611383fabf0d263a86dcb932fa51762b3f022", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project2/sct/model/roemmele_sentences.py", "max_issues_repo_name": "oskopek/nlu", "max_issues_repo_head_hexsha": "301611383fabf0d263a86dcb932fa51762b3f022", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-02-06T01:00:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T03:37:09.000Z", "max_forks_repo_path": "project2/sct/model/roemmele_sentences.py", "max_forks_repo_name": "oskopek/nlu", "max_forks_repo_head_hexsha": "301611383fabf0d263a86dcb932fa51762b3f022", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-04-17T18:51:19.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-27T08:46:40.000Z", "avg_line_length": 49.0566037736, "max_line_length": 117, "alphanum_fraction": 0.6385897436, "include": true, "reason": "import numpy", "num_tokens": 1590}
|
"""
Diffusion Imaging in Python
============================
For more information, please visit http://dipy.org
Subpackages
-----------
::
align -- Registration, streamline alignment, volume resampling
boots -- Bootstrapping algorithms
core -- Spheres, gradient tables
core.geometry -- Spherical geometry, coordinate and vector manipulation
core.meshes -- Point distributions on the sphere
data -- Small testing datasets
external -- Interfaces to external tools such as FSL
io -- Loading/saving of dpy datasets
reconst -- Signal reconstruction modules (tensor, spherical harmonics,
diffusion spectrum, etc.)
segment -- Tractography segmentation
sims -- MRI phantom signal simulation
tracking -- Tractography, metrics for streamlines
viz -- Visualization and GUIs
Utilities
---------
::
test -- Run unittests
__version__ -- Dipy version
"""
import sys
if sys.version_info < (2, 6):
raise ImportError('Dipy needs Python version 2.6 or above')
from .info import __version__
# Test callable
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
del Tester
# Plumb in version etc info stuff
from .pkg_info import get_pkg_info as _get_pkg_info
def get_info():
from os.path import dirname
return _get_pkg_info(dirname(__file__))
del sys
|
{"hexsha": "8aa00ce5c63aebce2dc37db878eddc29216e53e7", "size": 1405, "ext": "py", "lang": "Python", "max_stars_repo_path": "dipy/__init__.py", "max_stars_repo_name": "JohnGriffiths/dipy", "max_stars_repo_head_hexsha": "5fb38e9b77547cdaf5eb140730444535733ae01d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-07-25T14:04:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-10T07:10:10.000Z", "max_issues_repo_path": "dipy/__init__.py", "max_issues_repo_name": "JohnGriffiths/dipy", "max_issues_repo_head_hexsha": "5fb38e9b77547cdaf5eb140730444535733ae01d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dipy/__init__.py", "max_forks_repo_name": "JohnGriffiths/dipy", "max_forks_repo_head_hexsha": "5fb38e9b77547cdaf5eb140730444535733ae01d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-07-24T21:20:54.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-27T04:08:24.000Z", "avg_line_length": 27.0192307692, "max_line_length": 77, "alphanum_fraction": 0.671886121, "include": true, "reason": "from numpy", "num_tokens": 298}
|
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
import theano.tensor as T
from sklearn.datasets import make_moons, make_circles, make_classification
from simec.ann_models import SupervisedNNModel
def linear_regression(y_dim=5, x_dim=10):
# generate training and test data
np.random.seed(15)
n_train = 1000
W = np.random.randint(-4, 5, size=(x_dim, y_dim))
b = np.random.randint(-2, 2, size=(1, y_dim))
X = np.random.rand(n_train, x_dim)
Y = np.dot(X, W) + b
X += np.random.randn(n_train, x_dim) * 0.02
X_test = np.random.rand(20, x_dim)
Y_test = np.dot(X_test, W) + b
X_test += np.random.randn(20, x_dim) * 0.02
# build, train, and test the model
model = SupervisedNNModel(x_dim, y_dim)
model.fit(X, Y)
print("Test Error: %f" % model.score(X_test, Y_test))
if x_dim == 1 and y_dim == 1:
plt.figure()
plt.plot(X[:, 0], Y[:, 0], 'm*', label='data samples')
X_plot = np.linspace(np.min(X), np.max(X), 1000)
plt.plot(X_plot, model.predict(X_plot[:, np.newaxis]), 'g', label='prediction')
plt.plot(X_plot, np.dot(X_plot[:, np.newaxis], W) + b, 'k', label='true curve')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Linear Regression Problem')
plt.legend()
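def _least_squares_reference(X, Y):
    # Minimal closed-form cross-check for the linear problem above, assuming a
    # bias column appended to X (names here are ad hoc, rcond=None assumes
    # numpy >= 1.14); compare W_hat, b_hat against the trained network.
    X_aug = np.hstack([X, np.ones((X.shape[0], 1))])
    coeffs = np.linalg.lstsq(X_aug, Y, rcond=None)[0]  # solves min ||X_aug C - Y||
    return coeffs[:-1], coeffs[-1]  # (W_hat, b_hat)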
def nonlinear_regression(x_dim=1):
# generate training and test data
np.random.seed(15)
n_train = 1000
X = np.random.rand(n_train, x_dim) * np.pi * 2.
Y = np.sin(X)
X += np.random.randn(n_train, x_dim) * 0.02
X_test = np.random.rand(20, x_dim) * np.pi * 2.
Y_test = np.sin(X_test)
X_test += np.random.randn(20, x_dim) * 0.02
# build, train, and test the model
model = SupervisedNNModel(x_dim, x_dim, hunits=[100, 50], activations=[T.tanh, T.tanh, None])
model.fit(X, Y)
print("Test Error: %f" % model.score(X_test, Y_test))
if x_dim == 1:
plt.figure()
plt.plot(X[:, 0], Y[:, 0], 'm*', label='data samples')
X_plot = np.linspace(np.min(X), np.max(X), 1000)
plt.plot(X_plot, model.predict(X_plot[:, np.newaxis]), 'g', label='prediction')
plt.plot(X_plot, np.sin(X_plot), 'k', label='true curve')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Non-Linear Regression Problem')
plt.legend()
def classification(dataset=0):
# generate training and test data
n_train = 1000
if dataset == 0:
X, Y = make_classification(n_samples=n_train+50, n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
elif dataset == 1:
X, Y = make_moons(n_samples=n_train+50, noise=0.3, random_state=1)
elif dataset == 2:
X, Y = make_circles(n_samples=n_train+50, noise=0.2, factor=0.5, random_state=1)
else:
print("dataset unknown")
return
X_test, Y_test = X[n_train:, :], Y[n_train:]
X, Y = X[:n_train, :], Y[:n_train]
# build, train, and test the model
model = SupervisedNNModel(X.shape[1], 2, hunits=[100, 50], activations=[T.tanh, T.tanh, T.nnet.softmax], cost_fun='negative_log_likelihood',
error_fun='zero_one_loss', learning_rate=0.01, L1_reg=0., L2_reg=0.)
model.fit(X, Y)
print("Test Error: %f" % model.score(X_test, Y_test))
# plot dataset + predictions
plt.figure()
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
np.arange(y_min, y_max, 0.02))
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=cm_bright, alpha=0.6)
# and testing points
plt.scatter(X_test[:, 0], X_test[:, 1], c=Y_test, cmap=cm_bright)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title('Classification Problem (%i)' % dataset)
if __name__ == '__main__':
linear_regression(1, 1)
nonlinear_regression(1)
classification(0)
classification(1)
classification(2)
plt.show()
|
{"hexsha": "d549cd43b3e9a5517a5133b707007e730db38112", "size": 4564, "ext": "py", "lang": "Python", "max_stars_repo_path": "ann_test.py", "max_stars_repo_name": "cod3licious/simec-theano", "max_stars_repo_head_hexsha": "dd2bc0a4d954754fafb2d6d7d571aca3092569b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ann_test.py", "max_issues_repo_name": "cod3licious/simec-theano", "max_issues_repo_head_hexsha": "dd2bc0a4d954754fafb2d6d7d571aca3092569b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ann_test.py", "max_forks_repo_name": "cod3licious/simec-theano", "max_forks_repo_head_hexsha": "dd2bc0a4d954754fafb2d6d7d571aca3092569b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.65625, "max_line_length": 144, "alphanum_fraction": 0.6075810692, "include": true, "reason": "import numpy,import theano", "num_tokens": 1376}
|
'''
Analysis of several FOI files against several GFs using Fisher's exact test. Best used for SNP set analysis, using whole SNP database as a spot background.
sys.argv[1] - text file with FOI file names. Include full, or relative path, if needed.
sys.argv[2] - text file with GF file names. Include full, or relative path, if needed.
sys.argv[3] - spot background file
'''
import sys
import gzip
import pybedtools
import multiprocessing as mp
import math
from scipy.stats import hypergeom
import scipy
import pdb
import numpy
# Collect lines from a file into a dictionary
def read_lines(path):
elems = []
with open(path) as h:
for line in h:
elems.append(line.strip())
return elems
# Run Fisher's exact test for one GF (g) and one FOI (f) against the spot background (b); arguments are file paths
def run_hypergeometric(g, f, b):
foi = pybedtools.BedTool(f)
gf = pybedtools.BedTool(g)
bg = pybedtools.BedTool(b)
foi_obs = len(foi.intersect(gf, u = True)) # number of FOIs overlapping with a GF
bg_obs = len(bg.intersect(gf, u = True)) # number of spot bkg overlapping with a GF
    rnd_obs = (len(foi)*bg_obs/len(bg)) # Mean of the hypergeometric distribution
# pdb.set_trace()
if foi_obs == rnd_obs: # No difference
return 0
elif foi_obs < rnd_obs: # Underrepresentation
# pdb.set_trace()
# pval = hypergeom.cdf(foi_obs,len(bg),bg_obs,len(foi))
# if pval <= 0:
# pval = float(numpy.finfo(numpy.float64).tiny) # If pval is 0, set to min, to avoid log10 error
# elif pval >= 1:
# pval = 1 # Sometimes there may be an overflow in another direction
pval = scipy.stats.fisher_exact([[foi_obs,bg_obs],[len(foi)-foi_obs,len(bg)-bg_obs]],alternative='less')[1]
        return math.log10(pval) # one-sided 'less' test; log10(p) < 0 signifies underrepresentation
elif foi_obs > rnd_obs: # Overrepresentation
# pdb.set_trace()
# pval = hypergeom.sf(foi_obs,len(bg),bg_obs,len(foi))
# if pval <= 0:
# pval = float(numpy.finfo(numpy.float64).tiny) # If pval is 0, set to min, to avoid log10 error
# elif pval >= 1:
# pval = 1 # Sometimes there may be an overflow in another direction
pval = scipy.stats.fisher_exact([[foi_obs,bg_obs],[len(foi)-foi_obs,len(bg)-bg_obs]],alternative='greater')[1]
        return -math.log10(pval) # one-sided 'greater' test; -log10(p) > 0 signifies overrepresentation
else: # No difference
return 0
# print "e.obs : %d, rand.obs : %d, len(back) : %d, back.obs : %d, len(e.A) : %d, hypergeomp_value : %r" % (eobs, randobs, len(back), backobs, len(eA), hypergeomp_value)
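# Minimal worked sketch of the test above: the enrichment question reduces to
# Fisher's exact test on the 2x2 table [[foi_hits, bg_hits], [foi_misses, bg_misses]].
# The counts below are made up.
def _fisher_example():
    from scipy.stats import fisher_exact
    foi_obs, n_foi = 30, 100     # 30 of 100 features of interest overlap the GF
    bg_obs, n_bg = 500, 10000    # 500 of 10000 background spots overlap the GF
    table = [[foi_obs, bg_obs], [n_foi - foi_obs, n_bg - bg_obs]]
    odds_ratio, pval = fisher_exact(table, alternative='greater')
    return -math.log10(pval)     # signed log10 p, as in run_hypergeometric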
# Run the test for multiple FOIs against one GF
def run_multiple_hypergeometric(args):
    gf, fois, bg = args  # single tuple argument so pool.imap can map it
result = [run_hypergeometric(gf,foi,bg) for foi in fois]
result.insert(0, gf) # Add GF name to 0 position, the rest are p-values
return result
if __name__ == "__main__":
fois = read_lines(sys.argv[1]) # Text file with FOI file names
gfs = read_lines(sys.argv[2]) # Text file with GF file names, gzipped
bg_path = sys.argv[3] # Path to spot background file
pool = mp.Pool(mp.cpu_count())
jobs = [(gf, fois, bg_path) for gf in gfs]
    print("\t".join(fois))
    for result in pool.imap(run_multiple_hypergeometric, jobs): # pool.imap; use plain 'map' for a single thread
        print("\t".join(map(str, result)))
|
{"hexsha": "b368340f1ef79d8f2da9cf49734c198366e89f78", "size": 3175, "ext": "py", "lang": "Python", "max_stars_repo_path": "grsnp/hypergeom.py", "max_stars_repo_name": "mdozmorov/genome_runner", "max_stars_repo_head_hexsha": "1fd77dd8e0bb7333e2d8e0d299d020bc8a3e36a1", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grsnp/hypergeom.py", "max_issues_repo_name": "mdozmorov/genome_runner", "max_issues_repo_head_hexsha": "1fd77dd8e0bb7333e2d8e0d299d020bc8a3e36a1", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2015-07-10T12:08:34.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-02T01:56:07.000Z", "max_forks_repo_path": "grsnp/hypergeom.py", "max_forks_repo_name": "mdozmorov/genome_runner", "max_forks_repo_head_hexsha": "1fd77dd8e0bb7333e2d8e0d299d020bc8a3e36a1", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-04-16T12:54:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-06T20:06:33.000Z", "avg_line_length": 41.2337662338, "max_line_length": 173, "alphanum_fraction": 0.7137007874, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 969}
|
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
num_bins = 100
raw_data = pd.read_csv('./raw_data.csv', header = 0, index_col = 0)
sample_num = raw_data.shape[0]
print(sample_num)
label = raw_data.iloc[:,raw_data.shape[1]-1]
price = label.values
print('max price: ', max(price))
print('min price: ', min(price))
df = raw_data.loc[raw_data['SalePrice'] <= 135000]
print(df.shape[0])
plt.hist(price, num_bins)
plt.show()
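# Minimal sketch of a data-driven alternative to the hard-coded 135000 cutoff
# above: filter by a price quantile instead. The quantile value is arbitrary.
def _quantile_filter(frame, q=0.25):
    threshold = frame['SalePrice'].quantile(q)
    return frame.loc[frame['SalePrice'] <= threshold]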
|
{"hexsha": "865e8135aab65f0a62f7864dac2a51d2ec48c6e7", "size": 455, "ext": "py", "lang": "Python", "max_stars_repo_path": "pattern_recognition/code/DataPre.py", "max_stars_repo_name": "geneti/courseworkproj", "max_stars_repo_head_hexsha": "5843cc14c2ce01172420befca5d2683f1123096a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pattern_recognition/code/DataPre.py", "max_issues_repo_name": "geneti/courseworkproj", "max_issues_repo_head_hexsha": "5843cc14c2ce01172420befca5d2683f1123096a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pattern_recognition/code/DataPre.py", "max_forks_repo_name": "geneti/courseworkproj", "max_forks_repo_head_hexsha": "5843cc14c2ce01172420befca5d2683f1123096a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2777777778, "max_line_length": 67, "alphanum_fraction": 0.7252747253, "include": true, "reason": "import numpy", "num_tokens": 130}
|
"""
brief: Testing ground for 1D moment solver
Author: Steffen Schotthöfer
Date: 17.05.2021
"""
import sys
import csv
sys.path.append('../..')
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
from matplotlib import animation
import tensorflow as tf
import multiprocessing
import pandas as pd
# inpackage imports
# from networks.configModel import initNeuralClosure
from src import math
from src.networks.configmodel import init_neural_closure
num_cores = multiprocessing.cpu_count()
def main():
solver = MNSolver1D(traditional=False, polyDegree=2)
# solver.solve_animation(maxIter=100)
# solver.solve_animation_iter_error(maxIter=100)
# solver.solve_iter_error(maxIter=100)
solver.solve(maxIter=2000)
return 0
class MNSolver1D:
def __init__(self, traditional=False, polyDegree=3, model_mk=11):
# Prototype for spatialDim=1, polyDegree=2
self.model_mk = model_mk
self.n_system = polyDegree + 1
self.polyDegree = polyDegree
self.quadOrder = 28
self.traditional = traditional
[self.quadPts, self.quadWeights] = math.qGaussLegendre1D(self.quadOrder) # dims = nq
self.nq = self.quadWeights.size
self.mBasis = math.computeMonomialBasis1D(self.quadPts, self.polyDegree) # dims = (N x nq)
self.inputDim = self.mBasis.shape[0] # = self.nSystem
# generate geometry
self.x0 = 0
self.x1 = 1
self.nx = 100
self.dx = (self.x1 - self.x0) / self.nx
        # physics (isotropic, homogeneous)
self.sigmaS = 1.0
self.scatter_vector = np.zeros(self.n_system)
for i in range(self.n_system):
if i % 2 == 0:
self.scatter_vector[i] = 1.0 / float(i + 1)
# time
self.tEnd = 1.0
self.cfl = 0.05 # 0.3
self.dt = self.cfl * self.dx
# boundary
self.boundary = 0 # 0 = periodic, 1 = neumann with l.h.s. source,
if self.boundary == 0:
print("Periodic boundary conditions")
else:
print("Dirichlet boundary conditions")
# self.datafile = "data_file_1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_inflow.csv"
# self.solution_file = "1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_inflow.csv"
# self.errorfile = "err_1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_inflow.csv"
self.datafile = "data_file_1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_periodic.csv"
self.solution_file = "1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_periodic.csv"
self.errorfile = "err_1D_M" + str(self.polyDegree) + "_MK" + str(self.model_mk) + "_periodic.csv"
# Solver variables Traditional
self.u = self.ic_zero() # self.ic_periodic()# self.ic_zero() #
self.alpha = np.zeros((self.n_system, self.nx))
self.xFlux = np.zeros((self.n_system, self.nx + 1), dtype=float)
self.h = np.zeros(self.nx)
self.h2 = np.zeros(self.nx)
self.u2 = self.ic_zero() # self.ic_periodic() # self.ic_zero() #
self.alpha2 = np.zeros((self.n_system, self.nx))
self.xFlux2 = np.zeros((self.n_system, self.nx + 1), dtype=float)
# Neural closure
self.neuralClosure = None
print("Using tensorflow with version:")
print(tf.__version__)
self.legacy_model = False
if not self.traditional:
if self.model_mk == 11:
if self.polyDegree == 1:
self.neuralClosure = init_neural_closure(network_mk=11, poly_degree=1, spatial_dim=1,
folder_name="tmp",
loss_combination=2,
nw_width=10, nw_depth=7, normalized=True)
self.neuralClosure.create_model()
### Need to load this model as legacy code
print("Load model in legacy mode. Model was created using tf 2.2.0")
self.legacy_model = True
imported = tf.keras.models.load_model("models/_simulation/mk11_M1_1D/best_model")
self.neuralClosure.model_legacy = imported
elif self.polyDegree == 2:
self.neuralClosure = init_neural_closure(network_mk=11, poly_degree=2, spatial_dim=1,
folder_name="tmp",
loss_combination=2,
nw_width=15, nw_depth=7, normalized=True)
self.neuralClosure.create_model()
### Need to load this model as legacy code
print("Load model in legacy mode. Model was created using tf 2.2.0")
self.legacy_model = True
imported = tf.keras.models.load_model("models/_simulation/mk11_M2_1D/best_model")
self.neuralClosure.model_legacy = imported
elif self.polyDegree == 3:
self.neuralClosure = init_neural_closure(network_mk=13, poly_degree=3, spatial_dim=1,
folder_name="002_sim_M3_1D", loss_combination=2,
nw_width=20, nw_depth=7, normalized=True)
self.neuralClosure.loadModel("../../models/002_sim_M3_1D")
elif self.model_mk == 15:
if self.polyDegree == 1:
self.neuralClosure = init_neural_closure(network_mk=self.model_mk, poly_degree=1, spatial_dim=1,
folder_name="_simulation/mk15_M1_1D",
loss_combination=2, nw_width=30, nw_depth=2,
normalized=True, input_decorrelation=True,
scale_active=True)
self.neuralClosure.load_model()
if self.polyDegree == 2:
self.neuralClosure = init_neural_closure(network_mk=self.model_mk, poly_degree=2, spatial_dim=1,
folder_name="_simulation/mk15_M2_1D",
loss_combination=2, nw_width=50, nw_depth=2,
normalized=True, input_decorrelation=True,
scale_active=True)
self.neuralClosure.load_model()
elif self.polyDegree == 3:
self.neuralClosure = init_neural_closure(network_mk=13, poly_degree=3, spatial_dim=1,
folder_name="_simulation/mk15_M3_1D", loss_combination=2,
nw_width=20, nw_depth=7, normalized=True)
self.neuralClosure.loadModel("../../models/002_sim_M3_1D")
# Analysis variables
self.errorMap = np.zeros((self.n_system, self.nx))
self.normErrorMap = np.zeros(self.nx)
self.realizabilityMap = np.zeros(self.nx)
columns = ['u0', 'u1', 'u2', 'alpha0', 'alpha1', 'alpha2', 'h'] # , 'realizable']
self.dfErrPoints = pd.DataFrame(columns=columns)
with open('figures/solvers/' + self.errorfile, 'w', newline='') as f:
# create the csv writer
writer = csv.writer(f)
row = ["t", "err_u", "err_alpha", "int_h", "int_h_ref"]
writer.writerow(row)
with open('figures/solvers/' + self.datafile, 'w', newline='') as f:
# create the csv writer
writer = csv.writer(f)
row = ["t", "u_0", "u_1", "u_2", "alpha_0", "alpha_1", "alpha_2", "entropy"]
writer.writerow(row)
with open('figures/solvers/' + self.solution_file, 'w', newline='') as f:
# create the csv writer
writer = csv.writer(f)
if self.polyDegree == 1:
row = ["u_0", "u_1", "u_0_ref", "u_1_ref"]
elif self.polyDegree == 2:
row = ["u_0", "u_1", "u_2", "u_0_ref", "u_1_ref", "u_2_ref"]
writer.writerow(row)
def get_realizable_moment(self, value=1.0):
u_ic0 = value * np.ones((1,))
u_ic1 = np.zeros((1,))
erg = np.concatenate([u_ic0, u_ic1], axis=0)
if self.polyDegree >= 2:
u_ic2 = 0.5 * value * np.ones((self.n_system - 2,))
erg = np.concatenate([u_ic0, u_ic1, u_ic2], axis=0)
return erg
def boundary_inflow(self):
"""
        Gives the flux for an inflow condition from the left
        with f(t>0, x=0, v>0) = 0.5
"""
f = 0.5 * np.ones((self.nq))
for idx_quad in range(self.nq):
if self.quadPts[idx_quad] <= 0:
f[idx_quad] *= 0.0
return f
def ic_zero(self):
self.boundary = 1
f0 = 0.0001 * np.ones(self.nq)
t = np.multiply(f0, self.mBasis)
t2 = np.multiply(t, self.quadWeights)
u_ic_pt = np.sum(t2, axis=1)
u_ic = np.zeros((self.n_system, self.nx))
for idx_cell in range(self.nx):
u_ic[:, idx_cell] = u_ic_pt
return u_ic
def ic_periodic(self):
def sincos(x):
return 1.5 + np.cos(2 * np.pi * x)
u_ic = np.zeros((self.n_system, self.nx))
for i in range(self.nx):
x_koor = self.x0 + (i - 0.5) * self.dx
u_ic[0, i] = sincos(x_koor)
u_ic[1, i] = 0.3 * u_ic[0, i]
if self.polyDegree > 1:
u_ic[2, i] = 0.5 * u_ic[0, i]
if self.polyDegree > 2:
u_ic[3, i] = 0.5 * u_ic[0, i]
return u_ic
def ic_linesource(self):
"""
brief: linesource test case
"""
def normal_dist(x, mean, sd):
prob_density = 1 / (4.0 * np.pi * sd) * np.exp(-0.5 * ((x - mean) / sd) ** 2)
return max(prob_density, 0.001)
u_ic = np.zeros((self.n_system, self.nx))
for i in range(self.nx):
x_koor = self.x0 + (i - 0.5) * self.dx
u_ic[0, i] = normal_dist(x_koor, 0.0, 0.01)
u_ic[1, i] = 0.0
if self.polyDegree >= 2:
u_ic[2, i] = 0.3 * u_ic[0, i]
if self.polyDegree >= 3:
u_ic[3, i] = 0.005 * u_ic[0, i]
print("using linesource initial conditions")
return u_ic
def ic_soft_linesource(self):
"""
brief: linesource test case
"""
def coscos(x):
if -0.49 < x < 0.49:
return np.cos(x) * np.cos(x)
else:
return 0.01
u_ic = np.zeros((self.n_system, self.nx))
for i in range(self.nx):
x_koor = self.x0 + (i - 0.5) * self.dx
u_ic[0, i] = coscos(x_koor)
u_ic[1, i] = 0.0
if self.polyDegree >= 2:
u_ic[2, i] = 0.05 * u_ic[0, i]
if self.polyDegree >= 3:
u_ic[3, i] = 0.05 * u_ic[0, i]
print("using soft linesource initial conditions")
return u_ic
def ic_bump(self):
u_ic = np.zeros((self.n_system, self.nx))
for i in range(self.nx):
x_koor = self.x0 + (i - 0.5) * self.dx
if 1 > x_koor > -1:
u_ic[0, i] = 1.0
u_ic[1, i] = 0.0
if self.polyDegree >= 2:
u_ic[2, i] = 0.5
if self.polyDegree == 3:
N1 = u_ic[1, i] / u_ic[0, i]
N2 = u_ic[2, i] / u_ic[0, i]
upper = N2 - (N1 - N2) ** 2 / (1 - N1)
lower = - N2 + (N1 + N2) ** 2 / (1 + N1)
                    u_ic[3, i] = (upper + lower) / 2 * u_ic[0, i]  # midpoint of the realizable interval
else:
u_ic[0, i] = 0.5
u_ic[1, i] = 0.0
if self.polyDegree >= 2:
u_ic[2, i] = 0.25
# u_ic[2, i] = 0.25
if self.polyDegree == 3:
N1 = u_ic[1, i] / u_ic[0, i]
N2 = u_ic[2, i] / u_ic[0, i]
upper = N2 - (N1 - N2) ** 2 / (1 - N1)
lower = - N2 + (N1 + N2) ** 2 / (1 + N1)
                    u_ic[3, i] = (upper + lower) / 2 * u_ic[0, i]  # midpoint of the realizable interval
# uIc[0, i] = sincos(x=xKoor)
# uIc[1, i] = 0.0 # 0.8 * uIc[0, i] # 0.5 * uIc[0, i] # realizable
# uIc[2, i] = 0.1 * uIc[0, i] # 1 + (0.8 ** 2 + 0.05) * uIc[
# 0, i] # uIc[1, i] ** 2 + 0.1 # uIc[1, i] ** 2 + (1 - uIc[1, i] ** 2) / 2 # realizable
# if self.polyDegree == 3:
# N1 = uIc[1, i] / uIc[0, i]
# N2 = uIc[2, i] / uIc[0, i]
# uIc[3, i] = -N2 + (N1 + N2) ** 2 / (1 + N1) + 0.002 # error!
return u_ic
def solve(self, maxIter=100, t_end=1.0):
# self.show_solution(0)
idx_time = 0
while idx_time < maxIter and idx_time * self.dt < t_end:
self.solve_iter_newton(idx_time)
self.solver_iter_ml(idx_time)
print("Iteration: " + str(idx_time) + ". Time " + str(idx_time * self.dt) + " of " + str(t_end))
self.error_analysis(idx_time * self.dt)
# print iteration results
# self.show_solution(idx_time)
idx_time += 1
self.show_solution(idx_time)
self.write_solution()
return self.u
def solve_animation_iter_error(self, maxIter=100):
fps = 1 / self.dt
# First set up the figure, the axis, and the plot element we want to animate
fig, ax = plt.subplots()
ax.set_xlim((-1.5, 1.5))
ax.set_ylim((-0.15, 1.15))
line1, = ax.plot([], [], "ro", label="u0_ML")
line2, = ax.plot([], [], "ro", label="u1_ML")
line3, = ax.plot([], [], "ro", label="u2_ML")
line4, = ax.plot([], [], "k-", label="u0_trad")
line5, = ax.plot([], [], "k--", label="u1_trad")
line6, = ax.plot([], [], "k:", label="u2_trad")
if self.polyDegree == 3:
line7, = ax.plot([], [], "ro", label="u3_ML")
line8, = ax.plot([], [], "k.", label="u3_trad")
x = np.linspace(self.x0, self.x1, self.nx)
ax.legend()
def animate_func(i):
# entropy closure and
self.entropy_closure_newton()
# reconstruction
self.realizability_reconstruction()
# entropy closure and
self.compute_closure_ml()
self.compare_and_retrain()
# flux computation
self.compute_flux_newton()
# FVM update
self.fvm_update_newton()
# flux computation
self.compute_flux_ml()
# FVM update
self.fvm_update_ml()
# self.solve_iter_newton(i)
# self.solve_iter_ml(i)
# step by step execution
# self.compare_and_retrain()
print("Iteration: " + str(i))
# ax.plot(x, self.u2[0, :])
line1.set_data(x, self.u2[0, :])
line2.set_data(x, self.u2[1, :])
line3.set_data(x, self.u2[2, :])
if self.polyDegree == 3:
line7.set_data(x, self.u2[3, :])
line4.set_data(x, self.u[0, :])
line5.set_data(x, self.u[1, :])
line6.set_data(x, self.u[2, :])
if self.polyDegree == 3:
line8.set_data(x, self.u[3, :])
return [line1, line2, line3, line4, line5, line6, line7, line8]
return [line1, line2, line3, line4, line5, line6]
# anim = animation.FuncAnimation(fig, animate_func, frames=maxIter, interval=10000 * self.dt)
anim = animation.FuncAnimation(fig, animate_func, frames=maxIter, interval=20000 * self.dt, blit=True)
if self.traditional:
filename = "newton_version.gif"
else:
filename = "ErrorPerIter.gif"
# anim.save('ErrorPerIter.gif', writer='imagemagick', fps=60)
anim.save(filename, writer=animation.PillowWriter(fps=fps))
def solve_iter_newton(self, t_idx):
# entropy closure and
self.entropy_closure_newton()
# reconstruction
# self.realizability_reconstruction()
# flux computation
self.compute_flux_newton()
# FVM update
self.fvm_update_newton()
return 0
def solver_iter_ml(self, t_idx):
# entropy closure and
self.compute_closure_ml()
# flux computation
self.compute_flux_ml()
# FVM update
self.fvm_update_ml()
return 0
def entropy_closure_newton(self):
# if (self.traditional): # NEWTON
for i in range(self.nx):
self.entropy_closure_single_row(i)
return 0
def entropy_closure_single_row(self, i):
rowRes = 0
opti_u = self.u[:, i]
alpha_init = self.alpha[:, i]
# test objective functions
# t = self.create_opti_entropy(opti_u)(alpha_init)
# tp = self.create_opti_entropy_prime(opti_u)(alpha_init)
# t = self.create_opti_entropy_hessian()(alpha_init)
# print(t)
# print(tp)
normU = np.abs(self.u[1, i])
u0 = self.u[0, i]
if u0 == 0:
print("u0 = 0")
elif normU / u0 > 0.95:
            print("Warning: |u1|/u0 > 0.95, moment is close to the realizability boundary")
opt_result = opt.minimize(fun=self.create_opti_entropy(opti_u), x0=alpha_init,
jac=self.create_opti_entropy_prime(opti_u),
tol=1e-6)
# hess=self.create_opti_entropy_hessian(),
# method='Newton-CG',
if not opt_result.success:
            print("Optimization unsuccessful! u=" + str(opti_u))
            raise ValueError("entropy closure optimization failed for u=" + str(opti_u))
else:
self.alpha[:, i] = opt_result.x
rowRes = opt_result.x
self.h[i] = opt_result.fun
return rowRes
def create_opti_entropy(self, u):
def opti_entropy(alpha):
"""
brief: returns the negative entropy functional with fixed u
nS = batchSize
N = basisSize
nq = number of quadPts
input: alpha, dims = (1 x N)
u, dims = (1 x N)
used members: m , dims = (N x nq)
w , dims = nq
returns h = - alpha*u + <eta_*(alpha*m)>
"""
            # Currently only for the Maxwell-Boltzmann entropy
# compute negative entropy functional
f_quad = np.exp(np.tensordot(alpha, self.mBasis, axes=([0], [0]))) # alpha*m
t1 = np.tensordot(f_quad, self.quadWeights, axes=([0], [0])) # f*w
t2 = np.inner(alpha, u)
return t1 - t2
return opti_entropy
def create_opti_entropy_prime(self, u):
def opti_entropy_prime(alpha):
"""
            brief: returns the gradient of the negative entropy functional with fixed u
nS = batchSize
N = basisSize
nq = number of quadPts
input: alpha, dims = (1 x N)
u, dims = (1 x N)
used members: m , dims = (N x nq)
w , dims = nq
            returns dh/dalpha = <m eta_*'(alpha*m)> - u
            """
            # Currently only for the Maxwell-Boltzmann entropy
f_quad = np.exp(np.tensordot(alpha, self.mBasis, axes=([0], [0]))) # alpha*m
tmp = np.multiply(f_quad, self.quadWeights) # f*w
t2 = np.tensordot(tmp, self.mBasis, axes=([0], [1])) # f * w * momentBasis
return t2 - u
return opti_entropy_prime
def create_opti_entropy_hessian(self):
def opti_entropy_hessian(alpha):
"""
            brief: returns the Hessian of the negative entropy functional with fixed u
nS = batchSize
N = basisSize
nq = number of quadPts
input: alpha, dims = (1 x N)
u, dims = (1 x N)
used members: m , dims = (N x nq)
w , dims = nq
            returns d2h/dalpha2 = <m m^T eta_*''(alpha*m)>
            """
            # Currently only for the Maxwell-Boltzmann entropy
f_quad = np.exp(np.tensordot(alpha, self.mBasis, axes=([0], [0]))) # alpha*m
tmp = np.multiply(f_quad, self.quadWeights) # f*w
t2 = 0
for i in range(self.nq):
t = np.tensordot(self.mBasis[:, i], self.mBasis[:, i], axes=0)
t2 += t * tmp[i]
return t2
return opti_entropy_hessian
def realizability_reconstruction(self):
for i in range(self.nx):
# self.u2[:, i] = np.copy(self.u[:, i])
a = np.reshape(self.alpha[:, i], (1, self.n_system))
self.u[:, i] = math.reconstructU(alpha=a, m=self.mBasis, w=self.quadWeights)
# print("(" + str(self.u2[:, i]) + " | " + str(self.u[:, i]))
# h = self.create_opti_entropy(self.u[:, i])(self.alpha[:, i])
# row = [0, self.u[0, i], self.u[1, i], self.u[2, i], self.alpha[0, i], self.alpha[1, i],
# self.alpha[2, i], h]
return 0
def compare_and_retrain(self):
# open the file in the write mode
with open('figures/solvers/csv_writeout/Monomial_M2_1D.csv', 'a+', newline='') as f:
# create the csv writer
writer = csv.writer(f)
for i in range(self.nx):
h = self.create_opti_entropy(self.u[:, i])(self.alpha[:, i])
row = [0, self.u[0, i], self.u[1, i], self.u[2, i], self.alpha[0, i], self.alpha[1, i],
self.alpha[2, i], h]
writer.writerow(row)
h = self.create_opti_entropy(self.u2[:, i])(self.alpha2[:, i])
row = [1, self.u2[0, i], self.u2[1, i], self.u2[2, i], self.alpha2[0, i], self.alpha2[1, i],
self.alpha2[2, i], h]
# write a row to the csv file
writer.writerow(row)
return 0
def compute_flux_newton(self):
"""
for periodic boundaries and inflow boundaries, upwinding.
writes to xFlux and yFlux, uses alpha
"""
# reconstruct all densities
densities = np.zeros((self.nx, self.nq))
for i in range(self.nx):
densities[i, :] = math.entropyDualPrime(np.tensordot(self.alpha[:, i], self.mBasis, axes=([0], [0])))
# compute fluxes in interior cell faces
for i in range(1, self.nx):
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(densities[i - 1, q], densities[i, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux[:, i] = flux
# compute boundary layer fluxes
if self.boundary == 0: # periodic boundary
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(densities[self.nx - 1, q], densities[0, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux[:, 0] = flux
self.xFlux[:, self.nx] = flux
if self.boundary == 1: # neumann with l.h.s inflow
flux_lhs = self.boundary_inflow()
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(flux_lhs[q], densities[0, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux[:, 0] = flux # inflow conditions
self.xFlux[:, self.nx] = self.xFlux[:, self.nx - 1] # do nothing conditions
return 0
def upwinding(self, fluxL, fluxR, quadpt):
# t = np.inner(quadpt, normal)
if quadpt > 0:
return quadpt * fluxL
else:
return quadpt * fluxR
def fvm_update_newton(self):
for i in range(self.nx):
ip1 = i + 1
# advection
self.u[:, i] = self.u[:, i] + ((self.xFlux[:, i] - self.xFlux[:, ip1]) / self.dx) * self.dt
# Scattering
self.u[:, i] += self.dt * self.sigmaS * (self.scatter_vector * self.u[0, i] - self.u[:, i])
return 0
def compute_closure_ml(self):
tmp = np.copy(np.transpose(self.u2))
for i in range(self.nx):
if tmp[i, 0] < 0.0001:
tmp[i, 0] = 0.0001
[u_pred, alpha_pred, h] = self.neuralClosure.call_scaled_64(np.asarray(tmp), legacy_mode=self.legacy_model)
for i in range(self.nx):
self.alpha2[:, i] = alpha_pred[i, :]
self.h2[i] = h[i]
return 0
def compute_flux_ml(self):
"""
for periodic boundaries and inflow boundaries, upwinding.
writes to xFlux and yFlux, uses alpha
"""
# reconstruct all densities
densities = np.zeros((self.nx, self.nq))
for i in range(self.nx):
densities[i, :] = math.entropyDualPrime(np.tensordot(self.alpha2[:, i], self.mBasis, axes=([0], [0])))
# compute fluxes in interior cell faces
for i in range(1, self.nx):
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(densities[i - 1, q], densities[i, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux2[:, i] = flux
# compute boundary layer fluxes
if self.boundary == 0: # periodic boundary
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(densities[self.nx - 1, q], densities[0, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux2[:, 0] = flux
self.xFlux2[:, self.nx] = flux
if self.boundary == 1: # neumann with l.h.s inflow
flux_lhs = self.boundary_inflow()
flux = np.zeros(self.n_system)
for q in range(self.nq): # integrate upwinding result
upwind = self.upwinding(flux_lhs[q], densities[0, q], self.quadPts[q])
flux = flux + upwind * self.quadWeights[q] * self.mBasis[:, q]
self.xFlux2[:, 0] = flux # inflow conditions
            self.xFlux2[:, self.nx] = self.xFlux2[:, self.nx - 1]  # do nothing conditions
return 0
def fvm_update_ml(self):
for i in range(self.nx):
ip1 = i + 1
# advection
self.u2[:, i] = self.u2[:, i] + ((self.xFlux2[:, i] - self.xFlux2[:, ip1]) / self.dx) * self.dt
# Scattering
self.u2[:, i] += self.dt * self.sigmaS * (self.scatter_vector * self.u2[0, i] - self.u2[:, i])
return 0
def show_solution(self, idx):
plt.clf()
x = np.linspace(self.x0, self.x1, self.nx)
plt.plot(x, self.u[0, :], "k-", linewidth=1, label="Newton u0")
plt.plot(x, self.u2[0, :], 'o', markersize=2, markerfacecolor='orange',
markeredgewidth=0.5, markeredgecolor='k', label="Neural u0")
plt.xlim([self.x0, self.x1])
# plt.ylim([0.0, 1.5])
plt.xlabel("x")
plt.ylabel("u")
plt.legend()
# plt.savefig("figures/solvers/u_0_comparison_" + str(idx) + ".png", dpi=450)
# plt.clf()
plt.plot(x, self.u[1, :], "k-", linewidth=1, label="Newton u1")
plt.plot(x, self.u2[1, :], 'o', markersize=2, markerfacecolor='orange',
markeredgewidth=0.5, markeredgecolor='k', label="Neural u1")
plt.xlim([self.x0, self.x1])
# plt.ylim([0.0, 1.5])
plt.xlabel("x")
plt.ylabel("u")
plt.legend()
# plt.savefig("figures/solvers/u_1_comparison_" + str(idx) + ".png", dpi=450)
# plt.clf()
if self.polyDegree >= 2:
plt.plot(x, self.u[2, :], "k-", linewidth=1, label="Newton u2")
plt.plot(x, self.u2[2, :], 'o', markersize=2, markerfacecolor='orange',
markeredgewidth=0.5, markeredgecolor='k', label="Neural u2")
plt.xlim([self.x0, self.x1])
# plt.ylim([0.0, 1.5])
plt.xlabel("x")
plt.ylabel("u")
plt.legend()
plt.savefig("figures/solvers/u_comparison_" + str(idx) + ".png", dpi=450)
plt.clf()
err = np.linalg.norm(self.u - self.u2, axis=0)
plt.plot(x, err, "k-", linewidth=1, label="Newton closure")
plt.xlim([self.x0, self.x1])
# plt.ylim([0.0, 1.5])
plt.xlabel("x")
plt.ylabel("norm(u-u-theta)")
plt.legend()
plt.savefig("figures/solvers/error" + str(idx) + ".png", dpi=450)
plt.clf()
# plt.show()
return 0
def error_analysis(self, time):
        # mean absolute error
with open('figures/solvers/' + self.datafile, 'a+', newline='') as f:
# create the csv writer
writer = csv.writer(f)
# writer.writerow("u0,u1,u2,u0_ref,u1_ref,u2_ref")
for i in range(self.nx):
if self.polyDegree == 1:
row = [i, self.u[0, i], self.u[1, i], self.alpha[0, i], self.alpha[1, i],
self.h[i]]
elif self.polyDegree == 2:
row = [i, self.u[0, i], self.u[1, i], self.u[2, i], self.alpha[0, i], self.alpha[1, i],
self.alpha[2, i], self.h[i]]
writer.writerow(row)
err_u = np.linalg.norm(self.u - self.u2, axis=0)
rel_err_u = err_u / np.linalg.norm(self.u, axis=0)
avg_rel_err_u = np.sum(rel_err_u) / self.nx
err_alpha = np.linalg.norm(self.alpha - self.alpha2, axis=0)
rel_err_alpha = err_alpha / np.linalg.norm(self.alpha, axis=0)
avg_rel_err_alpha = np.sum(rel_err_alpha) / self.nx
entropy_orig = - self.h.sum() * self.dx
entropy_ml = self.h2.sum() * self.dx
with open('figures/solvers/' + self.errorfile, 'a+', newline='') as f:
writer = csv.writer(f)
writer.writerow([time, avg_rel_err_u, avg_rel_err_alpha, entropy_ml, entropy_orig])
print("Error data written")
return 0
def write_solution(self):
with open('figures/solvers/' + self.solution_file, 'a+', newline='') as f:
# create the csv writer
writer = csv.writer(f)
# writer.writerow("u0,u1,u2,u0_ref,u1_ref,u2_ref")
for i in range(self.nx):
if self.polyDegree == 1:
row = [self.u2[0, i], self.u2[1, i], self.u[0, i], self.u[1, i]]
elif self.polyDegree == 2:
row = [self.u2[0, i], self.u2[1, i], self.u2[2, i], self.u[0, i], self.u[1, i], self.u[2, i]]
# row = [iter, self.u[0, i], self.u[1, i], self.u[2, i], self.alpha[0, i], self.alpha[1, i],
# self.alpha[2, i], self.h[i]]
writer.writerow(row)
return 0
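def _example_dual_closure():
    # Minimal standalone sketch of the dual entropy closure solved per cell in
    # entropy_closure_single_row: min_alpha <exp(alpha*m)> - alpha*u for one
    # toy M1 moment vector u; quadrature order and u are arbitrary but realizable.
    q_pts, q_w = math.qGaussLegendre1D(10)
    basis = math.computeMonomialBasis1D(q_pts, 1)  # m = [1, v]
    u = np.array([1.0, 0.1])
    def h(alpha):
        f = np.exp(np.tensordot(alpha, basis, axes=([0], [0])))
        return np.tensordot(f, q_w, axes=([0], [0])) - np.inner(alpha, u)
    res = opt.minimize(h, x0=np.zeros(2), tol=1e-6)
    return res.x  # multipliers alpha with <m exp(alpha*m)> = u at the optimum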
if __name__ == '__main__':
main()
|
{"hexsha": "d8587148075078562220c9868404d48832d5c3a4", "size": 31811, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/solver/MNSolver1D.py", "max_stars_repo_name": "CSMMLab/neuralEntropyClosures", "max_stars_repo_head_hexsha": "5efc5961f2fac36921a749d35f3636c61d1cc873", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-23T07:21:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T11:35:39.000Z", "max_issues_repo_path": "src/solver/MNSolver1D.py", "max_issues_repo_name": "ScSteffen/neuralEntropyClosures", "max_issues_repo_head_hexsha": "5efc5961f2fac36921a749d35f3636c61d1cc873", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/solver/MNSolver1D.py", "max_forks_repo_name": "ScSteffen/neuralEntropyClosures", "max_forks_repo_head_hexsha": "5efc5961f2fac36921a749d35f3636c61d1cc873", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4205729167, "max_line_length": 118, "alphanum_fraction": 0.5137531043, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8676}
|
"""Tools for working with segmented systems."""
from collections import namedtuple
import numpy as truenp
from .geometry import regular_polygon
from .mathops import np
Hex = namedtuple('Hex', ['q', 'r', 's'])
def add_hex(h1, h2):
"""Add two hex coordinates together."""
q = h1.q + h2.q
r = h1.r + h2.r
s = h1.s + h2.s
return Hex(q, r, s)
def sub_hex(h1, h2):
"""Subtract two hex coordinates."""
q = h1.q - h2.q
r = h1.r - h2.r
s = h1.s - h2.s
return Hex(q, r, s)
def mul_hex(h1, h2):
"""Multiply two hex coordinates."""
q = h1.q * h2.q
r = h1.r * h2.r
s = h1.s * h2.s
return Hex(q, r, s)
# as given
hex_dirs = [
Hex(1, 0, -1), Hex(1, -1, 0), Hex(0, -1, 1),
Hex(-1, 0, 1), Hex(-1, 1, 0), Hex(0, 1, -1)
]
def hex_dir(i):
"""Hex direction associated with a given integer, wrapped at 6."""
return hex_dirs[i % 6] # wrap dirs at 6 (there are only 6)
def hex_neighbor(h, direction):
"""Neighboring hex in a given direction."""
return add_hex(h, hex_dir(direction))
def hex_to_xy(h, radius, rot=90):
"""Convert hexagon coordinate to (x,y), if all hexagons have a given radius and rotation."""
if rot == 90:
x = 3/2 * h.q
y = truenp.sqrt(3)/2 * h.q + truenp.sqrt(3) * h.r
else:
x = truenp.sqrt(3) * h.q + truenp.sqrt(3)/2 * h.r
y = 3/2 * h.r
return x*radius, y*radius
def scale_hex(h, k):
"""Scale a hex coordinate by some constant factor."""
return Hex(h.q * k, h.r * k, h.s * k)
def hex_ring(radius):
"""Compute all hex coordinates in a given ring."""
start = Hex(-radius, radius, 0)
tile = start
results = []
# there are 6*r hexes per ring (the i)
# the j ensures that we reset the direction we travel every time we reach a
# 'corner' of the ring.
for i in range(6):
for j in range(radius):
results.append(tile)
tile = hex_neighbor(tile, i)
# rotate one so that the first element is 'north'
for _ in range(radius):
results.append(results.pop(0)) # roll < radius > elements so that the first element is "north"
return results
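def _example_first_ring_centers():
    """Minimal sketch: cartesian centers of the first ring for unit-radius hexagons,
    a quick sanity check of hex_ring and hex_to_xy together."""
    return [hex_to_xy(h, radius=1.0, rot=90) for h in hex_ring(1)]  # six (x, y) pairs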
def _local_window(cy, cx, center, dx, samples_per_seg, x, y):
offset_x = cx + int(center[0]/dx) - samples_per_seg
offset_y = cy + int(center[1]/dx) - samples_per_seg
upper_x = offset_x + (2*samples_per_seg)
upper_y = offset_y + (2*samples_per_seg)
# clamp the offsets
if offset_x < 0:
offset_x = 0
if offset_x > x.shape[1]:
offset_x = x.shape[1]
if offset_y < 0:
offset_y = 0
if offset_y > y.shape[0]:
offset_y = y.shape[0]
if upper_x < 0:
upper_x = 0
if upper_x > x.shape[1]:
upper_x = x.shape[1]
if upper_y < 0:
upper_y = 0
if upper_y > y.shape[0]:
upper_y = y.shape[0]
return slice(offset_y, upper_y), slice(offset_x, upper_x)
class CompositeHexagonalAperture:
"""An aperture composed of several hexagonal segments."""
def __init__(self, x, y, rings, segment_diameter, segment_separation, segment_angle=90, exclude=()):
"""Create a new CompositeHexagonalAperture.
Note that __init__ is relatively computationally expensive and hides a lot of work.
Parameters
----------
x : `numpy.ndarray`
array of x sample positions, of shape (m, n)
y : `numpy.ndarray`
array of y sample positions, of shape (m, n)
rings : `int`
number of rings in the structure
segment_diameter : `float`
flat-to-flat diameter of each segment, same units as x
segment_separation : `float`
edge-to-nearest-edge distance between segments, same units as x
segment_angle : `float`, optional, {0, 90}
rotation angle of each segment
exclude : sequence of `int`
which segment numbers to exclude.
defaults to all segments included.
The 0th segment is the center of the array.
Other segments begin from the "up" orientation and count clockwise.
"""
(
self.vtov,
self.all_centers,
self.windows,
self.local_coords,
            self.local_masks,
self.segment_ids,
self.amp
) = _composite_hexagonal_aperture(rings, segment_diameter, segment_separation,
x, y, segment_angle, exclude)
self.exclude = exclude
def _composite_hexagonal_aperture(rings, segment_diameter, segment_separation, x, y, segment_angle=90, exclude=(0,)):
if segment_angle not in {0, 90}:
raise ValueError('can only synthesize composite apertures with hexagons along a cartesian axis')
flat_to_flat_to_vertex_vertex = 2 / truenp.sqrt(3)
segment_vtov = segment_diameter * flat_to_flat_to_vertex_vertex
rseg = segment_vtov / 2
# center segment
dx = x[0, 1] - x[0, 0]
samples_per_seg = rseg / dx
# add 1, must avoid error in the case that non-center segments
# fall on a different subpixel and have different rounding
# use rseg since it is what we are directly interested in
samples_per_seg = int(samples_per_seg+1)
# compute the center segment over the entire x, y array
# so that mask covers the entirety of the x/y extent
# this may look out of place/unused, but the window is used when creating
# the 'windows' list
cx = int(np.ceil(x.shape[1]/2))
cy = int(np.ceil(y.shape[0]/2))
center_segment_window = _local_window(cy, cx, (0, 0), dx, samples_per_seg, x, y)
    mask = np.zeros(x.shape, dtype=bool)
all_centers = [(0, 0)]
segment_id = 0
segment_ids = [segment_id]
windows = [center_segment_window]
xx = x[center_segment_window]
yy = y[center_segment_window]
local_coords = [
(xx, yy)
]
center_mask = regular_polygon(6, rseg, xx, yy, center=(0, 0), rotation=segment_angle)
if 0 not in exclude:
mask[center_segment_window] |= center_mask
local_masks = [center_mask]
for i in range(1, rings+1):
hexes = hex_ring(i)
centers = [hex_to_xy(h, rseg+(segment_separation/2), rot=segment_angle) for h in hexes]
ids = np.arange(segment_id+1, segment_id+1+len(centers), dtype=int)
id_mask = ~np.isin(ids, exclude, assume_unique=True)
valid_ids = ids[id_mask]
centers = truenp.array(centers)
centers = centers[id_mask]
all_centers += centers.tolist()
for segment_id, center in zip(valid_ids, centers):
# short circuit: if we do not wish to include a segment,
# do no further work on it
if segment_id in exclude:
continue
segment_ids.append(segment_id)
local_window = _local_window(cy, cx, center, dx, samples_per_seg, x, y)
windows.append(local_window)
xx = x[local_window]
yy = y[local_window]
local_coords.append((xx-center[0], yy-center[1]))
local_mask = regular_polygon(6, rseg, xx, yy, center=center, rotation=segment_angle)
local_masks.append(local_mask)
mask[local_window] |= local_mask
segment_id = ids[-1]
return segment_vtov, all_centers, windows, local_coords, local_masks, segment_ids, mask
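def _example_composite_aperture():
    """Minimal usage sketch, assuming the default numpy backend: a 2-ring
    composite aperture on a placeholder grid. Grid extent, segment diameter,
    and separation values here are arbitrary."""
    xi = truenp.linspace(-2, 2, 256)
    x, y = truenp.meshgrid(xi, xi)
    cha = CompositeHexagonalAperture(x, y, rings=2, segment_diameter=0.5,
                                     segment_separation=0.01)
    return cha.amp  # boolean mask of the composite aperture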
|
{"hexsha": "e4df95cc2cdc1f1463a8b2b8946b91a69dbe5207", "size": 7391, "ext": "py", "lang": "Python", "max_stars_repo_path": "prysm/segmented.py", "max_stars_repo_name": "deisenroth/prysm", "max_stars_repo_head_hexsha": "53a400ef89697041f67192e879e61ad28c451318", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 110, "max_stars_repo_stars_event_min_datetime": "2017-09-28T05:24:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:34:08.000Z", "max_issues_repo_path": "prysm/segmented.py", "max_issues_repo_name": "mjhoptics/prysm", "max_issues_repo_head_hexsha": "5dea335e068d04d1006741d8eb02278181751f73", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 82, "max_issues_repo_issues_event_min_datetime": "2018-01-03T03:52:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T02:30:19.000Z", "max_forks_repo_path": "prysm/segmented.py", "max_forks_repo_name": "mjhoptics/prysm", "max_forks_repo_head_hexsha": "5dea335e068d04d1006741d8eb02278181751f73", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2017-12-28T02:47:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:10:11.000Z", "avg_line_length": 32.1347826087, "max_line_length": 117, "alphanum_fraction": 0.61304289, "include": true, "reason": "import numpy", "num_tokens": 2009}
|
from scipy import stats # statistical tests from the scientific python lib
import numpy as np # matrix manipulation
import cv2 # image processing lib
import argparse # input and output file handling
import time # timing of the processing run
from time import sleep # time based library
from collections import defaultdict # DS
from tqdm import tqdm # for progress bar
# K-means algorithm to cluster the histogram of image
# Value of K is auto-selected
def animefy(input_image, old=0):  # old=1 additionally returns a vintage (sepia) variant
output = np.array(input_image)
x, y, channel = output.shape
# hists = []
for i in range(channel):
        # apply an edge-preserving bilateral filter to channel i
output[:, :, i] = cv2.bilateralFilter(output[:, :, i], 5, 50, 50)
edge = cv2.Canny(output, 100, 200)
#Convert image from one color space to another (RGB to HSV value)
output = cv2.cvtColor(output, cv2.COLOR_RGB2HSV)
    # Initialize histograms for the H, S and V channels
hists = []
#H val histogram
hist, _ = np.histogram(output[:, :, 0], bins=np.arange(180+1))
hists.append(hist)
#S val histogram
hist, _ = np.histogram(output[:, :, 1], bins=np.arange(256+1))
hists.append(hist)
#V val histogram
hist, _ = np.histogram(output[:, :, 2], bins=np.arange(256+1))
hists.append(hist)
Collect = []
    # collect the H, S, V histograms after applying KHist to each
for h in tqdm(hists,desc="Progress 1 of 2"):
sleep(0.2)
Collect.append(KHist(h))
"""print("centroids: {0}".format(Collect))"""
output = output.reshape((-1, channel))
for i in tqdm(range(channel),desc="Progress 2 of 2"):
channel1 = output[:, i]
index = np.argmin(np.abs(channel1[:, np.newaxis] - Collect[i]), axis=1)
output[:, i] = Collect[i][index]
output = output.reshape((x, y, channel))
output = cv2.cvtColor(output, cv2.COLOR_HSV2RGB)
    # find contours on the edge map and draw them onto the output image
contours, _ = cv2.findContours(edge,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
cv2.drawContours(output, contours, -1, 0, thickness=1)
output2=cv2.cvtColor(output, cv2.COLOR_BGR2XYZ)
output3=cv2.cvtColor(output, cv2.COLOR_BGR2HLS)
    # multi-value return statement needed
if(old==0):
return output,output2,output3
else:
output1 = output.copy()
output = np.array(output, dtype=np.float64) # converting to float to prevent loss
output = cv2.transform(output, np.matrix([[0.272, 0.534, 0.131],
[0.349, 0.686, 0.168],
                                              [0.393, 0.769, 0.189]])) # multiplying the image with a vintage (sepia) transform matrix
output[np.where(output > 255)] = 255 # normalizing values greater than 255 to 255
output = np.array(output, dtype=np.uint8) # converting back to int
return output1,output2,output3,output
def update_C(C, histogram):
#update centroids until they don't change
while (True):
groups = defaultdict(list)
# Assign pixel values
for i in range(len(histogram)):
if histogram[i] == 0:
continue
d = np.abs(C-i)
index = np.argmin(d)
groups[index].append(i)
new_C = np.array(C)
for i, indice in groups.items():
if np.sum(histogram[indice]) == 0:
continue
new_C[i] = int(np.sum(indice*histogram[indice])/np.sum(histogram[indice]))
if np.sum(new_C-C) == 0:
break
C = new_C
return C, groups
def KHist(hist):
#Choose the most appropriate K for k-means and get the centroids accordingly
alpha = 0.001 # p-value threshold for normaltest
    N = 80 # minimum group size for normaltest
C = np.array([128])
while True:
C, groups = update_C(C, hist)
#start increase K if possible
        new_C = set() # use a set to avoid duplicate values when separating a centroid
        for i, indice in groups.items():
            # if there are not enough values in the group, do not separate
if len(indice) < N:
new_C.add(C[i])
continue
            # judge whether to separate the centroid by testing whether the group's values follow a normal distribution
z, pval = stats.normaltest(hist[indice])
if pval < alpha:
                # not a normal dist, separate
left = 0 if i == 0 else C[i-1]
right = len(hist)-1 if i == len(C)-1 else C[i+1]
delta = right-left
if delta >= 3:
c1 = (C[i]+left)/2
c2 = (C[i]+right)/2
new_C.add(c1)
new_C.add(c2)
else:
                    # though it is not a normal dist, there is no room to separate
new_C.add(C[i])
else:
                # normal dist, no need to separate
new_C.add(C[i])
if len(new_C) == len(C):
break
else:
C = np.array(sorted(new_C))
return C
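# Minimal synthetic check for KHist: on a clearly bimodal histogram the K
# selection should settle on roughly one centroid per mode. Values are made up.
def _example_khist():
    np.random.seed(0)
    samples = np.concatenate([np.random.normal(60, 5, 2000),
                              np.random.normal(180, 5, 2000)])
    hist, _ = np.histogram(samples, bins=np.arange(256 + 1))
    return KHist(hist)  # expected: centroids near 60 and 180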
if __name__ == '__main__':
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
#reading the image
image = cv2.imread((args["image"]))
start_time = time.time()
    print("Wait, work is in progress.")
output,output2,output3,output4 = animefy(image,1)
end_time = time.time()
t = end_time-start_time # processing time
print('time: {0}s'.format(t))
cv2.imwrite("assets/anime_effect.jpg", output) # save the image
cv2.imwrite("assets/anime_Blue_effect.jpg", output2)
cv2.imwrite("assets/anime_PredatorView_effect.jpg", output3)
cv2.imwrite("assets/anime_vintage_effect.jpg", output4)
print("Your results are ready!")
|
{"hexsha": "e2a646cafe4dc519754324828d89427401baadfd", "size": 5954, "ext": "py", "lang": "Python", "max_stars_repo_path": "anime_effect.py", "max_stars_repo_name": "Aayush-hub/ArtCV", "max_stars_repo_head_hexsha": "d5f01d9dacb3bb1f976d38d14e2dd3ac85e4b94a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2020-08-10T16:00:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-02T13:58:22.000Z", "max_issues_repo_path": "anime_effect.py", "max_issues_repo_name": "BlueBlaze6335/ArtCV", "max_issues_repo_head_hexsha": "f2a6b72a8b0add5e16c7a4846b227054df87546d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 72, "max_issues_repo_issues_event_min_datetime": "2020-09-18T05:03:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-21T04:04:07.000Z", "max_forks_repo_path": "anime_effect.py", "max_forks_repo_name": "BlueBlaze6335/ArtCV", "max_forks_repo_head_hexsha": "f2a6b72a8b0add5e16c7a4846b227054df87546d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2020-08-14T18:50:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-29T20:19:35.000Z", "avg_line_length": 38.1666666667, "max_line_length": 128, "alphanum_fraction": 0.5925428284, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1488}
|
import torch
from torch import optim
import time
import os
from model.UNet_model import UNet
from loader.data_loader import SlicesDataset
from torch.utils.data import DataLoader
import numpy as np
from trainer.inference import UNetInferenceAgent
from utils.utils import Dice3d, Jaccard3d
class UNetExperiment:
"""
    Basic life cycle of a segmentation experiment: train, validate and test a UNet model
"""
def __init__(self, config, split, dataset):
self.n_epochs = config.n_epochs
self.split = split
self._time_start = ""
self._time_end = ""
self.epoch = 0
self.name = config.name
#create output folders
dirname = f'{time.strftime("%Y-%m-%d_%H%M", time.gmtime())}_{self.name}'
self.out_dir = os.path.join(config.test_results_dir, dirname)
os.makedirs(self.out_dir, exist_ok=True)
#Create data loaders
self.train_loader = DataLoader(SlicesDataset(dataset[split["train"]]),
batch_size=config.batch_size,
shuffle=True,
num_workers=0)
self.val_loader = DataLoader(SlicesDataset(dataset[split["val"]]),
batch_size = config.batch_size,
shuffle=True,
num_workers=0)
# We will access volumes directly for testing
self.test_data = dataset[split["test"]]
# Check whether CUDA is available
if not torch.cuda.is_available():
print("WARNING: No CUDA device is found. This may take significantly longer!")
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.model = UNet(num_classes=3)
#self.model = UNet(num_classes=3, use_dropout=True)
self.model.to(self.device)
#We are using standard cross_entropy loss
self.loss_function = torch.nn.CrossEntropyLoss()
#Optimizer
self.optimizer = optim.Adam(self.model.parameters(),
lr = config.learning_rate)
# Scheduler helps us update learning rate automatically
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,'min')
def train(self):
"""
This method is executed once per epoch and takes care of
model weight update cycle
"""
print(f"Training epoch {self.epoch} ...")
self.model.train()
loss_list = []
#Loop over our minibatches
for i, batch in enumerate(self.train_loader):
batch_imgs = batch["image"]
batch_segs = batch["seg"]
data = batch_imgs.to(self.device).type(torch.float)
target = batch_segs.to(self.device).type(torch.long)
prediction = self.model(data)
loss = self.loss_function(prediction, target[:,0,:,:])
            # add L2 regularization to the loss
            l2_lambda = 0.001
            l2_norm = sum(p.pow(2.0).sum() for p in self.model.parameters() if p.requires_grad)
            loss = loss + l2_lambda * l2_norm
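            # Note: a common alternative (an option, not what this code does) is to pass a
            # weight_decay argument to the optimizer, e.g. optim.Adam(..., weight_decay=1e-3).
            # For plain SGD that is equivalent to an L2 penalty in the loss; for Adam the two
            # interact differently with the adaptive step sizes (cf. AdamW).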
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_list.append(loss.item())
            # (optional) uncomment to log progress to the console every 10th batch:
            # if (i % 10) == 0:
            #     print(f"Epoch: {self.epoch} Train loss: {loss.item():.4f}, "
            #           f"{100*(i+1)/len(self.train_loader):.1f}% complete")
return loss_list
def validate(self):
"""
This method runs validation cycle, using same metrics as
Train method. Note that model needs to be switched to eval mode
and no_grad needs to be called so that gradients do not
propagate.
"""
print(f"Validating epoch {self.epoch}...")
self.model.eval()
loss_list = []
with torch.no_grad():
for i, batch in enumerate(self.val_loader):
batch_imgs = batch["image"]
batch_segs = batch["seg"]
data = batch_imgs.to(self.device).type(torch.float)
target = batch_segs.to(self.device).type(torch.long)
prediction = self.model(data)
                # CrossEntropyLoss applies log-softmax internally, so no explicit softmax is needed
                loss = self.loss_function(prediction, target[:,0,:,:])
                # print(f"Batch {i}. Data shape {data.shape} Loss {loss.item():.4f}")
                loss_list.append(loss.item())
self.scheduler.step(np.mean(loss_list))
print(f"Validation complete")
return loss_list
def save_model_parameters(self):
"""
Saves model parameters to a file in results directory
"""
path = os.path.join(self.out_dir, "model.pth")
torch.save(self.model.state_dict(), path)
def load_model_parameters(self, path=''):
"""
Loads model parameters from a supplied path or a results directory
"""
if not path:
model_path = os.path.join(self.out_dir, "model.pth")
else:
model_path = path
if os.path.exists(model_path):
self.model.load_state_dict(torch.load(model_path))
else:
raise Exception(f"Could not find path {model_path}")
def run_test(self):
"""
        This runs the test cycle on the test dataset.
        Note that testing differs from training and validation: we compute
        a lot more metrics here and return a dictionary that could later
        be persisted as JSON.
"""
print("Testing...")
self.model.eval()
inference_agent = UNetInferenceAgent(model=self.model, device=self.device)
out_dict = {}
out_dict["volume_stats"] = []
dc_list = []
jc_list = []
for i, x in enumerate(self.test_data):
pred_label = inference_agent.single_volume_inference(x["image"])
segmentation = x["seg"]
dc = 0.0
jc = 0.0
num_classes = 3
for index in range(num_classes):
y_true = segmentation
y_pred = pred_label[:,index,:,:].reshape(segmentation.shape)
dc += Dice3d(y_true, y_pred)
jc += Jaccard3d(y_true, y_pred)
            # average the per-class scores so the per-volume stats match the overall means
            dc_mean = float(dc) / float(num_classes)
            jc_mean = float(jc) / float(num_classes)
            dc_list.append(dc_mean)
            jc_list.append(jc_mean)
            out_dict["volume_stats"].append({
                "filename": x['filename'],
                "dice": dc_mean,
                "jaccard": jc_mean
                })
            print(f"{x['filename']}; Dice {dc_mean:.4f}; Jaccard {jc_mean:.4f}. "
                  f"{100*(i+1)/len(self.test_data):.2f}% complete")
out_dict["overall"] = {
"mean_dice": np.mean(dc_list),
"mean_jaccard": np.mean(jc_list)}
print("\nTesting complete.")
return out_dict
def run(self):
"""
Kicks off train cycle and writes model parameter file at the end
"""
self._time_start = time.time()
print("Experiment started.")
result = dict()
result['epoch'] = []
result['train_loss']=[]
result['val_loss'] = []
#Iterate over epochs
for self.epoch in range(self.n_epochs):
train_loss_list = self.train()
val_loss_list = self.validate()
result['epoch'].append(self.epoch)
result['train_loss'].append(np.mean(train_loss_list))
result['val_loss'].append(np.mean(val_loss_list))
print(f"Epoch: {self.epoch + 1}, Train error: {np.mean(train_loss_list)}, Validation loss: {np.mean(val_loss_list)}")
#Save model for inferencing
self.save_model_parameters()
self._time_end = time.time()
print(f"Run complete. Total tiem:{time.strftime('%H:%M:%S', time.gmtime(self._time_end - self._time_start))}")
return result
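# Dice3d and Jaccard3d are imported from utils.utils, which is not shown here.
# A minimal sketch of what such volumetric overlap metrics typically look like
# (an assumption about those helpers, kept commented out so the imported
# implementations are not shadowed):
#
#   def Dice3d(a, b):
#       # 2*|A inter B| / (|A| + |B|) over binarized 3D volumes
#       a, b = np.asarray(a) > 0, np.asarray(b) > 0
#       denom = a.sum() + b.sum()
#       return 2.0 * np.logical_and(a, b).sum() / denom if denom > 0 else 1.0
#
#   def Jaccard3d(a, b):
#       # |A inter B| / |A union B| over binarized 3D volumes
#       a, b = np.asarray(a) > 0, np.asarray(b) > 0
#       union = np.logical_or(a, b).sum()
#       return np.logical_and(a, b).sum() / union if union > 0 else 1.0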
|
{"hexsha": "f910761159cda58613279f11cb6f69e44d233706", "size": 7632, "ext": "py", "lang": "Python", "max_stars_repo_path": "src-python/trainer/training.py", "max_stars_repo_name": "RAFAELLOPE/Hippocampus_project", "max_stars_repo_head_hexsha": "fb9c1ed4227a8a4c0e4f73ecd6ba2e9f3d315021", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src-python/trainer/training.py", "max_issues_repo_name": "RAFAELLOPE/Hippocampus_project", "max_issues_repo_head_hexsha": "fb9c1ed4227a8a4c0e4f73ecd6ba2e9f3d315021", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src-python/trainer/training.py", "max_forks_repo_name": "RAFAELLOPE/Hippocampus_project", "max_forks_repo_head_hexsha": "fb9c1ed4227a8a4c0e4f73ecd6ba2e9f3d315021", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.92, "max_line_length": 130, "alphanum_fraction": 0.6154350105, "include": true, "reason": "import numpy", "num_tokens": 1745}
|
EDITED HANSARD * NUMBER 108
Monday, May 25, 1998
NATIONAL HEAD START PROGRAM
Request for and deferral of the division
BUDGET IMPLEMENTATION ACT, 1998
Bill C-36-Time allocation motion
Mr. Jean-Guy Chretien
ALGONQUIN SECONDARY SCHOOL IN NORTH BAY
Mr. Pat O'Brien
THE ROYAL CANADIAN MOUNTED POLICE
Hon. Pierre S. Pettigrew
Hon. Pierre S. Pettigrew
Hon. Pierre S. Pettigrew
Hon. Pierre S. Pettigrew
Hon. Arthur C. Eggleton
Hon. Arthur C. Eggleton
Hon. Pierre S. Pettigrew
Hon. Arthur C. Eggleton
Hon. Arthur C. Eggleton
Hon. Arthur C. Eggleton
Hon. Pierre S. Pettigrew
Ms. Nancy Karetak-Lindell
Hon. Ethel Blondin-Andrew
Hon. Arthur C. Eggleton
Hon. Arthur C. Eggleton
SCIENCE, RESEARCH AND DEVELOPMENT
Hon. Ronald J. Duhamel
Hon. Pierre S. Pettigrew
Hon. Arthur C. Eggleton
Mr. Svend J. Robinson
Ms. Caroline St-Hilaire
Hon. Arthur C. Eggleton
Certain remarks made during question period
GOVERNMENT RESPONSE TO PETITIONS
COMMITTEES OF THE HOUSE
Environment and sustainable development
Introduction and first reading
Introduction and first reading
Mr. Ovid L. Jackson
Ms. Rose-Marie Ur
The Multilateral Agreement on Investment
Hon. Arthur C. Eggleton
BUDGET IMPLEMENTATION ACT, 1998
BUSINESS OF THE HOUSE
BUDGET IMPLEMENTATION ACT, 1998
Ms. Jocelyne Girard-Bujold
Mr. Jean-Paul Marchand
Mr. Pierre de Savoye
Motions Nos. 68 and 69
Motions Nos. 70 and 71
Motions Nos. 72 and 73
Motions Nos. 74 and 75
Motions Nos. 76 and 78 to 81
Motions Nos. 82, 83 and 84
Motions Nos. 86, 87 and 88
Motions Nos. 91 and 92
Motions Nos. 94 to 96
Motions Nos. 97 to 102
Motions Nos. 104 to 106
Motion No. 1 negatived
Motion No. 2 negatived
Motion No. 7 negatived
Motion No. 11 negatived
Motion No. 55 negatived.
Motion No. 16 negatived
Motion No. 67 negatived
Motion No. 68 negatived
Motion No. 69 negatived
Motion No. 72 negatived
Motion No. 82 negatived
Motion No. 87 negatived
Motion No. 89 negatived
Motion No. 96 negatived
Bill C-19. Third reading
THE NATIONAL HEAD START PROGRAM
Motion as amended agreed to
Mr. Robert D. Nault
Mr. Robert D. Nault
EDITED HANSARD * NUMBER 108
Monday, May 25, 1998
The House met at 11 a.m.
NATIONAL HEAD START PROGRAM
The House resumed from April 20 consideration of the motion.
The Acting Speaker (Mr. McClelland):
The House has heard the terms of the motion.
Is it the pleasure of the House to adopt the motion?
Mr. Gurmant Grewal (Surrey Central, Ref.):
They are young people, seniors, families and women.
We should give our children a good start in life.
The government has already implemented Head Start programs in aboriginal communities.
All Canadians should be treated equally.
Aboriginal children benefit from the Head Start programs.
It in no way contradicts what the secretary of state said.
It is well known that healthy babies become healthy children.
He listened to what the witnesses had to say.
He has done excellent work.
The Bloc members should also support this motion.
We must do more than treat the symptoms of the problem.
We must attack the root of the problem.
Programs like those proposed in the motion attack the cause.
I would like to give an example.
One of my constituents, Sandor Nyerges, a veteran of both world wars, was deaf and mute.
At 80 years of age, he lived alone.
He died in hospital as a result of that attack.
The people of Surrey Central and I are furious.
Crimes of this kind are all too common in Surrey and elsewhere.
I would like to say in closing that Canadians are suffering.
We want safer streets and safer communities.
That is why we should all support Motion No. 261.
Mr. Scott Brison (Kings-Hants, PC):
The member continues to show his commitment to progressive social policy.
Not all regions have the same opportunities.
Only eight of those 23 students obtained their high school diploma.
More recently, in that community, the school has made significant progress.
The government chose a different, more politically attractive approach.
It decided to create the millennium fund in honour of the Prime Minister.
The facts prove the contrary.
However, once again, the Liberals did not get it.
Early intervention is the way to go.
A Head Start program would go a long way toward solving these problems.
Studies have shown that early intervention is socially one of the best approaches.
I asked him what he thought of early intervention.
Professor Stager is a specialist in post-secondary education.
We are on the threshold of the 21st century.
Ms. Beth Phinney (Hamilton Mountain, Lib.):
I would like to congratulate the member on his strong support for early childhood development.
From what age should we focus on a child's development?
What are the social repercussions of children's negative or positive experiences?
Research shows that negative experiences tend to produce impulsive and aggressive adults.
Mr. Yvan Loubier (Saint-Hyacinthe-Bagot, BQ):
In the end, this produces the lowest recidivism rate in Canada.
The results speak for themselves.
Over the past five years, we have heard it all.
We are a little confused about that.
What do the Reform Party members think?
Are they in favour of social reintegration?
They felt that lowering the age was not enough.
So it is a little confusing.
When we look at the employment insurance statistics, it is odious and scandalous.
It is political and administrative barbarism.
In 1989, there were one million unemployed.
Mr. Yvon Godin (Acadie-Bathurst, NDP):
Motion M-261 has three components.
The program proposed in this motion is not new.
I cannot think of a better investment.
Motion M-261 should go further.
For these reasons, I urge all my colleagues to support Motion M-261.
All our children deserve a good start.
The Acting Speaker (Mr. McClelland):
There are still three minutes left in the debate.
The Acting Speaker (Mr. McClelland):
The House has heard what the member is proposing.
Mr. Gar Knutson (Elgin-Middlesex-London, Lib.):
I remind members of the House that the motion reads as follows:
I could say a great deal about the merits of this proposal.
I strongly support the intent of the motion.
If we invest early, we get better literacy rates.
If we invest early, we lower crime rates.
If we invest early, we improve the health of the population.
Anyone who reads the newspapers knows full well that it is not easy.
Thus, the provinces would be left to devise their own programs.
This sort of thing leads to very difficult negotiations.
The wording causes me some problems.
All members recognize the merits of the measure.
It deserves support.
That would be yet another program piled onto the program.
These negotiations are proving difficult.
Mr. Rick Casson (Lethbridge, Ref.):
Mr. Speaker, I thank the other members present for allowing us to continue.
He devoted his life to caring for the wounded and the sick.
As my colleague recently wrote in a note:
The key word is prevention.
Children do not choose to become criminals.
The statistics are there, and early intervention programs can produce good results.
The long-term savings for Canadians are enormous.
Costs are going up, insurance premiums are going up.
Every child in Canada deserves the chance to develop normally.
I urge all members present to support my colleague's motion.
Mr. Speaker, I rise on a point of order.
The Acting Speaker (Mr. McClelland):
The Acting Speaker (Mr. McClelland):
and the leaders of aboriginal communities
The Acting Speaker (Mr. McClelland):
If so, we will put it to the House.
Mr. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
It will strengthen co-operation between the provinces and the federal government.
It will strengthen parental involvement.
It will give children a more promising future.
The Acting Speaker (Mr. McClelland):
The Chair has received notice that the amendment is in order.
As it is 12.03 p.m., the time provided for private members' business has expired.
BUDGET IMPLEMENTATION ACT, 1998
BILL C-36-TIME ALLOCATION MOTION
Hon. Don Boudria (Leader of the Government in the House of Commons, Lib.) moved:
The Acting Speaker (Mr. McClelland):
Is it the pleasure of the House to adopt the motion?
The Acting Speaker (Mr. McClelland):
All those in favour of the motion will please say yea.
The Acting Speaker (Mr. McClelland):
All those opposed will please say nay.
The Acting Speaker (Mr. McClelland):
In my opinion the nays have it.
And more than five members having risen:
The Acting Speaker (Mr. McClelland):
(The motion was put and agreed to.)
I declare the motion carried.
Mr. Gilles Duceppe (Laurier-Sainte-Marie, BQ):
Does the member have leave to move the motion?
Mr. Jean-Guy Chretien (Frontenac-Megantic, BQ):
It is scandalous, and on top of that it creates shameless duplication.
Mr. Gary Lunn (Saanich-Gulf Islands, Ref.):
The government should be ashamed.
Once again, it has imposed time allocation.
The bill is the subject of 107 proposed amendments.
It is absolutely disgusting and distressing.
I want to speak to the Group No. 1 motions.
We have no idea what direction this fund will take.
The Prime Minister seems to be burying all of this in a black hole.
We do not know where this money will go.
Imagine that it is given to certain students.
The government has a very short memory.
What are these funds for?
It does not want to put it in place too early in its mandate.
I would like to talk about the budget.
That is exactly what the government is doing.
They are not paying attention.
Let us look at what is happening.
They rose like trained seals and obeyed orders.
How can they do it?
Why do they bother coming to Ottawa?
They are ordered to come here.
I have been sitting here for a year.
They are having difficulties all over British Columbia, where I come from.
We have not yet decided who will be eligible.
Canadian students are waiting for help.
The facts are there.
Let the Liberals look at the numbers.
Any financial expert will confirm it for them.
They think it is a joke.
Judgment day will come at the next election.
We had a single day of debate.
However, they quickly changed their minds after coming to power.
We have seen them do it many times.
They received their orders from their whip.
I believe they call that a triple whip vote.
That is what we are getting once again.
The government could have taken positive action for Canada's students.
That is exactly what it did.
The bill offers nothing.
It serves only to hide $2.5 billion.
Even then, only 6% of Canada's students will be able to benefit from it.
Ms. Maud Debien (Laval East, BQ):
Let us talk first about the federal government's unconditional withdrawal from the field of education.
First of all, education is an exclusive constitutional jurisdiction of the provinces.
It cannot be said often enough.
However, the federal government has a greater capacity to tax than the provinces.
It then uses, as we know, and very often, its spending power.
The current Prime Minister of Canada is carrying out unprecedented assaults on the provinces.
This money is not the federal government's money.
But, as we can see, the more things change, the more they stay the same.
Canada is dysfunctional.
Mr. Peter Stoffer (Sackville-Eastern Shore, NDP):
Currently, some 130,000 students are in default on their loans.
An estimated 37,000 graduates have declared bankruptcy.
Missing a single payment is enough to be considered in default.
I would like to put a number of questions to the government.
Nor will this fund do any more to help graduates who find themselves unemployed.
The current policy imposed a two-year waiting period.
These questions should be put to the Minister of Finance as well.
They have risen very sharply over the past ten years.
That reason alone justifies rejecting the fund.
The NDP believes the federal government must play a leadership role.
British Columbia is now in its third year of a tuition freeze.
The NDP is calling on the federal government to show the necessary leadership.
What we want is a national system of grants.
Mr. Paul Bonwick (Simcoe-Grey, Lib.):
There is no arguing the point.
That is why making access to post-secondary education easier must be a national priority.
That is a lot of money.
He is dismayed, but not completely surprised.
The position of Mr. Bouchard's government has not changed.
Mr. Bouchard wants to exercise his right to opt out with full compensation.
His government has shown no flexibility.
Those are reasonable words spoken by a reasonable man.
I hear the members opposite saying "Not before the year 2000."
The millennium scholarship fund will benefit...
The Acting Speaker (Mr. McClelland):
Mr. Jay Hill (Prince George-Peace River, Ref.):
Here are the Liberals, for the 41st time since 1994, cutting debate short.
We have seen this situation very often in the past.
This group of amendments deals specifically with the millennium fund.
That is exactly what you have between the ears.
He rose in the House and read it out.
The fact is that the millennium fund will be a disgrace.
It will be a failure.
It is very simple.
We pointed it out during the budget debate.
It will compensate only certain people.
Is that not extraordinary?
Students are doing it: 100,000 out of 400,000.
Once again, the Liberal government wants to make distinctions.
The government will determine which of these young students get scholarships.
Those who belong to the Young Liberals of Canada may get one.
That is not quite accurate.
The money will be spent later.
The facts are very clear.
There was no mention, at that time, of a millennium fund.
They study here and end up working for companies like Microsoft.
Ms. Francine Lalonde (Mercier, BQ):
It is shameful that this government has imposed the gag.
Because it knows this bill makes no sense.
In Quebec, clearly, there is a consensus for saying no.
This grab is extremely harsh and severe.
It is a bad project.
It makes no sense.
It is odious, shameful and, I would go further, it is waste.
Why can I state so forcefully that it is waste?
For a reason I will try to explain clearly.
It means that a whole other structure will have to be built.
A heavy bureaucracy will have to be hired.
That is the pure scandal of it.
This Canada Millennium Scholarship Foundation is unacceptable in every respect.
What an image of federalism!
What recourse will students or universities have?
Mr. Maurice Dumas (Argenteuil-Papineau, BQ):
It was a Liberal who said that.
The federal government's imperialist attitude defies all logic.
ALGONQUIN SECONDARY SCHOOL IN NORTH BAY
Mr. Mauril Belanger (Ottawa-Vanier, Lib.):
Mr. Jim Gouk (West Kootenay-Okanagan, Ref.):
Oliver recently made headlines as the hate capital of Canada.
In fact, nothing could be further from the truth.
From June 19 to 21, Oliver will host the festival of the sun.
This year, the highlight will be a multicultural celebration.
I invite all Canadians to visit Oliver this summer, especially during the festival.
Mr. John Finlay (Oxford, Lib.):
Ms. Sophia Leung (Vancouver Kingsway, Lib.):
The G8 leaders recently endorsed such a plan.
Mr. Wayne Easter (Malpeque, Lib.):
Three full-time positions have been created thanks to the Didacticiens program.
Colleges and universities, and Canada as a whole, benefit from it.
The success of the first competition prompted the government to hold a second one.
Interested colleges and universities have until June 2, 1998 to submit their proposals.
I urge them to take advantage of the Didacticiens program and build the future.
Mr. Jason Kenney (Calgary Southeast, Ref.):
Mr. John Cannis (Scarborough Centre, Lib.):
His Holiness is the 270th successor of the apostle Andrew.
Ms. Marlene Jennings (Notre-Dame-de-Grace-Lachine, Lib.):
Mr. Speaker, today we mark Missing Children's Day.
A key element of the government's public safety mandate...
The member for Esquimalt-Juan de Fuca.
Mr. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
So far, five provinces and territories have such programs.
Mr. Louis Plamondon (Richelieu, BQ):
Mr. Pat O'Brien (London-Fanshawe, Lib.):
Ms. Michelle Dockrill (Bras d'Or, NDP):
Etan Patz never reappeared, but he has not been forgotten.
THE ROYAL CANADIAN MOUNTED POLICE
Mr. Nick Discepola (Vaudreuil-Soulanges, Lib.):
Bravo to the members of the RCMP!
Ms. Suzanne Tremblay (Rimouski-Mitis, BQ):
Adopted by the government in 1948, the flag had been favourably received by the public.
Mr. Mark Muise (West Nova, PC):
Mr. Robert Bertrand (Pontiac-Gatineau-Labelle, Lib.):
Mr. Grant Hill (Macleod, Ref.):
Hon. Herb Gray (Deputy Prime Minister, Lib.):
Mr. Speaker, the member has raised an interesting question.
We should let him do his work.
We invite all Canadians to work with this task force.
Mr. Grant Hill (Macleod, Ref.):
The blood was contaminated.
Hon. Allan Rock (Minister of Health, Lib.):
Mr. Grant Hill (Macleod, Ref.):
Mr. Speaker, it is always the same refrain.
However, this is a new question.
The Prime Minister has insulted all the victims.
This is not about heroin.
This is not about crack.
Will the government apologize to them right now?
Hon. Herb Gray (Deputy Prime Minister, Lib.):
He was simply speaking of certain factors that deserved to be taken into consideration.
Ms. Deborah Grey (Edmonton North, Ref.):
I would like to put the following question to the government.
Hon. Allan Rock (Minister of Health, Lib.):
Mr. Speaker, it is our government that put the agreement in place.
That is exactly what we did.
Ms. Deborah Grey (Edmonton North, Ref.):
Hon. Allan Rock (Minister of Health, Lib.):
Mr. Gilles Duceppe (Laurier-Sainte-Marie, BQ):
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Hon. Pierre S. Pettigrew:
Mr. Gilles Duceppe (Laurier-Sainte-Marie, BQ):
Mr. Speaker, let us talk about balance and improvement.
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Ms. Christiane Gagnon (Quebec, BQ):
Mr. Speaker, my question is for the Minister of Human Resources Development.
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Hon. Pierre S. Pettigrew:
The hon. member for Quebec has the floor.
Mr. Speaker, the minister should break with his habits and look at reality...
I thought the hon. minister had finished his answer.
The hon. Minister of Human Resources Development has the floor.
Hon. Pierre S. Pettigrew:
Ms. Christiane Gagnon (Quebec, BQ):
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Our reform is a search for balance.
Ms. Alexa McDonough (Halifax, NDP):
The Minister of Health has a duty to ensure the safety of blood products.
Can he state frankly to Canadians that this blood product is safe?
Hon. Allan Rock (Minister of Health, Lib.):
This product meets safety standards both here and in the United States.
Ms. Alexa McDonough (Halifax, NDP):
Hon. Allan Rock (Minister of Health, Lib.):
We have put strict new requirements in place.
Mr. Peter MacKay (Pictou-Antigonish-Guysborough, PC):
Hon. Anne McLellan (Minister of Justice and Attorney General of Canada, Lib.):
It is the Government of Canada that is being sued.
Mr. Peter MacKay (Pictou-Antigonish-Guysborough, PC):
The department has an obligation to represent its client diligently.
Hon. Anne McLellan (Minister of Justice and Attorney General of Canada, Lib.):
The client is not the commission either.
As the member is a lawyer, he should know that the commission no longer exists.
The client in this case is the Government of Canada.
Ms. Diane Ablonczy (Calgary-Nose Hill, Ref.):
Mr. Speaker, another terrible scandal has just shaken the Department of National Defence.
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
The government is taking the measures that are required.
Very soon, I will be announcing...
The member for Calgary-Nose Hill.
Ms. Diane Ablonczy (Calgary-Nose Hill, Ref.):
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
Absolutely not, Mr. Speaker.
We want justice to be done in all these cases.
We want to fully integrate men and women into the Canadian armed forces.
We have adopted new training methods.
Mr. Paul Crete (Kamouraska-Riviere-du-Loup-Temiscouata-Les Basques, BQ):
Hon. Paul Martin (Minister of Finance, Lib.):
It had to be filled, and that is what we did.
That is what will guarantee that premiums do not have to be raised.
Mr. Paul Crete (Kamouraska-Riviere-du-Loup-Temiscouata-Les Basques, BQ):
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Mr. Peter Goldring (Edmonton East, Ref.):
Mr. Speaker, my question is for the Minister of National Defence.
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
On the contrary, the government believes the guilty must be weeded out.
Mr. Peter Goldring (Edmonton East, Ref.):
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
Mr. Speaker, that is simply not true.
Once again, their information is wrong.
Many of the guilty have been brought to justice.
We intend to get to the bottom of this matter.
Mr. Michel Gauthier (Roberval, BQ):
Mr. Michel Gauthier (Roberval, BQ):
I cannot believe it!
The hon. member for Roberval has the floor.
Hon. Herb Gray (Deputy Prime Minister, Lib.):
It is totally deplorable.
Mr. Jim Hart (Okanagan-Coquihalla, Ref.):
The Minister of Justice.
Hon. Anne McLellan (Minister of Justice and Attorney General of Canada, Lib.):
We did so notably in the case cited this afternoon.
Mr. Jim Hart (Okanagan-Coquihalla, Ref.):
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
Mr. Stephan Tremblay (Lac-Saint-Jean, BQ):
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
He even cancelled the schedule for the two days of negotiations that had been planned.
There is no room for negotiation on Quebec's side.
Ms. Nancy Karetak-Lindell (Nunavut, Lib.):
Hon. Ethel Blondin-Andrew (Secretary of State (Children and Youth), Lib.):
The member for Fraser Valley has the floor.
Mr. Chuck Strahl (Fraser Valley, Ref.):
Canadians now have a few questions to ask on this subject.
Will they throw souvenirs from the gallery right after question period?
Hon. Herb Gray (Deputy Prime Minister, Lib.):
Mr. Chuck Strahl (Fraser Valley, Ref.):
I understand that some Liberal members are hurt by this affair.
A Canadian diplomat said he was a little upset.
They must pay the bill for that.
Hon. Herb Gray (Deputy Prime Minister, Lib.):
Mr. Chris Axworthy (Saskatoon-Rosetown-Biggar, NDP):
Mr. Speaker, my question is for the Minister of National Defence.
If there is no such thing, why not?
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
Mr. Speaker, yes, there is a zero tolerance policy.
We do not tolerate these incidents of sexual assault.
We have also taken a number of measures to provide better training.
We have a new standard for the prevention of harassment and racism.
We have specialized counsellors.
We are taking all necessary measures to enforce our zero tolerance policy.
Mr. Chris Axworthy (Saskatoon-Rosetown-Biggar, NDP):
As I said, he described this behaviour as deplorable.
Does he not know that these acts are more than deplorable, they are repugnant?
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
Mr. Speaker, I also used words like repugnant.
I used words like unacceptable, on top of all the other words.
Mr. Greg Thompson (Charlotte, PC):
I am asking for clarification.
Does that accurately reflect the minister's position?
If not, what is his position?
Hon. Allan Rock (Minister of Health, Lib.):
Mr. Speaker, those statements are totally false.
Health Canada is carrying on business as usual.
Mr. Greg Thompson (Charlotte, PC):
Can the minister say where the negotiations are headed?
Has the minister accepted that all the victims should be compensated?
Hon. Allan Rock (Minister of Health, Lib.):
SCIENCE, RESEARCH AND DEVELOPMENT
Mr. Reg Alcock (Winnipeg South, Lib.):
It will be at the forefront of research in this field.
Mr. Rob Anders (Calgary West, Ref.):
Hon. Pierre S. Pettigrew (Minister of Human Resources Development, Lib.):
Mr. Speaker, I will look into the specific case the member opposite raises.
Each project is examined very seriously.
Ms. Pierrette Venne (Saint-Bruno-Saint-Hubert, BQ):
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
We will not tolerate this type of behaviour in the Canadian forces.
We are taking action and will continue to do so.
Mr. Svend J. Robinson (Burnaby-Douglas, NDP):
Mr. Speaker, my question is for the Minister of Fisheries and Oceans.
Hon. David Anderson (Minister of Fisheries and Oceans, Lib.):
I spoke on Friday with the Governor of Alaska.
Mr. Andre Bachand (Richmond-Arthabaska, PC):
Hon. Allan Rock (Minister of Health, Lib.):
The task force has already begun its work.
We expect the results to be available shortly.
Ms. Karen Redman (Kitchener Centre, Lib.):
Mr. Speaker, my question is for the Minister of Finance.
This weekend, Canada hosted the finance ministers of the APEC countries.
Hon. Paul Martin (Minister of Finance, Lib.):
The vast majority of the finance ministers declared themselves in favour of our proposal.
The member's question reflects the support of the House, I assume.
Mr. Gary Lunn (Saanich-Gulf Islands, Ref.):
Hon. David Anderson (Minister of Fisheries and Oceans, Lib.):
We are going to resume negotiations with the Americans.
Ms. Caroline St-Hilaire (Longueuil, BQ):
Mr. Speaker, my question is for the Minister of National Defence.
Hon. Arthur C. Eggleton (Minister of National Defence, Lib.):
This order applies to the entire chain of command.
In addition, anti-harassment advisers are in place.
I have already mentioned the National Investigation Service.
CERTAIN REMARKS MADE DURING QUESTION PERIOD
Hon. Herb Gray (Deputy Prime Minister, Lib.):
In fact, it is the fifth.
Canada is the seventh.
Thank you for allowing me to make this correction.
Ms. Marlene Jennings (Notre-Dame-de-Grace-Lachine, Lib.):
I ask that he withdraw those unparliamentary and insulting words.
GOVERNMENT RESPONSE TO PETITIONS
COMMITTEES OF THE HOUSE
ENVIRONMENT AND SUSTAINABLE DEVELOPMENT
Hon. Charles Caccia (Davenport, Lib.):
Mr. Ovid L. Jackson (Bruce-Grey, Lib.):
Mr. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
Mr. Speaker, I have the honour today to present two petitions.
The first comes from Bud and June Boomer, two residents of my riding.
Mr. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
Mr. Janko Peric (Cambridge, Lib.):
Mr. Peter MacKay (Pictou-Antigonish-Guysborough, PC):
The petitioners are concerned about the allocation of this resource.
Ms. Karen Redman (Kitchener Centre, Lib.):
Mr. Paul Szabo (Mississauga South, Lib.):
Mr. Paul Szabo (Mississauga South, Lib.):
Mr. Paul Steckle (Huron-Bruce, Lib.):
Ms. Rose-Marie Ur (Lambton-Kent-Middlesex, Lib.):
Mr. Nelson Riis (Kamloops, NDP):
This petition concerns the unfairness of the tax system in Canada.
Mr. Nelson Riis (Kamloops, NDP):
THE MULTILATERAL AGREEMENT ON INVESTMENT
Mr. Nelson Riis (Kamloops, NDP):
Mr. Speaker, the third concerns the Multilateral Agreement on Investment.
Mr. Peter Adams (Peterborough, Lib.):
The final cost will be disclosed when the federal government's decision is announced.
Government Services Canada and
E.H. Industries Ltd
Settlement agreement with E.H. Industries in the EH-101 helicopter file
Here is the detailed breakdown of these costs:
Total project costs for the EH-101s
These amounts were paid out before the contract was cancelled in November 1993:
E.H. Industries: $136.6 million
Loral: $98.4 million
The costs of cancelling the contract break down as follows:
E.H. Industries: $21.2 million (announced today)
Loral: $67.5 million (announced March 31, 1995)
Total: $478.3 million
PWGSC regrets any inconvenience and confusion caused by this omission.
We will retransmit the English and French versions of this release.
Hon. David Anderson (Minister of Fisheries and Oceans, Lib.):
Hon. Jim Peterson (Secretary of State (International Financial Institutions), Lib.):
In accordance with the Office of the Superintendent of Financial Institutions:
Mr. Peter MacKay (Pictou-Antigonish-Guysborough, PC):
Mr. Speaker, I rise on a point of order, once again, with reluctance.
Question No. 21 is being left to languish on the Order Paper.
The days and the months have gone by.
Time flies, but the question remains.
When can we expect an answer?
Do the other questions remain on the Order Paper?
BUDGET IMPLEMENTATION ACT, 1998
Mr. Maurice Dumas (Argenteuil-Papineau, BQ):
will again be penalized [...] by endless discussions and mechanisms of all kinds.
Quebec is a leader in education in Canada.
Any other form of education funding is nothing but interference.
The government should instead improve the existing Quebec system.
Those are the Prime Minister's words.
BUSINESS OF THE HOUSE
Ms. Marlene Catterall (Ottawa West-Nepean, Lib.):
Mr. Speaker, I rise on a point of order.
all questions necessary to dispose of the report stage of C-36;
the motion for third reading of C-19;
the motion for second reading of C-247;
and all questions necessary to dispose of Motion M-261.
We are debating Bill C-36.
Could that be repeated, please?
Did the member understand correctly?
Is it the pleasure of the House to adopt this motion?
BUDGET IMPLEMENTATION ACT, 1998
Ms. Sarmite Bulte (Parkdale-High Park, Lib.):
No country can rely simply on its primary resources to ensure its economic growth.
In all, more than one million scholarships will be awarded.
These are matters for the provincial governments and the institutions themselves.
That has been very clear from the start.
An independent foundation will manage the fund.
It will be managed not by the government but by private individuals.
I believe this bill will allow them to do so.
Mr. Philip Mayfield (Cariboo-Chilcotin, Ref.):
I had the pleasure of speaking to this bill at second reading.
Unfortunately, it still has a long way to go.
Group No. 1 deals essentially with the millennium scholarship fund.
I am pleased to support many of the amendments in this group.
I would like to devote part of my speaking time to the proposed amendments.
I can understand Quebec's position.
All the provinces have had similar problems.
The reduction in these federal payments led to a drastic drop in services.
Canadians are asking the government to give them value for their money.
Motions Nos. 55, 57 and 58, put forward by the government, are quite different.
Motion No. 55 allows the government to appoint the foundation's auditor.
Ms. Jocelyne Girard-Bujold (Jonquiere, BQ):
Today we are particularly interested in the motions dealing with the millennium scholarships.
It is, I must say, rather difficult to follow this government's policies.
The Chretien government really understands nothing about the aspirations of Quebeckers.
I hope she will respect the Standing Orders in this regard.
The member for Jonquiere has the floor.
Ms. Jocelyne Girard-Bujold:
It has been innovative in the management of its program.
Mr. Steve Mahoney (Mississauga West, Lib.):
The Minister of Finance tabled the budget on February 24.
Members opposite are shaking their heads as if they do not understand.
Why, in their opinion, must we resort to time allocation?
This bill is part of the government's policies and program.
They would simply be negative.
I hear no positive comments from them.
What do they have against students and higher education?
The left-wing New Democrat members would do away with tuition fees altogether.
Yet I hear members speaking against increased access to education.
I know the opposition's policy.
I spent five years in opposition under the New Democrats in Ontario.
I understand that the opposition's basic job is to be negative.
It is simply not possible.
That should not surprise us, though.
Let us look at our electoral system for a moment.
Canadian voters elected a majority government.
They therefore put us in power.
We then introduced a budget.
There will be tax relief on the interest on all student loans.
Call them about it!
I believe you will find that, on the whole, they are in favour.
The interest exemption is being extended to more graduates.
Just eliminate the cost.
The taxpayers will take care of everything.
There is nothing to fear.
In the world of socialism, they are completely off track on this point.
Just imagine, the Reform Party is against that.
There are many examples around the world.
Why are people prepared to invest in the Republic of Ireland?
In fact, it is a model to follow.
They are quite right.
We will not let them do that.
What this plan does is allow co-operation with the provincial governments.
It allows co-operation.
That would generate a great deal of interest.
Because it is in their interest to do so.
They want to produce excellent graduates.
That is what we are doing.
You in the NDP are doing it because you are bent on getting rid of it.
They want to raise prices, cut taxes, help the rich.
That is all they want.
They are completely off track.
It is good public policy.
Mr. Chuck Strahl (Fraser Valley, Ref.):
It was an impressive sight.
And why is there now time allocation?
Imagine something that big.
Almost as miraculous as the caramel in the Caramilk bar.
The Reform Party raised the question in the House.
We have a habit on this side of the House.
That was the first mistake.
The second mistake is the budget itself as a whole.
Taxpayers have been suffering for a long time and are very patient.
But nothing happens.
But the government makes sure that everyone is taxed.
It is almost dogma.
We must all pay taxes, and it has been going on for a long time.
The Liberals have refined the notion even further.
Imagine what would happen if people's tax burden were lightened.
Imagine what it would be like if they were given a bit of breathing room.
On Sunday, I met a person from my parish.
He earns $17,000 a year in total.
I had no answer to give him.
We could point this out to him.
I myself have four children of university age.
I would love to have them educated with other taxpayers' money.
The other 93 students will pay for the benefit of 7.
I do not think that is how it should work.
Mr. Jean-Paul Marchand (Quebec East, BQ):
Much has been said about this bill.
In fact, that is what is being done.
No one is opposed to improving education in Canada.
We want that $2.5 billion to go to students.
We want it, of course, but not this way.
And Quebec students are among the least indebted in the country.
Once again, the Constitution is being disrespected.
It is a clear case of duplication, and it obviously costs more.
That would reduce administrative costs.
Who benefits from these millennium scholarships?
Mr. John Williams (St. Albert, Ref.):
I would like to speak to certain aspects of this budget.
First of all, whereas the minister had announced a balanced budget.
I take issue with the way the minister has done his accounting.
We have not even set it aside yet.
Nor would that be in anyone's interest.
I must congratulate the government on balancing the budget.
Pushed by the Reform Party, it finally got there.
Tax revenues come at a price.
We have just had seven years of economic expansion.
We know that periods of expansion come to an end.
We welcome the resignation of President Suharto.
But it is to be feared that his successor will follow his example.
It should continue its efforts to curb waste.
I regularly publish a report on waste.
Last week's was devoted to various grants.
I wonder how that can be of any use to Canadians.
The wildest spending is rampant throughout the government apparatus.
In many areas, it is nothing but waste, mismanagement and incompetence.
Ms. Monique Guay (Laurentides, BQ):
I will come back to this subject a little later in my speech.
Let us be clear from the outset.
I would add the following descriptions: irresponsible, centralizing and impoverishing.
This surplus is expected to reach $25 billion in the year 2000.
Imagine what could be done with all this money that is currently lying dormant.
Allow me to describe these budgetary practices as fraudulent and as outright theft.
In short, as far as jobs are concerned, this budget is a sham.
Yet the unemployment rate stubbornly remains at around 9%.
Those are this government's real priorities.
That is one child in five.
Those are the priorities of the government opposite.
That is what is called hypocrisy, pure and simple.
Mr. Jason Kenney (Calgary Southeast, Ref.):
This is not a new trend.
I would now like to address the key provisions of the bill.
It is the concept of a democratic mandate.
Nor is it the opinion of members driven by partisanship.
What the Auditor General told the government and Parliament is perfectly clear.
What are the Liberals doing with this $2.5 billion?
They are creating the millennium scholarship fund.
In the name of democracy, we will vote against this bill.
Mr. Rejean Lefebvre (Champlain, BQ):
Why are we putting forward this group of motions?
For a good reason.
Once again, it is a demonstration of the government's bad faith.
The first observation I make is the government's bad faith.
There is a second one.
The board of directors will not have the power to delegate to the provinces.
That means 80,000 leaders of small and medium-sized businesses.
So it is very disappointing.
Why not postpone its consideration?
I find that to be bad faith.
These are not exactly people identified with Quebec's sovereignists.
If there is an agreement, it will be incorporated into the act.
We will never tolerate it.
Ms. Diane Ablonczy (Calgary-Nose Hill, Ref.):
Here are the problems in question.
First, there is the whole design of the fund.
Second, the accounting used to finance this program is a problem.
I would like to say a few words about each of these problems.
The second act began in February with the budget.
The Liberals like to have a theme or a cause.
They like to be able to say they care about something.
The budget before last was about poor children.
The last budget had education as its theme.
Students were going to be helped.
The fact is that this concern arose quite suddenly.
It emerged only after the deep cuts to post-secondary education.
They will therefore hand part of it over to the federal government.
The opposition has real fears in this regard.
The measure does not respect provincial jurisdiction over education.
Did the federal government consult Manitoba's Minister of Education?
Did it do that?
Such a gesture was fashionable.
The government must have done some poll showing that education worries Canadians.
Let us think about that for a moment.
What will happen if he records that purchase in his 1998 expenses?
That farmer would be laughed at by the officials.
It is a disgrace.
Mr. Ghislain Fournier (Manicouagan, BQ):
This part provides an endowment of $2.5 billion.
Everyone in Quebec's education community is against this project.
Quebec, then led by Maurice Duplessis, blocked the federal project.
The millennium scholarships are nothing but a pretext.
It is merely creating an additional visibility tool for itself.
Mr. Randy White (Langley-Abbotsford, Ref.):
Many people watching our proceedings on television do not understand this.
I would be inclined to call for quorum, but I will not.
Let us think about it for a moment.
What is at stake is the very foundation of democracy.
Nothing is provided in the way of living allowances.
The real question is where the money comes from.
It comes from the 1997-98 budget.
Why do I say that?
The government said it had balanced the books.
It says it has no money, that it has balanced the budget.
Of the 175 government members, only three are in the House.
It is truly disgusting.
I think I will address my colleagues.
Where is the substance?
Where is the substance, my colleague asks.
Where is the substance in all this?
There is not one cent.
The government has not spent one cent.
It has no plans to spend one cent.
Now the count is down to two.
It is truly disgusting.
It has spent nothing of any significance.
Mr. Pierre de Savoye (Portneuf, BQ):
Mr. Speaker, we are studying Bill C-36.
This bill has one big problem, and it is called the millennium scholarships.
The scholarships are intended for students.
So, once again, at first glance, the scholarships are a good idea.
I would also like to talk about the word millennium.
Why millennium scholarships?
It is a system that works well in Quebec.
The country of Quebec is better off than the country of Canada in this respect.
Let us get back to our scholarships.
These scholarships are available to students on the basis of need.
Businesses do not only need people who overperform in their studies.
In Quebec, everyone is against this millennium scholarship system.
It is $600 million and a little more.
That is a lot of money.
Six hundred million dollars would solve a lot of problems in health care.
We pay twice.
Six hundred million dollars is a fortune.
But there is more.
Mr. Darrel Stinson (Okanagan-Shuswap, Ref.):
Mr. Speaker, I would like to speak to Bill C-36.
I have to question that.
It is a bit like tearing off your arm and giving you back a finger.
The government is very good at that.
It is not its money.
It is our money.
It does not belong to the government.
It is simply an attempt to make the Prime Minister look good.
That is what the government is so proud of.
They have learned the fine art of dictatorship.
We see what is happening.
The reality is quite different.
The government is stifling our economy.
The number of bankruptcies has reached a record level.
There are people who are truly suffering.
What is the government doing in response?
They know it is true.
Canadians know it is true.
The Auditor General has expressed enormous reservations about this fund.
What is the government doing?
It pays no attention to the Auditor General.
They have had many years of experience.
Let us look again at what is happening.
That is what the government has done, 26% in five years.
Are they proud of it?
And it is not over.
They are taking money from the poor.
They are taking money from everyone they can take it from.
And they have the nerve to say they are doing it to help students.
Our students want jobs.
They want to be able to work in this country.
They want to continue their studies.
They do not at all like having so much debt when they finish their studies.
They are right to be worried.
We all know it very well.
It is high time the government realized it.
This whole business is a real farce.
It is a joke at the taxpayers' expense.
Ms. Marlene Jennings (Notre-Dame-de-Grace-Lachine, Lib.):
The Strategy takes comprehensive, co-ordinated action on seven fronts.
These investments are essential to our country's competitiveness.
Those are the two sides of the coin.
Mr. Stephan Tremblay (Lac-Saint-Jean, BQ):
I have only ten minutes, but I could talk about this for an hour.
When I say that Quebeckers have values, those were their values.
We have never sought to impose these values on the rest of Canada.
Up to that point, I have no problem.
But in creating the millennium scholarship foundation, they are duplicating.
Right there, I think there is certainly a loss of efficiency.
I must say I have no problem with that.
I could not believe it.
I could talk about many other elements.
I will address other elements that are perhaps not discussed enough.
The millennium scholarship foundation will be managed by a board of directors.
On this point, I have a serious problem of conscience.
I feel as though I am signing a blank cheque.
There are serious questions to be asked here.
Another element is the question of equal opportunity.
Today, in fact, I studied this bill in committee.
But what are these proportions?
We could at least debate this question, but we cannot even do that.
Second, we do not even know the members of this board of directors.
Moreover, we do not know the essence, the purpose, of this project.
I recently met students from Alberta.
They see that the federal government is going to step in, and they are quite happy about it.
If it wants to proceed that way, that does not bother me.
After that, people wonder why we want our own country.
It seems to me that it is glaringly obvious.
We think it is perhaps not bad in itself.
But no, instead of that, the federal government imposes its rules.
It seems simple to me.
Then we will be asked: What does Quebec want?
All this for the federal government's visibility.
That disappoints me, because education is the future.
Mr. Monte Solberg (Medicine Hat, Ref.):
Mr. Speaker, I rise on a point of order.
I ask for unanimous consent for the following motion.
The House has heard the motion.
Is it the pleasure of the House to adopt the motion?
Mr. Nelson Riis (Kamloops, NDP) moved:
(a) the total amount of taxes collected in the year;
(b) the total amount of taxes spent;
(c) a detailed statement of the nature of that spending;
(d) any other information the minister may prescribe by regulation.
Mr. Nelson Riis (Kamloops, NDP) moved:
summary conviction, to a maximum fine of $50,000 and
(a) the total amount of taxes collected in the year;
(b) the total amount of taxes spent;
(c) a detailed statement of the nature of that spending;
(d) any other information the minister may prescribe by regulation.
(a) the total amount of taxes collected in the year;
(b) the total amount of taxes spent;
(c) a detailed statement of the nature of that spending;
(d) any other information the minister may prescribe by regulation.
Mr. Nelson Riis (Kamloops, NDP) moved:
mark and a stamp indicating that it
mark and a stamp indicating that it
Ms. Christiane Gagnon (Quebec, BQ) moved:
That Bill C-36 be amended by deleting Clause 91.
Supplementary child tax benefit
Que le projet de loi C-36 soit modifie par suppression de l'article 100.
M. Nelson Riis (Kamloops, NPD) propose:
L'alinea 15l) de la meme loi est
Mme Christiane Gagnon (Quebec, BQ) propose:
Que le projet de loi C-36 soit modifie par suppression de l'article 103.
Que le projet de loi C-36, a l'article 104, soit modifie
a) par substitution, aux lignes 4 a 10, page 51, de ce qui suit:
(RA2 - RA1) x C1999
Pour l'application du paragraphe (8.2),
M. Scott Brison (Kings-Hants, PC) propose:
Mme Christiane Gagnon (Quebec, BQ) propose:
(RA2 - RA1) x C2000
Pour l'application du paragraphe (8.3),
M. Nelson Riis (Kamloops, NPD) propose:
M. Scott Brison (Kings-Hants, PC) propose:
M. Nelson Riis (Kamloops, NPD) propose:
Que le projet de loi C-36 soit modifie par suppression de l'article 125.
Que le projet de loi C-36 soit modifie par suppression de l'article 126.
Que le projet de loi C-36 soit modifie par suppression de l'article 127.
Que le projet de loi C-36 soit modifie par suppression de l'article 128.
Que le projet de loi C-36 soit modifie par suppression de l'article 129.
Que le projet de loi C-36 soit modifie par suppression de l'article 130.
M. Andre Bachand (Richmond-Arthabaska, PC) propose:
M. Nelson Riis (Kamloops, NPD) propose:
Que le projet de loi C-36 soit modifie par suppression de l'article 131.
Que le projet de loi C-36 soit modifie par suppression de l'article 132.
Que le projet de loi C-36 soit modifie par suppression de l'article 133.
(2) Les articles 127 a 132 entrent en
Apres l'appel du timbre:
Nous allons avoir une serie de votes ce soir.
Le premier vote porte sur la motion no 1.
(La motion no 1, mise aux voix, est rejetee.)
Je declare la motion no 1 rejetee.
Y a-t-il consentement unanime?
Apres le vote initial, ils seront inclus dans notre total.
En ce qui concerne ces votes, je voterai comme mes collegues reformistes.
Nous avons tous vote de la meme facon.
Le Parti reformiste votera oui dans le present cas.
Voir la liste sous le vote no 159.]
Je declare les motions nos 88 et 90 rejetees.
Le vote suivant porte sur la motion no 2.
Est-ce d'accord?
Monsieur le President, les deputes reformistes presents votent oui.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
(La motion no 2, mise aux voix, est rejetee.)
Je m'adresse au whip du Nouveau Parti democratique.
Voir la liste sous le vote no 160.]
Je declare les motions nos 3, 12, 13 et 19 rejetees.
Le vote suivant porte sur la motion no 7.
Monsieur le President, les deputes reformistes presents voteront en faveur de cette motion.
Monsieur le President, les deputes du Bloc quebecois votent contre cette motion.
Monsieur le President, les deputes du NPD present voteront en faveur de cette motion.
Monsieur le President, les deputes de notre parti votent contre cette motion.
Au nom des habitants de York-Sud-Weston, j'appuie cette motion.
(La motion no 7, mise aux voix, est rejetee.)
Je declare la motion no 7 rejetee.
La Chambre consent-elle a ce que nous procedions de cette facon?
Monsieur le President, les deputes reformistes presents voteront contre.
Monsieur le President, les deputes du Bloc s'opposent a cette motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
Je voterai en faveur de cette motion.
(La motion no 11, mise aux voix, est rejetee.)
Je declare la motion no 11 rejetee.
Y a-t-il consentement unanime a cet egard?
Monsieur le President, j'invoque le Reglement.
Je voudrais clarifier mes votes sur ces motions.
Je voterai en faveur des motions nos 55, 97 et 103.
Je voterai contre les motions 57 et 58.
(La motion no 57, mise aux voix, est rejetee.)
Je declare les motions nos 55, 57, 58, 97 et 103 rejetees.
Le vote suivant porte sur la motion no 16.
Monsieur le President, les deputes reformistes voteront en faveur de cette motion.
Monsieur le President, les deputes du Bloc quebecois s'opposent a cette motion.
Monsieur le President, les deputes neo-democrates voteront en faveur de la motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
(La motion no 16, mise aux voix, est rejetee.)
Je declare la motion no 16 rejetee.
La Chambre consent-elle a ce que nous procedions de cette facon?
Voir la liste sous le vote no 164.]
Le vote suivant porte sur la motion no 67.
Monsieur le President, c'est une bonne motion.
Les deputes reformistes voteront en faveur.
Monsieur le President, les deputes neo-democrates presents voteront contre la motion.
Monsieur le President, les deputes de notre parti votent contre cette motion.
(La motion no 67, mise aux voix, est rejetee.)
Je declare la motion no 67 rejetee.
Le vote suivant porte sur la motion no 68.
La Chambre consent-elle a ce que nous procedions de cette facon?
Voir la liste sous le vote no 165 .]
Je declare donc la motion no 71 rejetee.
Monsieur le President, j'invoque le Reglement.
La motion no 68 fera l'objet d'un vote distinct.
Le vote suivant porte donc sur la motion no 68.
Monsieur le President, j'invoque le Reglement.
La Chambre consent-elle a ce que nous procedions de cette facon?
Monsieur le President, les deputes reformistes presents votent contre a moins d'indication contraire.
Je voterai en faveur de la motion no 68.
Monsieur le President, les deputes du Bloc quebecois voteront en faveur de cette motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
(La motion no 68, mise aux voix, est rejetee.)
Je declare la motion no 68 rejetee.
Le vote porte maintenant sur la motion no 69.
Est-on d'accord pour proceder de cette facon?
Monsieur le President, les deputes reformistes qui sont presents votent en faveur de cette motion.
Monsieur le President, les deputes du Bloc quebecois sont d'accord avec cette motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
Monsieur le President, je vote en faveur de cette motion.
(La motion no 69, mise aux voix, est rejetee.)
Y a-t-il consentement unanime pour proceder de cette facon?
Voir la liste sous le vote no 167.]
Le vote suivant porte sur la motion no 72.
Y a-t-il consentement unanime pour proceder de cette facon?
Monsieur le President, les deputes reformistes qui sont presents rejettent cette motion.
Monsieur le President, les deputes du Bloc quebecois sont en desaccord avec cette motion.
Monsieur le President, les deputes neo-democrates votent en faveur de cette motion.
Monsieur le President, les deputes de notre parti votent contre cette motion.
Monsieur le President, je vote en faveur de cette motion.
(La motion no 72, mise aux voix, est rejetee.)
Y a-t-il consentement unanime pour proceder de cette maniere?
Voir la liste sous le vote no 168.]
Je declare les motions nos 80 et 81 rejetees.
Le vote porte maintenant sur la motion no 82.
Y a-t-il consentement unanime pour proceder de cette maniere?
Monsieur le President, les deputes reformistes presents votent contre cette motion.
Monsieur le President, les deputes du Bloc quebecois sont en faveur de cette motion.
Monsieur le President, les deputes neo-democrates presents votent contre.
Monsieur le President, les deputes de notre parti votent contre cette motion.
(La motion no 82, mise aux voix, est rejetee.)
Y a-t-il consentement pour proceder de cette maniere?
Voir la liste sous le vote no 169.]
Y a-t-il consentement pour proceder de cette maniere?
Monsieur le President, les deputes reformistes presents votent contre cette motion.
Monsieur le President, les deputes du Bloc quebecois votent en faveur de cette motion.
Monsieur le President, les deputes neo-democrates presents votent en faveur de cette motion.
Monsieur le President, les deputes de notre parti votent contre cette motion.
(La motion no 87, mise aux voix, est rejetee.)
Est-ce d'accord?
Monsieur le President, les deputes reformistes presents votent en faveur de la motion.
Monsieur le President, les deputes du Bloc quebecois s'opposent a cette motion.
Monsieur le President, les deputes neo-democrates presents votent contre la motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
(La motion no 89, mise aux voix, est rejetee.)
Est-on d'accord?
Voir la liste sous le vote no 171.]
Est-on d'accord?
Monsieur le President, les deputes reformistes presents votent contre la motion.
Monsieur le President, les deputes du Bloc quebecois sont en faveur de cette motion.
Monsieur le President, les deputes neo-democrates presents votent en faveur de la motion.
Monsieur le President, les deputes de notre parti votent en faveur de cette motion.
(La motion no 96, mise aux voix, est rejetee.)
Je declare la motion no 96 rejetee.
Est-on d'accord pour proceder de cette facon?
Nous sommes tres decus.
Nous allons devoir voter non dans les deux cas.
Monsieur le President, les deputes du Bloc quebecois sont contre cette motion.
Monsieur le President, les deputes neo-democrates presents se sentent obliges de voter non egalement.
Monsieur le President, les deputes de notre parti votent contre cette motion.
(La motion, mise aux voix, est adoptee.)
Je declare la motion adoptee.
Est-on d'accord pour proceder de cette facon?
Monsieur le President, les deputes reformistes presents votent contre ce projet de loi.
Monsieur le President, les deputes du Bloc quebecois s'opposent a cette motion.
Monsieur le President, les deputes neo-democrates votent en faveur de cette motion.
Monsieur le President, les deputes de notre parti votent contre cette motion.
(La motion, mise aux voix, est adoptee.)
Je declare la motion adoptee.
(Le projet de loi, lu pour la troisieme fois, est adopte.)
La Chambre reprend l'etude de la motion, interrompue le 12 mai.
Les votes negatifs seront enregistres dans le meme ordre.
Le vote porte sur la motion.
Avant que le greffier n'annonce le resultat du vote:
(La motion, mise aux voix, est rejetee.)
Je declare la motion rejetee.
Nous voterons de la meme facon que precedemment.
(La motion, mise aux voix, est adoptee.)
Je declare la motion adoptee.
En consequence, le projet de loi est renvoye au Comite permanent de la sante.
(Le projet de loi est lu pour la deuxieme fois et renvoye a un comite.)
LE PROGRAMME NATIONAL BON DEPART
La Chambre reprend l'etude de la motion et de l'amendement.
Le vote porte sur l'amendement.
(L'amendement, mis aux voix, est adopte.)
Je declare l'amendement adopte.
Le vote suivant porte sur la motion modifiee.
Je declare la motion modifiee adoptee.
M. Greg Thompson (Charlotte, PC):
Elles n'auront droit a aucune indemnisation.
Il y a egalement d'autres victimes qui ont ete infectees apres 1986.
Un de mes electeurs me l'a rappele.
Il est question d'equite dans le cadre du programme d'indemnisation.
Il n'en est rien.
Nous attendons une plus grande generosite de la part du ministre de la Sante.
Nous savons qu'il y a deja eu certains progres dans quelques provinces.
Nous voulons qu'elles soient indemnisees...
M. Robert D. Nault (secretaire parlementaire du ministre du Developpement des ressources humaines, Lib.):
Ils l'ont fait tout en sachant que certaines personnes ne seraient pas satisfaites.
Ils se sont assis en face des victimes et ont repondu a leurs questions.
C'est ce que nous avons fait.
C'est ce que nous avons propose de faire le 27 mars dernier.
Nous continuons de rechercher une veritable solution.
M. Paul Crete (Kamouraska-Riviere-du-Loup-Temiscouata-Les Basques, BQ):
C'est totalement inacceptable.
Il faut absolument que le gouvernement bouge de ce cote.
Comment a-t-on pu en arriver la?
Il faut diminuer le nombre d'heures requises lors d'un premier emploi.
On ne sait pas exactement comment s'adapter aux nouvelles conditions du travail precaire.
M. Robert D. Nault (secretaire parlementaire du ministre du Developpement des ressources humaines, Lib.):
Mettons les choses bien au clair.
Le gouvernement du Quebec recevra 2,7 milliards de dollars sur cinq ans...
La presidente suppleante (Mme Thibeault):
Je dois malheureusement interrompre le secretaire parlementaire.
La motion portant que la Chambre s'ajourne maintenant est reputee adoptee.
(La seance est levee a 20 h 20.)
|
{"hexsha": "cfce15aff4658f6923652676640a155d0321c493", "size": 63276, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.108.f", "max_stars_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_stars_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.108.f", "max_issues_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_issues_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.108.f", "max_forks_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_forks_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.3141630901, "max_line_length": 109, "alphanum_fraction": 0.7735002213, "num_tokens": 18658}
|
# Directions
const directions = [(-1, 0),(1, 0),(0, 1),(0, -1)]
# For readability
PICK_FOOD_1 = 5
PLACE_FOOD_1 = 6
# Perform a move action
function move(env::NatureEnv, player::Int, dir::Int)
    new_pos = env.players[player].pos .+ directions[dir]
    outofbounds(env, new_pos) && return  # ignore moves that leave the grid
    env.players[player].pos = new_pos
end
# Perform a food action
function food(env::NatureEnv, p::Int, idx::Int)
    # Food-action indices alternate verbs per food type:
    # [pick1, place1, pick2, place2, ..., pickf, placef]
    pick = idx % 2 == 1
    place = idx % 2 == 0
    food_type = floor(Int, ((idx - 1) / 2)) + 1
    fframe = env.food_frames[food_type]
    player = env.players[p]
    num_food = fframe[player.pos...]
    place_amount = 0.5
    if pick && num_food > 0
        player.food_counts = player.food_counts .+ Tuple(num_food * onehot(food_type, env.food_types))
        # Count food placed here by other players as an exchange.
        for (placed_player, placed_count) in env.place_record[player.pos][food_type]
            if placed_player != p
                env.exchanges[food_type] += placed_count
            end
        end
        fframe[player.pos...] = 0
        env.place_record[player.pos][food_type] = small_dd_builder()
    elseif place && player.food_counts[food_type] >= place_amount
        player.food_counts = Tuple(player.food_counts .- (place_amount .* onehot(food_type, env.food_types)))
        fframe[player.pos...] += place_amount
        env.place_record[player.pos][food_type][p] = env.place_record[player.pos][food_type][p] + place_amount
    end
end
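# A minimal sketch (not part of the original file) of the index decoding used
# in `food` above: odd indices pick, even indices place, and consecutive index
# pairs map to one food type. The name `decode_food_action` is illustrative.
decode_food_action(idx::Int) = (pick = isodd(idx), food_type = (idx - 1) ÷ 2 + 1)
# decode_food_action(1) == (pick = true, food_type = 1)   # pick food 1
# decode_food_action(4) == (pick = false, food_type = 2)  # place food 2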
# Record a communication: player p emits `symbol` at its current position.
function comm(env::NatureEnv, p::Int, symbol::Int)
    push!(env.comms[env.step], (env.players[p].pos..., p, symbol))
end
|
{"hexsha": "3f3bb2a3f8a9d452d4c8869730a2ee840aced87f", "size": 1597, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/act.jl", "max_stars_repo_name": "jarbus/Nature.jl", "max_stars_repo_head_hexsha": "22aa3b5afce41dc9f5ac5dcee9695ef4339824ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/act.jl", "max_issues_repo_name": "jarbus/Nature.jl", "max_issues_repo_head_hexsha": "22aa3b5afce41dc9f5ac5dcee9695ef4339824ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/act.jl", "max_forks_repo_name": "jarbus/Nature.jl", "max_forks_repo_head_hexsha": "22aa3b5afce41dc9f5ac5dcee9695ef4339824ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9512195122, "max_line_length": 133, "alphanum_fraction": 0.6437069505, "num_tokens": 459}
|
import numpy as np
import numpy.random as rd
from tools import int_plus
from tools import int_subtract
from tools import int_multiply
from tools import int_divide
import json
import datetime
import argparse
import random
parser = argparse.ArgumentParser()
parser.add_argument('-conf',default='conf.json')
conf = parser.parse_args().conf
#load configures
with open(conf) as f:
params = json.load(f)
filename = params['filename']
ansname = params['ansname']
if filename == 'time':
time_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
filename = './problemset_' + time_str
ansname = './answer_' + time_str
int_plus_params = params['int_plus']
int_subtract_params = params['int_subtract']
int_multiply_params = params['int_multiply']
int_divide_params = params['int_divide']
unitary = params['unitary']
if unitary is not False:
for item in [int_plus_params, int_subtract_params, int_multiply_params, int_divide_params]:
if item['num'] == 0:
continue
else:
item['num'] = unitary
shuffle = params['shuffle']
problem_list = []
ans_list = []
psa = int_plus(int_plus_params)
if psa is not False:
problem_list += psa[0]
ans_list += psa[1]
psa = int_subtract(int_subtract_params)
if psa is not False:
problem_list += psa[0]
ans_list += psa[1]
psa = int_multiply(int_multiply_params)
if psa is not False:
problem_list += psa[0]
ans_list += psa[1]
psa = int_divide(int_divide_params)
if psa is not False:
problem_list += psa[0]
ans_list += psa[1]
with open(filename,'w') as f:
if shuffle:
random.shuffle(problem_list)
for item in problem_list:
f.write(item)
with open(ansname,'w') as f:
for item in ans_list:
f.write(item)
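# A hypothetical conf.json matching the keys read above (the contents of each
# operation block are assumptions; their exact meaning is defined by
# tools.int_plus and friends, which are not shown here):
#
# {
#     "filename": "time",
#     "ansname": "time",
#     "int_plus": {"num": 20},
#     "int_subtract": {"num": 20},
#     "int_multiply": {"num": 10},
#     "int_divide": {"num": 10},
#     "unitary": false,
#     "shuffle": true
# }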
|
{"hexsha": "efe9ca1a947d5fedd0eb9d627efe4b8e02992e10", "size": 1753, "ext": "py", "lang": "Python", "max_stars_repo_path": "problemset.py", "max_stars_repo_name": "LearnerYme/elementary_arithmetic_problemset", "max_stars_repo_head_hexsha": "7f890a30b55f62868825dbb9ae95da247970a80e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "problemset.py", "max_issues_repo_name": "LearnerYme/elementary_arithmetic_problemset", "max_issues_repo_head_hexsha": "7f890a30b55f62868825dbb9ae95da247970a80e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problemset.py", "max_forks_repo_name": "LearnerYme/elementary_arithmetic_problemset", "max_forks_repo_head_hexsha": "7f890a30b55f62868825dbb9ae95da247970a80e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4057971014, "max_line_length": 95, "alphanum_fraction": 0.698231603, "include": true, "reason": "import numpy", "num_tokens": 461}
|
function ALclear(; verbose = true)
    ((length(replset.commands) == 0) || (replset.commands[end] == "#Session Started")) &&
        ((verbose && println("Activeset Already Empty")); return)
    newhistory = History()
    newreplset = activelogicset(newhistory)
    setactivehistory!(newhistory)
    setreplset!(newreplset)
    pushsessionhistory!(newreplset)
    verbose && println("Clearing Activeset")
end
function ALClear(; verbose = true)
    ALclear(verbose = false)
    setsessionhistory!(History())
    verbose && println("Clearing Everything!")
end
|
{"hexsha": "15b83b86b94db190228ca07c04b46f64a85b8b57", "size": 558, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/repl/ALclear.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/AbstractLogic.jl-bd85187e-0531-4a3e-9fea-713204a818a2", "max_stars_repo_head_hexsha": "1b8adac10854471ec7ce83b9039cdeb1e43c0208", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-09-24T23:44:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T18:03:25.000Z", "max_issues_repo_path": "src/repl/ALclear.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/AbstractLogic.jl-bd85187e-0531-4a3e-9fea-713204a818a2", "max_issues_repo_head_hexsha": "1b8adac10854471ec7ce83b9039cdeb1e43c0208", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2019-08-08T19:13:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-10T01:13:59.000Z", "max_forks_repo_path": "src/repl/ALclear.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/AbstractLogic.jl-bd85187e-0531-4a3e-9fea-713204a818a2", "max_forks_repo_head_hexsha": "1b8adac10854471ec7ce83b9039cdeb1e43c0208", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-22T01:12:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-22T01:12:36.000Z", "avg_line_length": 25.3636363636, "max_line_length": 85, "alphanum_fraction": 0.688172043, "num_tokens": 141}
|
(*
File: Generalized_Primality_Test.thy
Authors: Daniel Stüwe, Manuel Eberl
Generic probabilistic primality test
*)
section \<open>A Generic View on Probabilistic Prime Tests\<close>
theory Generalized_Primality_Test
imports
"HOL-Probability.Probability"
Algebraic_Auxiliaries
begin
definition primality_test :: "(nat \<Rightarrow> nat \<Rightarrow> bool) \<Rightarrow> nat \<Rightarrow> bool pmf" where
"primality_test P n =
(if n < 3 \<or> even n then return_pmf (n = 2) else
do {
a \<leftarrow> pmf_of_set {2..< n};
return_pmf (P n a)
})"
(* TODO Move *)
lemma expectation_of_bool_is_pmf: "measure_pmf.expectation M of_bool = pmf M True"
by (simp add: integral_measure_pmf_real[where A=UNIV] UNIV_bool)
lemma eq_bernoulli_pmfI:
assumes "pmf p True = x"
shows "p = bernoulli_pmf x"
proof (intro pmf_eqI)
fix b :: bool
from assms have "x \<in> {0..1}" by (auto simp: pmf_le_1)
thus "pmf p b = pmf (bernoulli_pmf x) b"
using assms by (cases b) (auto simp: pmf_False_conv_True)
qed
text \<open>
We require a probabilistic primality test to never classify a prime as composite.
It may, however, mistakenly classify composites as primes.
\<close>
locale prob_primality_test =
fixes P :: "nat \<Rightarrow> nat \<Rightarrow> bool" and n :: nat
assumes P_works: "odd n \<Longrightarrow> 2 \<le> a \<Longrightarrow> a < n \<Longrightarrow> prime n \<Longrightarrow> P n a"
begin
lemma FalseD:
assumes false: "False \<in> set_pmf (primality_test P n)"
shows "\<not> prime n"
proof -
from false consider "n \<noteq> 2" "n < 3" | "n \<noteq> 2" "even n" |
a where "\<not> P n a" "odd n" "2 \<le> a" "a < n"
by (auto simp: primality_test_def not_less split: if_splits)
then show ?thesis proof cases
case 1
then show ?thesis
by (cases rule: linorder_neqE_nat) (use prime_ge_2_nat[of n] in auto)
next
case 2
then show ?thesis using primes_dvd_imp_eq two_is_prime_nat by blast
next
case 3
then show ?thesis using P_works by blast
qed
qed
theorem prime:
assumes odd_prime: "prime n"
shows "primality_test P n = return_pmf True"
proof -
have "set_pmf (primality_test P n) \<subseteq> {True, False}"
by auto
moreover from assms have "False \<notin> set_pmf (primality_test P n)"
using FalseD by auto
ultimately have "set_pmf (primality_test P n) \<subseteq> {True}"
by auto
thus ?thesis
by (subst (asm) set_pmf_subset_singleton)
qed
end
text \<open>
We call a primality test \<open>q\<close>-good for a fixed positive real number \<open>q\<close> if the probability
that it mistakenly classifies a composite as a prime is less than \<open>q\<close>.
\<close>
locale good_prob_primality_test = prob_primality_test +
fixes q :: real
assumes q_pos: "q > 0"
assumes composite_witness_bound:
"\<not>prime n \<Longrightarrow> 2 < n \<Longrightarrow> odd n \<Longrightarrow>
real (card {a . 2 \<le> a \<and> a < n \<and> P n a}) < q * real (n - 2)"
begin
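text \<open>
  As an informal sketch of the underlying computation (this remark is not part
  of the original development): for odd composite \<open>n > 2\<close>, the test draws \<open>a\<close>
  uniformly from \<open>{2..<n}\<close>, a set of cardinality \<open>n - 2\<close>, so the probability of
  the answer \<open>True\<close> is \<open>card {a. 2 \<le> a \<and> a < n \<and> P n a} / (n - 2)\<close>, which the
  assumption \<open>composite_witness_bound\<close> forces to be strictly less than \<open>q\<close>.
\<close>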
theorem composite:
assumes "\<not>prime n"
shows "pmf (primality_test P n) True < q"
using composite_aux[OF assms] by (simp add: expectation_of_bool_is_pmf)
end
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Probabilistic_Prime_Tests/Generalized_Primality_Test.thy"}
|
import logging
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from functools import reduce
from operator import mul
import lab as B
import numpy as np
import wbml.out
from plum import Dispatcher, Self, Referentiable
from .util import Packer, match, lazy_tf as tf, lazy_torch as torch, lazy_jnp as jnp
__all__ = ["Provider", "Vars"]
log = logging.getLogger(__name__)
_dispatch = Dispatcher()
@_dispatch(B.NPNumeric, B.Numeric)
def _assign(x, value):
np.copyto(x, value)
return x
@_dispatch(B.TFNumeric, B.Numeric)
def _assign(x, value):
return x.assign(value)
@_dispatch(B.TorchNumeric, B.Numeric)
def _assign(x, value):
if not isinstance(value, B.TorchNumeric):
value = torch.tensor(value, dtype=x.dtype)
x.data.copy_(value)
return x
@_dispatch(B.JAXNumeric, B.Numeric)
def _assign(x, value):
return jnp.array(value, dtype=x.dtype)
class Provider(metaclass=Referentiable(ABCMeta)):
@abstractmethod
def unbounded(self, init=None, shape=None, dtype=None, name=None): # pragma: no cover
"""Get an unbounded variable.
Args:
init (tensor, optional): Initialisation of the variable.
shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
dtype (data type, optional): Data type of the variable. Defaults to that
of the storage.
name (str, optional): Name of the variable.
Returns:
tensor: Variable.
"""
def get(self, *args, **kw_args):
"""Alias for :meth:`.vars.Provider.unbounded`."""
return self.unbounded(*args, **kw_args)
@abstractmethod
def positive(self, init=None, shape=None, dtype=None, name=None): # pragma: no cover
"""Get a positive variable.
Args:
init (tensor, optional): Initialisation of the variable.
shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
dtype (data type, optional): Data type of the variable. Defaults to that
of the storage.
name (str, optional): Name of the variable.
Returns:
tensor: Variable.
"""
def pos(self, *args, **kw_args):
"""Alias for :meth:`.vars.Vars.positive`."""
return self.positive(*args, **kw_args)
@abstractmethod
def bounded(
self, init=None, lower=1e-4, upper=1e4, shape=None, dtype=None, name=None
): # pragma: no cover
"""Get a bounded variable.
Args:
init (tensor, optional): Initialisation of the variable.
lower (tensor, optional): Lower bound. Defaults to `1e-4`.
upper (tensor, optional): Upper bound. Defaults to `1e4`.
shape (tuple[int], optional): Shape of the variable. Defaults to scalar.
dtype (data type, optional): Data type of the variable. Defaults to that
of the storage.
name (hashable, optional): Name of the variable.
Returns:
tensor: Variable.
"""
def bnd(self, *args, **kw_args):
"""Alias for :meth:`.vars.Vars.bounded`."""
return self.bounded(*args, **kw_args)
@abstractmethod
def lower_triangular(
self, init=None, shape=None, dtype=None, name=None
): # pragma: no cover
"""Get a lower-triangular matrix.
Args:
init (tensor, optional): Initialisation of the variable.
shape (int, optional): Number of rows and columns of the matrix.
dtype (data type, optional): Data type of the variable. Defaults to
that of the storage.
name (hashable, optional): Name of the variable.
Returns:
tensor: Variable.
"""
def tril(self, *args, **kw_args):
"""Alias for :meth:`.vars.Vars.lower_triangular`."""
return self.lower_triangular(*args, **kw_args)
@abstractmethod
def positive_definite(
self, init=None, shape=None, dtype=None, name=None
): # pragma: no cover
"""Get a positive-definite matrix.
Args:
init (tensor, optional): Initialisation of the variable.
shape (int, optional): Number of rows and columns of the matrix.
dtype (data type, optional): Data type of the variable. Defaults to
that of the storage.
name (hashable, optional): Name of the variable.
Returns:
tensor: Variable.
"""
def pd(self, *args, **kw_args):
"""Alias for :meth:`.vars.Vars.positive_definite`."""
return self.positive_definite(*args, **kw_args)
@abstractmethod
def orthogonal(
self, init=None, shape=None, dtype=None, name=None, method="svd"
): # pragma: no cover
"""Get an orthogonal matrix.
Args:
init (tensor, optional): Initialisation of the variable.
shape (int, optional): Number of rows and columns of the matrix.
dtype (data type, optional): Data type of the variable. Defaults to
that of the storage.
name (hashable, optional): Name of the variable.
method ('svd', 'expm' or 'cayley'): Parametrisation. Method of
parametrisation. Defaults to 'svd'.
Returns:
tensor: Variable.
"""
def orth(self, *args, **kw_args):
"""Alias for :meth:`.vars.Vars.orthogonal`."""
return self.orthogonal(*args, **kw_args)
@abstractmethod
def __getitem__(self, name): # pragma: no cover
"""Get a variable by name.
Args:
name (hashable): Name of variable.
Returns:
tensor: Variable.
"""
@_dispatch(object)
def _check_matrix_shape(shape, square=True):
raise ValueError(f"Object {shape} is not a shape.")
@_dispatch(tuple)
def _check_matrix_shape(shape, square=True):
if len(shape) != 2:
raise ValueError(f"Shape {shape} must be the shape of a matrix.")
if square and shape[0] != shape[1]:
raise ValueError(f"Shape {shape} must be square.")
def _check_init_shape(init, shape):
if init is None and shape is None:
raise ValueError(
f"The shape must be given to automatically initialise "
f"a matrix variable."
)
if shape is None:
shape = B.shape(init)
return init, shape
class Vars(Provider):
"""Variable storage.
Args:
dtype (data type): Data type of the variables.
source (tensor, optional): Tensor to source variables from. Defaults to
not being used.
"""
_dispatch = Dispatcher(in_class=Self)
def __init__(self, dtype, source=None):
self.dtype = dtype
# Source:
self.source = source
self.source_index = 0
# Storage:
self.vars = []
self.transforms = []
self.inverse_transforms = []
# Lookup:
self.name_to_index = OrderedDict()
self._get_vars_cache = {}
# Packing:
self.vector_packer = None
def _resolve_dtype(self, dtype):
if dtype is None:
return self.dtype
else:
return dtype
def _get_var(
self,
transform,
inverse_transform,
init,
generate_init,
shape,
shape_latent,
dtype,
name,
):
# If the name already exists, return that variable.
try:
return self[name]
except KeyError:
pass
# A new variable will be added. Clear lookup cache.
self._get_vars_cache.clear()
# Resolve data type.
dtype = self._resolve_dtype(dtype)
        # If no source is provided, get the latent from the provided
        # initialiser.
if self.source is None:
# Resolve initialisation and inverse transform.
if init is None:
init = generate_init(shape=shape, dtype=dtype)
else:
init = B.cast(dtype, init)
if shape is not None and shape != B.shape(init):
raise ValueError(
f"Shape of initial value {B.shape(init)} is not equal to the "
f"desired shape {shape}."
)
# Construct optimisable variable.
latent = inverse_transform(init)
if isinstance(self.dtype, B.TFDType):
latent = tf.Variable(latent)
elif isinstance(self.dtype, B.TorchDType):
pass # All is good in this case.
elif isinstance(self.dtype, B.JAXDType):
latent = jnp.array(latent)
else:
# Must be a NumPy data type.
assert isinstance(self.dtype, B.NPDType)
latent = np.array(latent)
else:
# Get the latent variable from the source.
length = reduce(mul, shape_latent, 1)
latent_flat = self.source[self.source_index : self.source_index + length]
self.source_index += length
# Cast to the right data type.
latent = B.cast(dtype, B.reshape(latent_flat, *shape_latent))
# Store transforms.
self.vars.append(latent)
self.transforms.append(transform)
self.inverse_transforms.append(inverse_transform)
# Get index of the variable.
index = len(self.vars) - 1
# Store name if given.
if name is not None:
self.name_to_index[name] = index
# Generate the variable and return.
return transform(latent)
def unbounded(self, init=None, shape=None, dtype=None, name=None):
        # If nothing is specified, generate a scalar.
if init is None and shape is None:
shape = ()
def generate_init(shape, dtype):
return B.randn(dtype, *shape)
return self._get_var(
transform=lambda x: x,
inverse_transform=lambda x: x,
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape,
dtype=dtype,
name=name,
)
def positive(self, init=None, shape=None, dtype=None, name=None):
        # If nothing is specified, generate a scalar.
if init is None and shape is None:
shape = ()
def generate_init(shape, dtype):
return B.rand(dtype, *shape)
return self._get_var(
transform=lambda x: B.exp(x),
inverse_transform=lambda x: B.log(x),
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape,
dtype=dtype,
name=name,
)
def bounded(
self, init=None, lower=1e-4, upper=1e4, shape=None, dtype=None, name=None
):
        # If nothing is specified, generate a scalar.
if init is None and shape is None:
shape = ()
def transform(x):
return lower + (upper - lower) / (1 + B.exp(-x))
def inverse_transform(x):
return B.log(x - lower) - B.log(upper - x)
def generate_init(shape, dtype):
return lower + B.rand(dtype, *shape) * (upper - lower)
return self._get_var(
transform=transform,
inverse_transform=inverse_transform,
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape,
dtype=dtype,
name=name,
)
def lower_triangular(self, init=None, shape=None, dtype=None, name=None):
init, shape = _check_init_shape(init, shape)
_check_matrix_shape(shape)
# Result must be square. Get a side.
side = shape[0]
def transform(x):
return B.vec_to_tril(x)
def inverse_transform(x):
return B.tril_to_vec(x)
def generate_init(shape, dtype):
mat = B.randn(dtype, *shape)
return transform(B.tril_to_vec(mat))
shape_latent = (int(side * (side + 1) / 2),)
return self._get_var(
transform=transform,
inverse_transform=inverse_transform,
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape_latent,
dtype=dtype,
name=name,
)
def positive_definite(self, init=None, shape=None, dtype=None, name=None):
init, shape = _check_init_shape(init, shape)
_check_matrix_shape(shape)
# Result must be square. Get a side.
side = shape[0]
def transform(x):
log_diag = x[:side]
chol = B.vec_to_tril(x[side:], offset=-1) + B.diag(B.exp(log_diag))
return B.matmul(chol, chol, tr_b=True)
def inverse_transform(x):
chol = B.cholesky(B.reg(x))
return B.concat(B.log(B.diag(chol)), B.tril_to_vec(chol, offset=-1))
def generate_init(shape, dtype):
mat = B.randn(dtype, *shape)
return B.matmul(mat, mat, tr_b=True)
shape_latent = (int(side * (side + 1) / 2),)
return self._get_var(
transform=transform,
inverse_transform=inverse_transform,
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape_latent,
dtype=dtype,
name=name,
)
def orthogonal(self, init=None, shape=None, dtype=None, name=None, method="svd"):
init, shape = _check_init_shape(init, shape)
if method == "svd":
_check_matrix_shape(shape, square=False)
n, m = shape
shape_latent = (n, m)
# Fix singular values.
sing_vals = B.linspace(self._resolve_dtype(dtype), 1, 2, min(n, m))
def transform(x):
u, s, v = B.svd(x)
return B.matmul(u, v, tr_b=True)
def inverse_transform(x):
if n >= m:
return x * sing_vals[None, :]
else:
return x * sing_vals[:, None]
def generate_init(shape, dtype):
mat = B.randn(dtype, *shape)
return transform(mat)
elif method == "expm":
_check_matrix_shape(shape)
side = shape[0]
shape_latent = (int(side * (side + 1) / 2 - side),)
def transform(x):
tril = B.vec_to_tril(x, offset=-1)
skew = tril - B.transpose(tril)
return B.expm(skew)
def inverse_transform(x):
return B.tril_to_vec(B.logm(x), offset=-1)
def generate_init(shape, dtype):
mat = B.randn(dtype, *shape)
return transform(B.tril_to_vec(mat, offset=-1))
elif method == "cayley":
_check_matrix_shape(shape)
side = shape[0]
shape_latent = (int(side * (side + 1) / 2 - side),)
def transform(x):
tril = B.vec_to_tril(x, offset=-1)
skew = tril - B.transpose(tril)
eye = B.eye(skew)
return B.solve(eye + skew, eye - skew)
def inverse_transform(x):
eye = B.eye(x)
skew = B.solve(eye + x, eye - x)
return B.tril_to_vec(skew, offset=-1)
def generate_init(shape, dtype):
mat = B.randn(dtype, *shape)
return transform(B.tril_to_vec(mat, offset=-1))
else:
raise ValueError(f'Unknown parametrisation "{method}".')
return self._get_var(
transform=transform,
inverse_transform=inverse_transform,
init=init,
generate_init=generate_init,
shape=shape,
shape_latent=shape_latent,
dtype=dtype,
name=name,
)
def __getitem__(self, name):
index = self.name_to_index[name]
return self.transforms[index](self.vars[index])
def assign(self, name, value, differentiable=False):
"""Assign a value to a variable.
Args:
name (hashable): Name of variable to assign value to.
value (tensor): Value to assign.
differentiable (bool, optional): Do a differentiable assignment.
Returns:
tensor: Assignment result.
"""
index = self.name_to_index[name]
if differentiable:
# Do a differentiable assignment, but ensure that the data type is
# right.
dtype = B.dtype(self.vars[index])
self.vars[index] = B.cast(dtype, value)
return value
else:
# Overwrite data.
self.vars[index] = _assign(
self.vars[index], self.inverse_transforms[index](value)
)
return self.vars[index]
def copy(self, detach=False):
"""Create a copy of the variable manager that shares the variables.
Args:
detach (bool, optional): Detach the variables in PyTorch. Defaults
to `False`.
Returns:
:class:`.vars.Vars`: Copy.
"""
vs = Vars(dtype=self.dtype)
vs.transforms = list(self.transforms)
vs.inverse_transforms = list(self.inverse_transforms)
vs.name_to_index = OrderedDict(self.name_to_index)
vs.vector_packer = self.vector_packer
if detach:
for var in self.vars:
vs.vars.append(var.detach())
else:
vs.vars = list(self.vars)
return vs
def detach(self):
"""Detach all variables held in PyTorch."""
self.vars = [v.detach() for v in self.vars]
def requires_grad(self, value, *names):
"""Set which variables require a gradient in PyTorch.
Args:
value (bool): Require a gradient.
*names (hashable): Specify variables by name.
"""
for var in self.get_vars(*names):
var.requires_grad_(value)
def get_vars(self, *names, return_indices=False):
"""Get latent variables.
If no arguments are supplied, then all latent variables are retrieved.
Furthermore, the same collection of variables is guaranteed to be
returned in the same order.
Args:
*names (hashable): Get variables by name.
return_indices (bool, optional): Get the indices of the variables instead.
Defaults to `False`.
Returns:
            list: Matched latent variables or their indices, depending on the
                value of `return_indices`.
"""
# If nothing is specified, return all latent variables.
if len(names) == 0:
if return_indices:
return list(range(len(self.vars)))
else:
return self.vars
# Attempt to use cache.
try:
indices = self._get_vars_cache[names]
except KeyError:
# Collect indices of matches.
indices = set()
for name in names:
a_match = False
for k, v in self.name_to_index.items():
if match(name, k):
indices |= {v}
a_match = True
# Check that there was a match.
if not a_match:
raise ValueError(f'No variable matching "{name}".')
# Sort the indices for a consistent result.
indices = sorted(indices)
# Store in cache before proceeding.
self._get_vars_cache[names] = indices
# Return indices if asked for. Otherwise, return variables.
if return_indices:
return indices
else:
return [self.vars[i] for i in indices]
def get_vector(self, *names):
"""Get all the latent variables stacked in a vector.
If no arguments are supplied, then all latent variables are retrieved.
Args:
*names (hashable): Get variables by name.
Returns:
            tensor: Vector consisting of all latent values.
"""
vars = self.get_vars(*names)
self.vector_packer = Packer(*vars)
return self.vector_packer.pack(*vars)
def set_vector(self, values, *names, differentiable=False):
"""Set all the latent variables by values from a vector.
If no arguments are supplied, then all latent variables are retrieved.
Args:
values (tensor): Vector to set the variables to.
*names (hashable): Set variables by name.
differentiable (bool, optional): Differentiable assignment. Defaults
to `False`.
Returns:
list: Assignment results.
"""
values = self.vector_packer.unpack(values)
if differentiable:
# Do a differentiable assignment.
for index, value in zip(self.get_vars(*names, return_indices=True), values):
self.vars[index] = value
return values
else:
# Overwrite data.
assignments = []
for index, value in zip(self.get_vars(*names, return_indices=True), values):
self.vars[index] = _assign(self.vars[index], value)
assignments.append(self.vars[index])
return assignments
@property
def names(self):
"""All available names."""
return list(self.name_to_index.keys())
def print(self):
"""Print all variables."""
for name in self.names:
wbml.out.kv(name, self[name])
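

# What follows is a minimal usage sketch, not part of the original module: it
# shows the latent/transform round trip for a named positive variable. The
# variable name "scale" and the NumPy backend are illustrative assumptions.
if __name__ == "__main__":  # pragma: no cover
    vs = Vars(np.float64)
    scale = vs.positive(init=1.0, name="scale")  # latent stores log(scale)
    vs.assign("scale", 2.0)  # assignment goes through the inverse transform (log)
    theta = vs.get_vector("scale")  # pack the matching latents into a flat vector
    vs.set_vector(theta, "scale")  # unpack and write the latents back
    vs.print()  # report current (transformed) values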
|
{"hexsha": "c38271b6e5c5db56403b8e0848f4048ad9217006", "size": 21838, "ext": "py", "lang": "Python", "max_stars_repo_path": "varz/vars.py", "max_stars_repo_name": "willtebbutt/varz", "max_stars_repo_head_hexsha": "519e14d202cafb32a0bdf2799bcbde0b5baa1d6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "varz/vars.py", "max_issues_repo_name": "willtebbutt/varz", "max_issues_repo_head_hexsha": "519e14d202cafb32a0bdf2799bcbde0b5baa1d6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "varz/vars.py", "max_forks_repo_name": "willtebbutt/varz", "max_forks_repo_head_hexsha": "519e14d202cafb32a0bdf2799bcbde0b5baa1d6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6952104499, "max_line_length": 90, "alphanum_fraction": 0.563375767, "include": true, "reason": "import numpy", "num_tokens": 4734}
|
[STATEMENT]
lemma has_field_mono:
"\<lbrakk> P \<turnstile> C has F:T (fm) in D; P \<turnstile> C' \<preceq>\<^sup>* C \<rbrakk> \<Longrightarrow> P \<turnstile> C' has F:T (fm) in D"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>P \<turnstile> C has F:T (fm) in D; P \<turnstile> C' \<preceq>\<^sup>* C\<rbrakk> \<Longrightarrow> P \<turnstile> C' has F:T (fm) in D
[PROOF STEP]
by(fastforce simp:has_field_def map_add_def dest: has_fields_mono_lem)
|
{"llama_tokens": 204, "file": "JinjaThreads_Common_TypeRel", "length": 1}
|
import numpy as np
import pandas as pd
# import xarray as xr
# import xskillscore
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.losses import Loss
from sklearn import preprocessing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.close("all")
from scoringRules import es_sample, crps_sample
from igep_models_all_tem_noplot import igep
import tensorflow.compat.v1 as tfv
tfv.disable_v2_behavior()
DIM = 5 # dimension of target values
dist_samples = pd.read_csv('/home/chen_jieyu/IGEP/dist_5samples.csv', header = None)
# Read data
path = '/home/chen_jieyu/IGEP/ens_fc_t2m_complete.feather'
t2m_ens_complete = pd.read_feather(path)
path_add = '/home/chen_jieyu/IGEP/tem_additional_predictors.feather'
t2m_add_complete = pd.read_feather(path_add)
callback = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0.002, patience = 3, restore_best_weights = True)
# loop over the sampled station sets
x = 100  # number of station samples
for k in range(x):
station_sample = dist_samples.iloc[k,]
ens_sample = t2m_ens_complete[t2m_ens_complete['station'].isin(station_sample)]
dateobs_count = ens_sample.groupby('date')['date'].count()
dates = dateobs_count.index
used_dates = dates[dateobs_count == DIM]
used_ens_sample = ens_sample[ens_sample['date'].isin(used_dates)]
add_sample = t2m_add_complete[t2m_add_complete['station'].isin(station_sample)]
used_add_sample = add_sample[add_sample['date'].isin(used_dates)]
# LOAD DATA
# t2m data
t2m_obs = used_ens_sample['obs']
t2m_obs.index = used_ens_sample['date']
data_obs = t2m_obs
# set initial training and test dates
train_dateindex = ((t2m_obs.index.year != 2016) & (t2m_obs.index.year != 2015))
val_dateindex = (t2m_obs.index.year == 2015)
test_dateindex = (t2m_obs.index.year == 2016)
# Predictions
t2m_ens = used_ens_sample.iloc[:,3:53]
t2m_ens.index = used_ens_sample['date']
data_ens = t2m_ens
# added predictors
add_dim = 37
t2m_add = used_add_sample.loc[:,["d2m_mean","d2m_var",
"q_pl850_mean","q_pl850_var",
"tcc_mean","tcc_var",
"u_pl850_mean","u_pl850_var",
"v_pl850_mean","v_pl850_var",
"sshf_mean","sshf_var",
"slhf_mean","slhf_var",
"u10_mean","u10_var",
"v10_mean","v10_var",
"cape_mean","cape_var",
"sp_mean","sp_var",
"u_pl500_mean","u_pl500_var",
"v_pl500_mean","v_pl500_var",
"gh_pl500_mean","gh_pl500_var",
"ssr_mean","ssr_var",
"str_mean","str_var",
"lat","lon","alt","orog","sin_yday"]]
t2m_add.index = used_add_sample['date']
data_add = t2m_add
# SPLIT DATA
# get training and test data
obser = data_obs.copy()
pred = data_ens.copy()
addpre = data_add.copy()
dim = DIM
######### standardization
scaler = preprocessing.StandardScaler().fit(obser[train_dateindex].values.reshape(-1,1))
stand_obs = scaler.transform(obser.values.reshape(-1,1)).reshape(-1)
obser.iloc[:] = stand_obs
for i in range(pred.shape[1]):
pred.iloc[:,i] = scaler.transform(pred.iloc[:,i].values.reshape(-1,1))
ens_mu = pred.mean(axis=1)
ens_sigma = pred.var(axis=1)
ens_max = pred.max(axis=1)
ens_min = pred.min(axis=1)
ens_spread = ens_max - ens_min
for i in range(addpre.shape[1]-1):
scaler_i = preprocessing.StandardScaler().fit(addpre.iloc[train_dateindex,i].values.reshape(-1,1))
addpre.iloc[:,i] = scaler_i.transform(addpre.iloc[:,i].values.reshape(-1,1))
add_pre_mu = addpre.loc[:,["d2m_mean","q_pl850_mean","tcc_mean","u_pl850_mean","v_pl850_mean",
"sshf_mean","slhf_mean","u10_mean","v10_mean","cape_mean","sp_mean",
"u_pl500_mean","v_pl500_mean","gh_pl500_mean","ssr_mean","str_mean"]]
add_pre_sigma = addpre.loc[:,["d2m_var","q_pl850_var","tcc_var","u_pl850_var","v_pl850_var",
"sshf_var","slhf_var","u10_var","v10_var","cape_var","sp_var",
"u_pl500_var","v_pl500_var","gh_pl500_var","ssr_var","str_var"]]
n_add = 16
# Inputs
x_train_m323 = [np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
addpre[train_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_val_m323 = [np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
addpre[val_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_test_m323 = [np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
addpre[test_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
y_train = obser[train_dateindex].values.reshape((-1, dim, 1))
y_val = obser[val_dateindex].values.reshape((-1, dim, 1))
y_test = obser[test_dateindex].values.reshape((-1, dim, 1))
y_train_tmp = y_train
y_val_tmp = y_val
y_test_tmp = y_test
testy = data_obs[test_dateindex]
# MODEL
BATCH_SIZE = 64
    LATENT_DIST = "uniform" # or "normal"; family of latent variable distributions
DIM_LATENT = 20 # number of latent variables
learning_rate = 0.01
EPOCHS = 50
N_SAMPLES_TRAIN = 50 # number of samples drawn during training
N_SAMPLES_TEST = 100
VERBOSE = 1
n_layers = 2
n_nodes = 25
ens_m3_output_combine_l = pd.DataFrame()
ens_m3_output_combine_s = pd.DataFrame()
for loop in range(5):
tfv.reset_default_graph()
# initialize model
mdl_m3 = igep(dim_out = DIM,
dim_in_mean = x_train_m323[0].shape[-1],
dim_in_std = x_train_m323[1].shape[-1],
dim_in_features = x_train_m323[2].shape[-1],
dim_latent = DIM_LATENT,
n_samples_train = N_SAMPLES_TRAIN,
layer_number = n_layers,
nodes_number = n_nodes,
model_type = 326,
latent_dist = LATENT_DIST)
#% FIT
mdl_m3.fit(x = x_train_m323,
y = y_train_tmp,
batch_size = BATCH_SIZE,
epochs = EPOCHS,
verbose = VERBOSE,
callbacks = [callback],
validation_split = 0.0,
validation_data = (x_val_m323, y_val_tmp),
sample_weight = None,
learningrate = learning_rate)
# predict and append to list
S_m3 = []
S_m3.append(mdl_m3.predict(x_test_m323, N_SAMPLES_TEST))
pre_dat = np.concatenate(S_m3, axis = 0)
fcst = scaler.inverse_transform(np.reshape(pre_dat, (pre_dat.shape[0]*pre_dat.shape[1],-1)))
ens_m3_output = pd.DataFrame(fcst, index=testy.index)
ens_m3_output_combine_l = pd.concat([ens_m3_output_combine_l, ens_m3_output], axis=1)
ens_m3_output_combine_s = pd.concat([ens_m3_output_combine_s, ens_m3_output.iloc[:, :10] ], axis=1)
ens_m3_long_result = pd.concat([testy, ens_m3_output_combine_l], axis=1)
ens_m3_short_result = pd.concat([testy, ens_m3_output_combine_s], axis=1)
file_name_l = "ig326_t2m_5dim_long_" + str(k) + ".csv"
ens_m3_long_result.to_csv('/Data/Jieyu_data/ig326_t2m_all/' + file_name_l)
file_name_s = "ig326_t2m_5dim_short_" + str(k) + ".csv"
ens_m3_short_result.to_csv('/Data/Jieyu_data/ig326_t2m_all/' + file_name_s)
print('m3:'+str(k))
|
{"hexsha": "6faff5fff467b3a31c6cb6c8a4f44c51df6fe600", "size": 11848, "ext": "py", "lang": "Python", "max_stars_repo_path": "igep326_temperature_100tests.py", "max_stars_repo_name": "jieyu97/mvpp", "max_stars_repo_head_hexsha": "838c2553825b2061f51008b5cbed19526424c2f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "igep326_temperature_100tests.py", "max_issues_repo_name": "jieyu97/mvpp", "max_issues_repo_head_hexsha": "838c2553825b2061f51008b5cbed19526424c2f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "igep326_temperature_100tests.py", "max_forks_repo_name": "jieyu97/mvpp", "max_forks_repo_head_hexsha": "838c2553825b2061f51008b5cbed19526424c2f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5413533835, "max_line_length": 127, "alphanum_fraction": 0.5381498987, "include": true, "reason": "import numpy", "num_tokens": 2780}
|
using MINLPTests, JuMP, Ipopt, Juniper, Test
const OPTIMIZER = MINLPTests.JuMP.with_optimizer(
Juniper.Optimizer, nl_solver=with_optimizer(Ipopt.Optimizer, print_level=0), atol=1e-7
)
@testset "MINLPTests" begin
###
### src/nlp-mi tests.
###
MINLPTests.test_nlp_mi(OPTIMIZER)
end
|
{"hexsha": "afa9c235d7ec0fd57532135f03100ae516aad5bd", "size": 306, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/MINLPTests/run_minlptests.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Juniper.jl-2ddba703-00a4-53a7-87a5-e8b9971dde84", "max_stars_repo_head_hexsha": "75a848d7a281dba768583bbc554532996898ff75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/MINLPTests/run_minlptests.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Juniper.jl-2ddba703-00a4-53a7-87a5-e8b9971dde84", "max_issues_repo_head_hexsha": "75a848d7a281dba768583bbc554532996898ff75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/MINLPTests/run_minlptests.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Juniper.jl-2ddba703-00a4-53a7-87a5-e8b9971dde84", "max_forks_repo_head_hexsha": "75a848d7a281dba768583bbc554532996898ff75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5384615385, "max_line_length": 90, "alphanum_fraction": 0.7091503268, "num_tokens": 99}
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
import numpy as np
try:
from scipy.sparse.base import spmatrix
except ImportError: # pragma: no cover
spmatrix = None
from ... import opcodes as OperandDef
from ... import tensor as mt
from ...core import Base, Entity
from ...serialize import KeyField, BoolField, TupleField, DataTypeField, AnyField, ListField
from ...tensor.operands import TensorOrder
from ...tiles import TilesError
from ...utils import recursive_tile
from ..operands import LearnOperand, LearnOperandMixin, OutputType
from ..utils import assert_all_finite
class IsMultilabel(LearnOperand, LearnOperandMixin):
_op_type_ = OperandDef.IS_MULTILABEL
_y = AnyField('y')
_unique_y = KeyField('unique_y')
# for chunk
_is_y_sparse = BoolField('is_y_sparse')
def __init__(self, y=None, unique_y=None, is_y_sparse=None, **kw):
super().__init__(_y=y, _unique_y=unique_y,
_is_y_sparse=is_y_sparse, **kw)
self._output_types = [OutputType.tensor]
@property
def y(self):
return self._y
@property
def unique_y(self):
return self._unique_y
@property
def is_y_sparse(self):
return self._is_y_sparse
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
if isinstance(self._y, (Base, Entity)):
self._y = self._inputs[0]
if self._unique_y is not None:
self._unique_y = self._inputs[-1]
def __call__(self, y, y_unique=None):
inputs = [y] if isinstance(y, (Base, Entity)) else []
if y_unique is not None:
inputs.append(y_unique)
return self.new_tileable(inputs, shape=(), dtype=np.dtype(bool),
order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
y = op.y
out = op.outputs[0]
if not (hasattr(y, 'shape') and y.ndim == 2 and y.shape[1] > 1):
result = mt.array(False)._inplace_tile()
return [result]
else:
unique_y = op.unique_y
assert len(unique_y.chunks) == 1
unique_y_chunk = unique_y.chunks[0]
chunk_op = IsMultilabel(unique_y=unique_y_chunk,
is_y_sparse=y.issparse())
chunk = chunk_op.new_chunk([unique_y_chunk], dtype=out.dtype,
order=out.order, index=(0,),
shape=())
new_op = op.copy()
params = out.params
params['nsplits'] = ()
params['chunks'] = [chunk]
return new_op.new_tileables(op.inputs, kws=[params])
@classmethod
def execute(cls, ctx, op):
unique_y = ctx[op.unique_y.key]
if op.is_y_sparse:
# sparse
result = (unique_y.size in (0, 1) and
(unique_y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(unique_y)))
else:
# dense
labels = unique_y
result = len(labels) < 3 and (unique_y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
ctx[op.outputs[0].key] = result
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
    out : bool
        Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import mars.tensor as mt
>>> from mars.learn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1]).execute()
False
>>> is_multilabel([[1], [0, 2], []]).execute()
False
>>> is_multilabel(mt.array([[1, 0], [0, 0]])).execute()
True
>>> is_multilabel(mt.array([[1], [0], [0]])).execute()
False
>>> is_multilabel(mt.array([[1, 0, 0]])).execute()
True
"""
if not isinstance(y, (Base, Entity)):
if hasattr(y, '__array__') or isinstance(y, Sequence):
y = np.asarray(y)
if hasattr(y, 'shape'):
yt = y = mt.asarray(y)
else:
yt = None
else:
yt = y = mt.tensor(y)
if hasattr(y, 'dtype') and y.dtype != np.object_:
unique_y = mt.unique(y, aggregate_size=1)
else:
unique_y = None
op = IsMultilabel(y=y, unique_y=unique_y)
return op(yt, unique_y)
class TypeOfTarget(LearnOperand, LearnOperandMixin):
__slots__ = ('_unique_y_chunk', '_check_all_finite_chunk')
_op_type_ = OperandDef.TYPE_OF_TARGET
_y = AnyField('y')
# for chunks
_is_multilabel = KeyField('is_multilabel')
_first_value = KeyField('first_value')
_check_float = KeyField('check_float')
_assert_all_finite = KeyField('assert_all_finite')
_unique_y = KeyField('unique_y')
_y_shape = TupleField('y_shape')
_y_dtype = DataTypeField('y_dtype')
_checked_targets = ListField('checked_targets')
def __init__(self, y=None, is_multilabel=None, first_value=None,
check_float=None, assert_all_finite=None,
unique_y=None, y_shape=None, y_dtype=None,
checked_targets=None, **kw):
super().__init__(_y=y, _is_multilabel=is_multilabel,
_first_value=first_value, _check_float=check_float,
_assert_all_finite=assert_all_finite,
_unique_y=unique_y, _y_shape=y_shape,
_y_dtype=y_dtype, _checked_targets=checked_targets, **kw)
self._output_types = [OutputType.tensor]
@property
def y(self):
return self._y
@property
def is_multilabel(self):
return self._is_multilabel
@property
def first_value(self):
return self._first_value
@property
def check_float(self):
return self._check_float
@property
def assert_all_finite(self):
return self._assert_all_finite
@property
def unique_y(self):
return self._unique_y
@property
def y_shape(self):
return self._y_shape
@property
def y_dtype(self):
return self._y_dtype
@property
def checked_targets(self):
return self._checked_targets
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
for attr in ['_y', '_is_multilabel', '_first_value',
'_check_float', '_assert_all_finite',
'_unique_y']:
v = getattr(self, attr)
if isinstance(v, (Base, Entity)):
setattr(self, attr, next(inputs_iter))
def __call__(self, y):
inputs = [y] if isinstance(y, (Base, Entity)) else []
return self.new_tileable(inputs, shape=(), order=TensorOrder.C_ORDER,
dtype=np.dtype(object))
@classmethod
def tile(cls, op):
out = op.outputs[0]
y = op.y
chunk_inputs = []
is_multilabel_chunk = recursive_tile(is_multilabel(y)).chunks[0]
chunk_inputs.append(is_multilabel_chunk)
if not isinstance(y, (Base, Entity)):
if hasattr(y, '__array__'):
y = np.asarray(y)
y = mt.asarray(y)
if np.isnan(y.size): # pragma: no cover
raise TilesError('y has unknown shape')
chunk_op = TypeOfTarget(is_multilabel=is_multilabel_chunk,
y_shape=y.shape, y_dtype=y.dtype)
if y.ndim <= 2 and y.size > 0 and y.dtype == object:
first_value_chunk = recursive_tile(y[(0,) * y.ndim]).chunks[0]
chunk_inputs.append(first_value_chunk)
chunk_op._first_value = first_value_chunk
if y.dtype.kind == 'f':
check_float_chunk = recursive_tile(mt.any(y != y.astype(int))).chunks[0]
chunk_inputs.append(check_float_chunk)
chunk_op._check_float = check_float_chunk
assert_all_finite_chunk = recursive_tile(assert_all_finite(y)).chunks[0]
chunk_inputs.append(assert_all_finite_chunk)
chunk_op._assert_all_finite = assert_all_finite_chunk
if y.size > 0:
unique_y_chunk = recursive_tile(mt.unique(y, aggregate_size=1)).chunks[0]
chunk_inputs.append(unique_y_chunk)
chunk_op._unique_y = unique_y_chunk
chunk = chunk_op.new_chunk(chunk_inputs, dtype=out.dtype,
shape=out.shape, order=out.order, index=())
params = out.params
params['nsplits'] = ()
params['chunks'] = [chunk]
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[params])
@classmethod
def _execute(cls, ctx, op):
is_multilabel_ = ctx[op.is_multilabel.key]
shape = op.y_shape
ndim = len(shape)
dtype = op.y_dtype
if is_multilabel_:
return 'multilabel-indicator'
if ndim > 2 or (dtype == object and shape[0] and
not isinstance(ctx[op.first_value.key], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if ndim == 2 and shape[1] == 0:
return 'unknown' # [[]]
if ndim == 2 and shape[1] > 1:
suffix = '-multioutput' # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if dtype.kind == 'f' and ctx[op.check_float.key]:
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
assert ctx[op.assert_all_finite.key]
return 'continuous' + suffix
if op.unique_y is not None:
unique_y_len = len(ctx[op.unique_y.key])
else:
# y.size == 0
unique_y_len = 0
if (unique_y_len > 2) or (ndim >= 2 and shape[1] > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
@classmethod
def execute(cls, ctx, op):
target = cls._execute(ctx, op)
if op.checked_targets is not None and len(op.checked_targets) > 0:
if target not in op.checked_targets:
raise ValueError('Unknown label type: {}'.format(target))
ctx[op.outputs[0].key] = target
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d tensor of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d tensor that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, a tensor
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
tensor, sequence of sequences, or a tensor of non-sequence objects.
Examples
--------
>>> import mars.tensor as mt
>>> from mars.learn.utils.multiclass import type_of_target
>>> type_of_target([0.1, 0.6]).execute()
'continuous'
>>> type_of_target([1, -1, -1, 1]).execute()
'binary'
>>> type_of_target(['a', 'b', 'a']).execute()
'binary'
>>> type_of_target([1.0, 2.0]).execute()
'binary'
>>> type_of_target([1, 0, 2]).execute()
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0]).execute()
'multiclass'
>>> type_of_target(['a', 'b', 'c']).execute()
'multiclass'
>>> type_of_target(mt.array([[1, 2], [3, 1]])).execute()
'multiclass-multioutput'
>>> type_of_target([[1, 2]]).execute()
'multiclass-multioutput'
>>> type_of_target(mt.array([[1.5, 2.0], [3.0, 1.6]])).execute()
'continuous-multioutput'
>>> type_of_target(mt.array([[0, 1], [1, 1]])).execute()
'multilabel-indicator'
"""
valid_types = (Sequence, spmatrix) if spmatrix is not None else (Sequence,)
valid = ((isinstance(y, valid_types) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas: # pragma: no cover
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if isinstance(y, (Base, Entity)):
y = mt.tensor(y)
op = TypeOfTarget(y=y)
return op(y)
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
y_type.op._checked_targets = ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']
return y_type
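# A minimal usage sketch (not part of the original module; assumes a mars
# session is available so that ``.execute()`` can run):
#
# >>> from mars.learn.utils.multiclass import check_classification_targets
# >>> check_classification_targets([1, 0, 2]).execute()    # OK: 'multiclass'
# >>> check_classification_targets([0.1, 0.6]).execute()   # raises
# ValueError: Unknown label type: continuous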
|
{"hexsha": "074335f86646cef5cb11c8c7e5da5bd71008d2a3", "size": 14860, "ext": "py", "lang": "Python", "max_stars_repo_path": "mars/learn/utils/multiclass.py", "max_stars_repo_name": "humaohai/mars", "max_stars_repo_head_hexsha": "11373f64c3039d424f9276e610ae5ad108ea0eb1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-25T13:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-25T13:51:16.000Z", "max_issues_repo_path": "mars/learn/utils/multiclass.py", "max_issues_repo_name": "humaohai/mars", "max_issues_repo_head_hexsha": "11373f64c3039d424f9276e610ae5ad108ea0eb1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mars/learn/utils/multiclass.py", "max_forks_repo_name": "humaohai/mars", "max_forks_repo_head_hexsha": "11373f64c3039d424f9276e610ae5ad108ea0eb1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2396313364, "max_line_length": 92, "alphanum_fraction": 0.5910497981, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3753}
|
from datashape import dshape
import pandas as pd
import numpy as np
import pytest
from datashader.glyphs import (Point, _build_draw_line, _build_map_onto_pixel,
_build_extend_line, _build_draw_triangle,
_build_extend_triangles)
from datashader.utils import ngjit
def test_point_bounds_check():
df = pd.DataFrame({'x': [1, 2, 3], 'y': [5, 6, 7]})
p = Point('x', 'y')
assert p._compute_x_bounds(df['x'].values) == (1, 3)
assert p._compute_y_bounds(df['y'].values) == (5, 7)
def test_point_validate():
p = Point('x', 'y')
p.validate(dshape("{x: int32, y: float32}"))
with pytest.raises(ValueError):
p.validate(dshape("{x: string, y: float32}"))
@ngjit
def append(i, x, y, agg):
agg[y, x] += 1
@ngjit
def tri_append(x, y, agg, n):
agg[y, x] += n
def new_agg():
return np.zeros((5, 5), dtype='i4')
mapper = ngjit(lambda x: x)
map_onto_pixel = _build_map_onto_pixel(mapper, mapper)
# Line rasterization
draw_line = _build_draw_line(append)
extend_line = _build_extend_line(draw_line, map_onto_pixel)
# Triangles rasterization
draw_triangle, draw_triangle_interp = _build_draw_triangle(tri_append)
extend_triangles = _build_extend_triangles(draw_triangle, draw_triangle_interp, map_onto_pixel)
bounds = (-3, 1, -3, 1)
vt = (1., 3., 1., 3.)
def test_draw_line():
x0, y0 = (0, 0)
x1, y1 = (3, 3)
out = np.array([[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]])
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, True, False, agg)
np.testing.assert_equal(agg, out)
# plot_start = False
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
out[0, 0] = 0
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, False, False, agg)
out[0, 0] = 1
out[3, 3] = 0
np.testing.assert_equal(agg, out)
# Flip coords
x0, y0 = (0, 4)
x1, y1 = (3, 1)
out = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0]])
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, True, False, agg)
np.testing.assert_equal(agg, out)
# plot_start = False
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
out[4, 0] = 0
np.testing.assert_equal(agg, out)
agg = new_agg()
draw_line(x1, y1, x0, y0, 0, False, False, agg)
out[4, 0] = 1
    out[1, 3] = 0
    np.testing.assert_equal(agg, out)
def test_draw_line_same_point():
x0, y0 = (3, 3)
x1, y1 = (3, 3)
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
assert agg.sum() == 2
assert agg[3, 3] == 2
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, False, False, agg)
assert agg.sum() == 1
assert agg[3, 3] == 1
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, True, agg)
assert agg.sum() == 1
assert agg[3, 3] == 1
def test_draw_line_vertical_horizontal():
# Vertical
x0, y0 = (3, 3)
x1, y1 = (3, 0)
agg = new_agg()
draw_line(x0, y0, x1, y1, 0, True, False, agg)
out = new_agg()
out[:4, 3] = 1
np.testing.assert_equal(agg, out)
# Horizontal
agg = new_agg()
draw_line(y0, x0, y1, x1, 0, True, False, agg)
out = new_agg()
out[3, :4] = 1
np.testing.assert_equal(agg, out)
def test_extend_lines():
xs = np.array([0, -2, -2, 0, 0])
ys = np.array([-1, -1, 1.1, 1.1, -1])
out = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 0, 0, 0, 0]])
agg = new_agg()
extend_line(vt, bounds, xs, ys, False, agg)
np.testing.assert_equal(agg, out)
# plot_start = True
out[2, 3] += 1
agg = new_agg()
extend_line(vt, bounds, xs, ys, True, agg)
np.testing.assert_equal(agg, out)
xs = np.array([2, 1, 0, -1, -4, -1, -100, -1, 2])
ys = np.array([-1, -2, -3, -4, -1, 2, 100, 2, -1])
out = np.array([[0, 1, 0, 1, 0],
[1, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[1, 1, 0, 1, 0],
[0, 0, 0, 0, 0]])
agg = new_agg()
extend_line(vt, bounds, xs, ys, True, agg)
np.testing.assert_equal(agg, out)
def test_extend_lines_all_out_of_bounds():
xs = np.array([-100, -200, -100])
ys = np.array([0, 0, 1])
agg = new_agg()
extend_line(vt, bounds, xs, ys, True, agg)
assert agg.sum() == 0
def test_extend_lines_nan():
xs = np.array([-3, -2, np.nan, 0, 1])
ys = np.array([-3, -2, np.nan, 0, 1])
agg = new_agg()
extend_line(vt, bounds, xs, ys, True, agg)
out = np.diag([1, 1, 0, 2, 0])
np.testing.assert_equal(agg, out)
def test_extend_lines_exact_bounds():
xs = np.array([-3, 1, 1, -3, -3])
ys = np.array([-3, -3, 1, 1, -3])
agg = np.zeros((4, 4), dtype='i4')
extend_line(vt, bounds, xs, ys, True, agg)
out = np.array([[2, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]])
np.testing.assert_equal(agg, out)
agg = np.zeros((4, 4), dtype='i4')
extend_line(vt, bounds, xs, ys, False, agg)
out = np.array([[1, 1, 1, 1],
[1, 0, 0, 1],
[1, 0, 0, 1],
[1, 1, 1, 1]])
np.testing.assert_equal(agg, out)
def test_draw_triangle_nointerp():
"""Assert that we draw triangles properly, without interpolation enabled.
"""
# Isosceles triangle
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (0, 4, 0, 5), (0, 0, 0), agg, 1)
np.testing.assert_equal(agg, out)
# Right triangle
tri = [(2, 0), (0, 2), (2, 2)]
out = np.array([[0, 0, 2, 0, 0],
[0, 2, 2, 0, 0],
[2, 2, 2, 0, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (0, 4, 0, 5), (0, 0, 0), agg, 2)
np.testing.assert_equal(agg, out)
    # Two right triangles forming a trimesh
tri = [(2, 0), (1, 1), (2, 1),
(2, 1), (2, 2), (3, 2)]
out = np.array([[0, 0, 3, 0, 0],
[0, 3, 6, 0, 0],
[0, 0, 3, 3, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri[:3], (0, 4, 0, 5), (0, 0, 0), agg, 3)
draw_triangle(tri[3:], (0, 4, 0, 5), (0, 0, 0), agg, 3)
np.testing.assert_equal(agg, out)
    # Draw isosceles triangle with clipping
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (0, 3, 0, 2), (0, 0, 0), agg, 1)
np.testing.assert_equal(agg, out)
# clip from right and left
out = np.array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (1, 3, 0, 2), (0, 0, 0), agg, 1)
np.testing.assert_equal(agg, out)
# clip from right, left, top
out = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (1, 3, 1, 2), (0, 0, 0), agg, 1)
np.testing.assert_equal(agg, out)
# clip from right, left, top, bottom
out = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri, (1, 3, 1, 1), (0, 0, 0), agg, 1)
np.testing.assert_equal(agg, out)
def test_draw_triangle_interp():
"""Assert that we draw triangles properly, with interpolation enabled.
"""
# Isosceles triangle
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 3, 0, 0],
[0, 3, 3, 3, 0],
[3, 3, 3, 3, 3],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle_interp(tri, (0, 4, 0, 5), (0, 0, 0), agg, (3, 3, 3))
np.testing.assert_equal(agg, out)
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 1, 0, 0],
[0, 1, 1, 2, 0],
[2, 2, 2, 2, 3],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle_interp(tri, (0, 4, 0, 5), (0, 0, 0), agg, (1, 2, 3))
np.testing.assert_equal(agg, out)
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 3, 0, 0],
[0, 4, 5, 6, 0],
[6, 6, 7, 8, 9],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle_interp(tri, (0, 4, 0, 5), (0, 0, 0), agg, (3, 6, 9))
np.testing.assert_equal(agg, out)
tri = [(2, 0), (0, 2), (4, 2)]
out = np.array([[0, 0, 6, 0, 0],
[0, 5, 4, 4, 0],
[4, 3, 3, 2, 2],
[0, 0, 0, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle_interp(tri, (0, 4, 0, 5), (0, 0, 0), agg, (6, 4, 2))
np.testing.assert_equal(agg, out)
def test_draw_triangle_subpixel():
"""Assert that we draw subpixel triangles properly, both with and without
interpolation.
"""
# With interpolation
tri = [(2, 0), (0, 2), (4, 2),
(2, 3), (2, 3), (2, 3),
(2, 3), (2, 3), (2, 3)]
out = np.array([[0, 0, 6, 0, 0],
[0, 5, 4, 4, 0],
[4, 3, 3, 2, 2],
[0, 0, 8, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle_interp(tri[:3], (0, 4, 0, 5), (0, 0, 0), agg, (6, 4, 2))
draw_triangle_interp(tri[3:6], (2, 2, 3, 3), (0, 0, 0), agg, (6, 4, 2))
draw_triangle_interp(tri[6:], (2, 2, 3, 3), (0, 0, 0), agg, (6, 4, 2))
np.testing.assert_equal(agg, out)
# Without interpolation
tri = [(2, 0), (0, 2), (4, 2),
(2, 3), (2, 3), (2, 3),
(2, 3), (2, 3), (2, 3)]
out = np.array([[0, 0, 2, 0, 0],
[0, 2, 2, 2, 0],
[2, 2, 2, 2, 2],
[0, 0, 4, 0, 0]])
agg = np.zeros((4, 5), dtype='i4')
draw_triangle(tri[:3], (0, 4, 0, 5), (0, 0, 0), agg, 2)
draw_triangle(tri[3:6], (2, 2, 3, 3), (0, 0, 0), agg, 2)
draw_triangle(tri[6:], (2, 2, 3, 3), (0, 0, 0), agg, 2)
np.testing.assert_equal(agg, out)
|
{"hexsha": "65b1bc0dbec9567da07df79ad6f2143a14771b94", "size": 11040, "ext": "py", "lang": "Python", "max_stars_repo_path": "datashader/tests/test_glyphs.py", "max_stars_repo_name": "philippjfr/datashader", "max_stars_repo_head_hexsha": "eb9218cb810297aea2ae1030349cef6a6f3ab3cb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-31T22:13:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-31T22:13:14.000Z", "max_issues_repo_path": "datashader/tests/test_glyphs.py", "max_issues_repo_name": "philippjfr/datashader", "max_issues_repo_head_hexsha": "eb9218cb810297aea2ae1030349cef6a6f3ab3cb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datashader/tests/test_glyphs.py", "max_forks_repo_name": "philippjfr/datashader", "max_forks_repo_head_hexsha": "eb9218cb810297aea2ae1030349cef6a6f3ab3cb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-13T16:10:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-13T16:10:22.000Z", "avg_line_length": 32.2807017544, "max_line_length": 95, "alphanum_fraction": 0.4640398551, "include": true, "reason": "import numpy", "num_tokens": 4502}
|
import random
import torch
import numpy as np
import math
from torchvision import transforms as T
from torchvision.transforms import functional as F
from PIL import Image, ImageFilter
"""
Pair transforms are MODs of regular transforms so that it takes in multiple images
and apply exact transforms on all images. This is especially useful when we want the
transforms on a pair of images.
Example:
img1, img2, ..., imgN = transforms(img1, img2, ..., imgN)
"""
class PairCompose(T.Compose):
def __call__(self, *x):
for transform in self.transforms:
x = transform(*x)
return x
class PairApply:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, *x):
return [self.transforms(xi) for xi in x]
class PairApplyOnlyAtIndices:
def __init__(self, indices, transforms):
self.indices = indices
self.transforms = transforms
def __call__(self, *x):
return [self.transforms(xi) if i in self.indices else xi for i, xi in enumerate(x)]
class PairRandomAffine(T.RandomAffine):
def __init__(self, degrees, translate=None, scale=None, shear=None, resamples=None, fillcolor=0):
super().__init__(degrees, translate, scale, shear, Image.NEAREST, fillcolor)
self.resamples = resamples
def __call__(self, *x):
if not len(x):
return []
param = self.get_params(self.degrees, self.translate, self.scale, self.shear, x[0].size)
resamples = self.resamples or [self.resample] * len(x)
return [F.affine(xi, *param, resamples[i], self.fillcolor) for i, xi in enumerate(x)]
class PairRandomResizedCrop(T.RandomResizedCrop):
def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolations=None):
super().__init__(size, scale, ratio, Image.BILINEAR)
self.interpolations = interpolations
def __call__(self, *x):
if not len(x):
return []
i, j, h, w = self.get_params(x[0], self.scale, self.ratio)
interpolations = self.interpolations or [self.interpolation] * len(x)
        # Use a separate loop index so the comprehension does not shadow the crop offset `i`.
        return [F.resized_crop(xi, i, j, h, w, self.size, interpolations[k]) for k, xi in enumerate(x)]
class PairRandomHorizontalFlip(T.RandomHorizontalFlip):
def __call__(self, *x):
if torch.rand(1) < self.p:
x = [F.hflip(xi) for xi in x]
return x
class RandomBoxBlur:
def __init__(self, prob, max_radius):
self.prob = prob
self.max_radius = max_radius
def __call__(self, img):
if torch.rand(1) < self.prob:
fil = ImageFilter.BoxBlur(random.choice(range(self.max_radius + 1)))
img = img.filter(fil)
return img
class PairRandomBoxBlur(RandomBoxBlur):
def __call__(self, *x):
if torch.rand(1) < self.prob:
fil = ImageFilter.BoxBlur(random.choice(range(self.max_radius + 1)))
x = [xi.filter(fil) for xi in x]
return x
class RandomSharpen:
def __init__(self, prob):
self.prob = prob
self.filter = ImageFilter.SHARPEN
def __call__(self, img):
if torch.rand(1) < self.prob:
img = img.filter(self.filter)
return img
class PairRandomSharpen(RandomSharpen):
def __call__(self, *x):
if torch.rand(1) < self.prob:
x = [xi.filter(self.filter) for xi in x]
return x
class PairRandomAffineAndResize:
def __init__(self, size, degrees, translate, scale, shear, ratio=(3./4., 4./3.), resample=Image.BILINEAR, fillcolor=0):
self.size = size
self.degrees = degrees
self.translate = translate
self.scale = scale
self.shear = shear
self.ratio = ratio
self.resample = resample
self.fillcolor = fillcolor
def __call__(self, *x):
if not len(x):
return []
w, h = x[0].size
scale_factor = max(self.size[1] / w, self.size[0] / h)
w_padded = max(w, self.size[1])
h_padded = max(h, self.size[0])
pad_h = int(math.ceil((h_padded - h) / 2))
pad_w = int(math.ceil((w_padded - w) / 2))
scale = self.scale[0] * scale_factor, self.scale[1] * scale_factor
translate = self.translate[0] * scale_factor, self.translate[1] * scale_factor
affine_params = T.RandomAffine.get_params(self.degrees, translate, scale, self.shear, (w, h))
def transform(img):
if pad_h > 0 or pad_w > 0:
img = F.pad(img, (pad_w, pad_h))
img = F.affine(img, *affine_params, self.resample, self.fillcolor)
img = F.center_crop(img, self.size)
return img
return [transform(xi) for xi in x]
class RandomAffineAndResize(PairRandomAffineAndResize):
def __call__(self, img):
return super().__call__(img)[0]
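# A minimal usage sketch (not part of the original module); `img` and `bgr`
# are assumed to be PIL images of the same size:
#
# transforms = PairCompose([
#     PairRandomAffineAndResize((512, 512), degrees=(-5, 5), translate=(0.1, 0.1),
#                               scale=(0.4, 1.0), shear=(-5, 5)),
#     PairRandomHorizontalFlip(),
#     PairRandomBoxBlur(0.1, 5),
#     PairApply(T.ToTensor()),
# ])
# img, bgr = transforms(img, bgr)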
|
{"hexsha": "290ca21373e9165dfd95687e193dafdcd9c5fcb5", "size": 4970, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/augmentation.py", "max_stars_repo_name": "kasperschnack/BackgroundMattingV2", "max_stars_repo_head_hexsha": "65e8b0e0cae8c833b093390939a5210ccd1e7aa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-01-14T11:22:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T09:24:52.000Z", "max_issues_repo_path": "dataset/augmentation.py", "max_issues_repo_name": "kasperschnack/BackgroundMattingV2", "max_issues_repo_head_hexsha": "65e8b0e0cae8c833b093390939a5210ccd1e7aa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-12-29T21:24:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-31T21:28:19.000Z", "max_forks_repo_path": "dataset/augmentation.py", "max_forks_repo_name": "jinzishuai/BackgroundMattingV2", "max_forks_repo_head_hexsha": "29bcd887a542bc829b8d1e137d37a96c2c4df3bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-29T00:45:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-19T14:04:02.000Z", "avg_line_length": 32.2727272727, "max_line_length": 123, "alphanum_fraction": 0.6132796781, "include": true, "reason": "import numpy", "num_tokens": 1275}
|
C Copyright restrictions apply - see stsdas$copyright.stsdas
C
SUBROUTINE YCLNEWCOL(ISTAT)
*
* Module number:
*
* Module name: YCLNEWCOL
*
* Keyphrase:
* ----------
* calculate a new column based on the coef's; write to output
*
* Description:
* ------------
* This routine opens the input file, calculates the new column
* based on the coeffs; writes the results to the file.
*
* FORTRAN name: yclnewcol.for
*
* Keywords of accessed files and tables:
* --------------------------------------
*
*
* Subroutines Called:
* -------------------
*
*
* History:
* --------
* Version Date Author Description
* 1 Nov 01 A. Alexov Copy of yclopnfit.f w/some changes
*-------------------------------------------------------------------------------
* inputs:
*
* outputs:
* ISTAT - error status
*
INTEGER ISTAT
C
C UMSPUT DESTINATIONS -- CB, DAO, 4-SEP-87
C
INTEGER STDOUT
PARAMETER (STDOUT = 1)
INTEGER STDERR
PARAMETER (STDERR = 2)
CHARACTER*80 CONTXT
c
C Common block containing configuration parameters
C
CHARACTER*64 INPUT,FITTAB,OUTPUT
INTEGER ITERATION, PREV_ITERATION, PRINT
REAL*4 RELAX, CHISQ
LOGICAL CONT_FIT,USE_PARAMS
CHARACTER*4 DEP_VAR
CHARACTER*18 DEP_COL,IDEP_COL,SEL_COL,NEW_COL
REAL*8 A,B,C,D,E,O,P,Q,R,S
CHARACTER*5 A_MOD, B_MOD, C_MOD, D_MOD, E_MOD
CHARACTER*5 O_MOD, P_MOD, Q_MOD, R_MOD, S_MOD
COMMON /CONFG1/RELAX,INPUT,FITTAB,OUTPUT,ITERATION,PRINT,
* PREV_ITERATION, CHISQ, CONT_FIT,
* USE_PARAMS,DEP_VAR,DEP_COL,IDEP_COL,SEL_COL,NEW_COL
COMMON /CONFG2/A, B, C, D, E, O, P, Q, R, S
COMMON /CONFG3/A_MOD, B_MOD, C_MOD, D_MOD,
* E_MOD, O_MOD, P_MOD, Q_MOD, R_MOD, S_MOD
LOGICAL MOD_VAL(10)
REAL*8 INIT_GUESS(10), ACTUAL_VAL(10), ERROR_VAL(10)
COMMON /FIT_DATA/INIT_GUESS, ACTUAL_VAL, ERROR_VAL, MOD_VAL
REAL*4 X_ARRAY(4096)
REAL*8 WAVE_ARRAY(4096)
REAL*4 CALCX_ARRAY(4096)
REAL*8 CALCW_ARRAY(4096)
COMMON /XWAVE_DATA/X_ARRAY, WAVE_ARRAY, NUM_IN_PNTS
COMMON /CALC_DATA/CALCX_ARRAY, CALCW_ARRAY
C
INTEGER NN
LOGICAL NULL
REAL*8 VAL_DOUB, YOUT
REAL*4 VAL_REAL
C
C FILE I/O ACCESS MODES
C
INTEGER RDWRIT
PARAMETER (RDWRIT = 2)
INTEGER TBNROW,CTYPE(3)
PARAMETER (TBNROW = 21)
INTEGER IDIN,COLIDS(3),NROWS
CHARACTER*19 CNAM(3),CUNIT(3)
CHARACTER*7 CFORMAT(3)
INTEGER TYDOUB, TYREAL
PARAMETER (TYDOUB = 7)
PARAMETER (TYREAL = 6)
LOGICAL EXISTS
INCLUDE 'fiti.inc'
DOUBLE PRECISION FZDERIV(FZPARMAX)
INCLUDE 'fitc.inc'
C---------------------------------------------------------------------------
IF(DEP_VAR.EQ.'X') THEN ! dep=x (fit_wave2x)
CNAM(1)=IDEP_COL ! read the wavelength column to calc new X
CNAM(3)=DEP_COL
ELSE ! dep=wave (fit_x2wave)
CNAM(1)=IDEP_COL ! read the x column to calc new WAVELENGTH
CNAM(3)=DEP_COL
ENDIF
CNAM(2)=NEW_COL
CUNIT(2)=' '
CFORMAT(2)=' '
c CTYPE(2)=TYDOUB
C Check to make sure the output file exists
CALL UTTACC(INPUT,EXISTS,ISTAT)
      IF ((.NOT.EXISTS).OR.(ISTAT.NE.0)) THEN
CONTXT='ERROR input table does not exist: '//INPUT
GO TO 998
ENDIF
C
C Open table
C
CALL UTTOPN(INPUT,RDWRIT,IDIN,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR opening input table '//INPUT
GO TO 998
ENDIF
C get number of rows
C
CALL UTPGTI(IDIN,TBNROW,NROWS,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR reading input table '//INPUT
GO TO 999
ENDIF
IF(NROWS.GT.4096)THEN
CONTXT='ERROR input tab exceeds max num of rows (4096)'
GO TO 999
ENDIF
C
C Get column ids.
CALL UTCFND(IDIN,CNAM(1),1,COLIDS(1),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR locating needed column in input table '//
* INPUT
GO TO 999
ENDIF
CALL UTCFND(IDIN,CNAM(3),1,COLIDS(3),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR locating needed column in input table '//
* INPUT
GO TO 999
ENDIF
C
C Find out the datatype of the (3) column, in order to create the new
C column with the same type
C
CALL UTCINF(IDIN,COLIDS(3),CNAM(3),CUNIT(3),CFORMAT(3),
* CTYPE(3),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR getting column info in input table '//
* INPUT
GO TO 999
ENDIF
CTYPE(2)=CTYPE(3) ! set the new column type in case need it
C
C Check if the new column exists, if not, then create it (using the
C same data type as the cname(3) column)
C
CALL UTCFND(IDIN,CNAM(2),1,COLIDS(2),ISTAT)
! if the column is not found, then create it
IF(ISTAT.NE.0)THEN
CALL UTCDEF(IDIN,CNAM(2),CUNIT(2),CFORMAT(2),CTYPE(2),1,
* COLIDS(2),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR creating needed column in input table '//
* INPUT
GO TO 999
ENDIF
ELSE ! column has been found in the input file
CONTXT='WARNING overwriting previous values in new column '//
* NEW_COL
CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
CALL UTCINF(IDIN,COLIDS(2),CNAM(2),CUNIT(2),CFORMAT(2),
* CTYPE(2),ISTAT) ! get the ctype(2) information
IF(ISTAT.NE.0)THEN
CONTXT='ERROR getting column info in input table '//
* INPUT
GO TO 999
ENDIF
ENDIF
C
C check to make sure the data types are correct
CALL UTCINF(IDIN,COLIDS(1),CNAM(1),CUNIT(1),CFORMAT(1),
* CTYPE(1),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR getting column info in input table '//
* INPUT
GO TO 999
ENDIF
CALL UTCINF(IDIN,COLIDS(2),CNAM(2),CUNIT(2),CFORMAT(2),
* CTYPE(2),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR getting column info in input table '//
* INPUT
GO TO 999
ENDIF
IF ((CTYPE(1).NE.TYDOUB).AND.(CTYPE(1).NE.TYREAL)) THEN
CONTXT='ERROR col type is neither double nor real in '//
* INPUT
GO TO 999
ENDIF
IF ((CTYPE(2).NE.TYDOUB).AND.(CTYPE(2).NE.TYREAL)) THEN
CONTXT='ERROR col type is neither double nor real in '//
* INPUT
GO TO 999
ENDIF
C get the data
DO 2213 NN = 1, NROWS
IF(CTYPE(1).EQ.TYREAL) THEN
CALL UTRGTR(IDIN,COLIDS(1),1,NN,VAL_REAL,NULL,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR reading input table '//INPUT
GO TO 999
ENDIF
IF (NULL) THEN ! don't calculate or write to output table
ccc WRITE (CONTXT,9001) 'ROW #', NN, ', IDEP=INDEF',
ccc * ', YOUT=INDEF'
ccc CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
ELSE
IF (DEP_VAR.EQ.'X') THEN ! wave input, x output
CALL FIT_WAVE2X(1,VAL_REAL,10,FZVALUE(1),
* YOUT,FZDERIV(1))
CALCX_ARRAY(NN)=REAL(YOUT)
ccc WRITE (CONTXT,9000) 'ROW #', NN, ', WAVE IN=',
ccc * VAL(NN), ', X CALC=', YOUT
IF (CTYPE(2).EQ.TYREAL) THEN
CALL UTRPTR(IDIN,COLIDS(2),1,NN,
* REAL(YOUT),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ELSE
CALL UTRPTD(IDIN,COLIDS(2),1,NN,
* YOUT,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ENDIF
ELSE ! x input, wave output
CALL FIT_X2WAVE(1,VAL_REAL,10,FZVALUE(1),
* YOUT,FZDERIV(1))
ccc WRITE (CONTXT,9000) 'ROW #', NN, ', X IN=',
ccc * VAL(NN), ', WAVE CALC=', YOUT
CALCW_ARRAY(NN)=YOUT
IF (CTYPE(2).EQ.TYREAL) THEN
CALL UTRPTR(IDIN,COLIDS(2),1,NN,
* REAL(YOUT),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ELSE
CALL UTRPTD(IDIN,COLIDS(2),1,NN,
* YOUT,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ENDIF
ENDIF
ccc CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
ENDIF
ELSE ! when data type is double
CALL UTRGTD(IDIN,COLIDS(1),1,NN,VAL_DOUB,NULL,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR reading input table '//INPUT
GO TO 999
ENDIF
IF (NULL) THEN ! don't calculate or write to output table
ccc WRITE (CONTXT,9001) 'ROW #', NN, ', IDEP=INDEF',
ccc * ', YOUT=INDEF'
ccc CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
ELSE
VAL_REAL=REAL(VAL_DOUB)
IF (DEP_VAR.EQ.'X') THEN ! wave input, x output
CALL FIT_WAVE2X(1,VAL_REAL,10,FZVALUE(1),
* YOUT,FZDERIV(1))
CALCX_ARRAY(NN)=REAL(YOUT)
ccc WRITE (CONTXT,9000) 'ROW #', NN, ', WAVE IN=',
ccc * VAL(NN), ', X CALC=', YOUT
IF (CTYPE(2).EQ.TYREAL) THEN
CALL UTRPTR(IDIN,COLIDS(2),1,NN,
* REAL(YOUT),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ELSE
CALL UTRPTD(IDIN,COLIDS(2),1,NN,
* YOUT,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ENDIF
ELSE ! x input, wave output
CALL FIT_X2WAVE(1,VAL_REAL,10,FZVALUE(1),
* YOUT,FZDERIV(1))
CALCW_ARRAY(NN)=YOUT
ccc WRITE (CONTXT,9000) 'ROW #', NN, ', X IN=',
ccc * VAL(NN), ', WAVE CALC=', YOUT
IF (CTYPE(2).EQ.TYREAL) THEN
CALL UTRPTR(IDIN,COLIDS(2),1,NN,
* REAL(YOUT),ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ELSE
CALL UTRPTD(IDIN,COLIDS(2),1,NN,
* YOUT,ISTAT)
IF(ISTAT.NE.0)THEN
CONTXT='ERROR writing to input table '
* //INPUT
GO TO 999
ENDIF
ENDIF
ENDIF
ccc CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
ENDIF
ENDIF
ccc 9000 FORMAT(A,I,A,D17.8,A,D17.8)
ccc 9001 FORMAT(A,I,A,A)
2213 CONTINUE
C close the table
CALL UTTCLO(IDIN,ISTAT)
ISTAT=0
GO TO 1000
C
999 CALL UTTCLO(IDIN,ISTAT)
998 CALL YMSPUT(CONTXT,STDOUT+STDERR,0,ISTAT)
ISTAT=1
1000 RETURN
END
|
{"hexsha": "fce876805c07225495ea1dbf956ba7c6760e8d5c", "size": 13496, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "stsdas/pkg/hst_calib/stpoa/poa_fos/fos_dispfit/yclnewcol.f", "max_stars_repo_name": "iraf-community/stsdas", "max_stars_repo_head_hexsha": "043c173fd5497c18c2b1bfe8bcff65180bca3996", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-20T10:06:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-20T10:06:48.000Z", "max_issues_repo_path": "stsdas/pkg/hst_calib/stpoa/poa_fos/fos_dispfit/yclnewcol.f", "max_issues_repo_name": "spacetelescope/stsdas_stripped", "max_issues_repo_head_hexsha": "043c173fd5497c18c2b1bfe8bcff65180bca3996", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stsdas/pkg/hst_calib/stpoa/poa_fos/fos_dispfit/yclnewcol.f", "max_forks_repo_name": "spacetelescope/stsdas_stripped", "max_forks_repo_head_hexsha": "043c173fd5497c18c2b1bfe8bcff65180bca3996", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-12T20:01:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-19T08:04:30.000Z", "avg_line_length": 36.6739130435, "max_line_length": 80, "alphanum_fraction": 0.4467249555, "num_tokens": 3545}
|
import numpy as np
import matplotlib.pyplot as plt
from eulerspiral import eulerspiral
hdg = 0 * np.pi / 180
x0 = 0
y0 = 0
fig, axs = plt.subplots(1, 2)
for ax, length in zip(axs, [5, 10]):
s = np.linspace(0, length, 20)
for curvStart in [-0.5, -0.1, 0.0, 0.1, 0.5]:
for curvEnd in [-0.5, -0.1, 0.0, 0.1, 0.5]:
spiral = eulerspiral.EulerSpiral.createFromLengthAndCurvature(length, curvStart, curvEnd)
(x, y, t) = spiral.calc(s, x0, y0, curvStart, hdg)
ax.plot(x, y, marker="x", linewidth=0.5)
ax.grid(True)
ax.set_xlim(-5, 11)
ax.set_ylim(-10, 10)
ax.set_aspect('equal', 'datalim')
ax.set_title('length = {}'.format(length))
plt.show()
|
{"hexsha": "8e7e827e024627428155d81b12503eb41da3bde6", "size": 758, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "stefan-urban/pyeulerspiral", "max_stars_repo_head_hexsha": "f7485b3575274a246872c46131846ae9882db7ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-04-24T09:57:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T08:45:35.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "stefan-urban/pyeulerspiral", "max_issues_repo_head_hexsha": "f7485b3575274a246872c46131846ae9882db7ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-07T08:50:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-01T21:19:40.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "stefan-urban/pyeulerspiral", "max_forks_repo_head_hexsha": "f7485b3575274a246872c46131846ae9882db7ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-03-19T09:00:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T07:44:38.000Z", "avg_line_length": 24.4516129032, "max_line_length": 101, "alphanum_fraction": 0.5659630607, "include": true, "reason": "import numpy", "num_tokens": 268}
|
(* Copyright 2021 Joshua M. Cohen
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*)
(*We define polynomials as lists of field elements so that we can compute with them, unlike mathcomp's
  polynomials, which are not directly computable. However, to reuse the theorems in mathcomp, we relate lpolys to polys*)
From mathcomp Require Import all_ssreflect.
Require Import mathcomp.algebra.ssralg.
Require Import mathcomp.algebra.poly.
Require Import mathcomp.algebra.polydiv.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Set Bullet Behavior "Strict Subproofs".
Require Import CommonSSR.
(*mathcomp versions of results from the helper module*)
Section LPoly.
Local Open Scope ring_scope.
Variable F : fieldType.
Definition lpoly := polynomial F.
(*Transform an arbitrary list into a valid lpoly*)
Lemma lpoly_Poly_eq: forall (p q : lpoly),
Poly p = Poly q -> p = q.
Proof.
move => p q. by rewrite !polyseqK.
Qed.
Lemma lpolyP: forall (p q : lpoly), nth 0 p =1 nth 0 q <-> p = q.
Proof.
move => p q. apply polyP.
Qed.
Definition seq_to_lpoly (s: seq F) : lpoly :=
Polynomial (rem_trail_zero_wf s).
Definition lpoly_to_seq (l: lpoly) : seq F := l.
(*We want a computable Euclidean division algorithm, so we need computable polynomial operations. We start with
addition*)
Section Add.
(*We can't define this with [zip] and [map] because we need to handle the case when one polynomial has
  ended and the remainder of the other consists of implicit zeroes. It would be inefficient to pad the
  shorter list with zeroes before summing*)
Fixpoint lpoly_add_aux (s1 s2: seq F) : seq F :=
match (s1, s2) with
| (x1 :: t1, x2 :: t2) => (x1 + x2) :: lpoly_add_aux t1 t2
| (_, _ :: _) => s2
| (_, _) => s1
end.
Definition lpoly_add (l1 l2: lpoly) : lpoly :=
seq_to_lpoly (lpoly_add_aux l1 l2).
Lemma lpoly_add_aux_nth: forall s1 s2 i,
(lpoly_add_aux s1 s2)`_i = s1`_i + s2`_i.
Proof.
move => s1. elim : s1 => [/= s2 i | h t /= IH s2 i].
- case : s2 => [//=| h t /=].
+ by rewrite nth_nil GRing.addr0.
+ by rewrite nth_nil GRing.add0r.
- case : s2 => [//= | h1 t1 /=].
+ by rewrite nth_nil GRing.addr0.
+ case : i => [//= | i /=]. apply IH.
Qed.
Lemma lpoly_add_nth: forall l1 l2 i,
(lpoly_add l1 l2)`_i = l1`_i + l2`_i.
Proof.
move => l1 l2 i. by rewrite /lpoly_add -rem_trail_zero_nth lpoly_add_aux_nth.
Qed.
Lemma lpoly_add_spec: forall l1 l2,
Poly (lpoly_add l1 l2) = Poly l1 + Poly l2.
Proof.
move => l1 l2. rewrite -polyP => i.
by rewrite coef_add_poly !polyseqK lpoly_add_nth.
Qed.
End Add.
(*In Euclidean division, we only need to multiply a polynomial p by kx^n for some scalar k. We can do this
more efficiently than general multiplication by just using a single append and map*)
Section Shift.
(*Scalar multiply*)
Definition lpoly_sc_mul_aux (s: seq F) (k: F) : seq F :=
map (fun x => k * x) s.
Lemma lpoly_sc_mul_aux_nth: forall s k i,
(lpoly_sc_mul_aux s k)`_i = k * s`_i.
Proof.
move => s k i. case Hi : (i < size s).
- by rewrite (nth_map 0).
- rewrite !nth_default //. by rewrite GRing.mulr0. by rewrite leqNgt Hi.
by rewrite size_map leqNgt Hi.
Qed.
Lemma lpoly_sc_mul_aux_wf: forall (l: lpoly) k,
k != 0 ->
last 1 (lpoly_sc_mul_aux l k) != 0.
Proof.
move => l k Hk0. rewrite /lpoly_sc_mul_aux.
have->: 1 = k * k^-1 by rewrite GRing.mulfV. rewrite last_map.
case : l => [s Hlast]. rewrite /= GRing.mulf_neq0 //.
move : Hlast. case : s => [/= H10|//]. by apply GRing.invr_neq0.
Qed.
Definition lpoly_sc_mul_aux_full (s: seq F) (k: F) : seq F :=
if k == 0 then nil else lpoly_sc_mul_aux s k.
Lemma lpoly_sc_mul_aux_full_nth: forall s k i,
(lpoly_sc_mul_aux_full s k)`_i = k * s`_i.
Proof.
move => s k i. rewrite /lpoly_sc_mul_aux_full. case Hk : (k == 0) => [/= | /=].
- apply (elimT eqP) in Hk. subst. by rewrite nth_nil GRing.mul0r.
- apply lpoly_sc_mul_aux_nth.
Qed.
Lemma lpoly_sc_mul_aux_full_wf: forall (l: lpoly) k,
last 1 (lpoly_sc_mul_aux_full l k) != 0.
Proof.
move => l k. rewrite /lpoly_sc_mul_aux_full. case Hk : (k == 0) => [/= | /=].
- apply GRing.oner_neq0.
- apply lpoly_sc_mul_aux_wf. by rewrite Hk.
Qed.
Definition lpoly_sc_mul (l: lpoly) k : lpoly :=
Polynomial (lpoly_sc_mul_aux_full_wf l k).
Lemma lpoly_sc_mul_spec: forall (l: lpoly) k,
Poly (lpoly_sc_mul l k) = k%:P * (Poly l).
Proof.
move => l k. rewrite /= -polyP => i.
rewrite !polyseqK /=. rewrite (@PolyK _ 1). 2: apply lpoly_sc_mul_aux_full_wf.
by rewrite coefCM lpoly_sc_mul_aux_full_nth.
Qed.
Lemma lpoly_sc_mul_1: forall (l: lpoly),
lpoly_sc_mul l 1 = l.
Proof.
move => l. apply lpoly_Poly_eq. by rewrite lpoly_sc_mul_spec GRing.mul1r.
Qed.
(*Now similarly, multiply by x^n*)
Definition lpoly_shift_aux (s: seq F) (n: nat) :=
nseq n 0 ++ s.
Lemma lpoly_shift_aux_nth: forall s n i,
(lpoly_shift_aux s n)`_i = if i < n then 0 else s`_(i - n).
Proof.
move => s n i. rewrite /lpoly_shift_aux nth_cat size_nseq nth_nseq.
by case : (i < n).
Qed.
Lemma lpoly_shift_aux_wf: forall (l: lpoly) n,
~~ nilp l ->
last 1 (lpoly_shift_aux l n) != 0.
Proof.
move => l n. rewrite /lpoly_shift_aux last_cat. case : l => [s Hlast].
rewrite /=. move: Hlast. by case : s.
Qed.
Definition lpoly_shift_aux_full (s: seq F) (n: nat) :=
if nilp s then nil else (lpoly_shift_aux s n).
Lemma lpoly_shift_aux_full_nth: forall s n i,
(lpoly_shift_aux_full s n)`_i = if i < n then 0 else s`_(i - n).
Proof.
move => s n i. rewrite /lpoly_shift_aux_full. case : s => [/= | h t /=].
- rewrite !nth_nil. by case : (i < n).
- apply lpoly_shift_aux_nth.
Qed.
Lemma lpoly_shift_aux_full_wf: forall (l: lpoly) n,
last 1 (lpoly_shift_aux_full l n) != 0.
Proof.
move => l n. rewrite /lpoly_shift_aux_full.
case Hs: (nilp l) => [//= | //=].
- apply GRing.oner_neq0.
- apply lpoly_shift_aux_wf. by rewrite Hs.
Qed.
Definition lpoly_shift (l: lpoly) (n: nat) : lpoly :=
Polynomial (lpoly_shift_aux_full_wf l n).
Lemma lpoly_shift_spec: forall l n,
Poly (lpoly_shift l n) = 'X^n * Poly l.
Proof.
move => l n. rewrite -polyP => i. rewrite /= coefXnM polyseqK (@PolyK _ 1).
by rewrite lpoly_shift_aux_full_nth. apply lpoly_shift_aux_full_wf.
Qed.
(*For our purposes, we would like to multiply by kx^n. We can make this more efficient by mapping
  only over the original coefficients rather than over the zero-padded list*)
Definition lpoly_sc_mul_shift_aux (s: seq F) (k: F) (n: nat) :=
nseq n 0 ++ map (fun x => k * x) s.
Lemma lpoly_sc_mul_shift_aux_equiv: forall s k n,
lpoly_sc_mul_shift_aux s k n = lpoly_sc_mul_aux (lpoly_shift_aux s n) k.
Proof.
move => s k n. rewrite /lpoly_sc_mul_shift_aux /lpoly_sc_mul_aux /lpoly_shift_aux.
rewrite map_cat. f_equal. apply (@eq_from_nth _ 0). by rewrite size_map.
move => i. rewrite size_nseq => Hi. rewrite (nth_map 0). by rewrite !nth_nseq Hi GRing.mulr0.
by rewrite size_nseq.
Qed.
Definition lpoly_sc_mul_shift_aux_full s k n :=
if (nilp s) || (k == 0) then nil else lpoly_sc_mul_shift_aux s k n.
Lemma lpoly_sc_mul_shift_aux_full_wf: forall (l: lpoly) k n,
last 1 (lpoly_sc_mul_shift_aux_full l k n) != 0.
Proof.
move => l k n. rewrite /lpoly_sc_mul_shift_aux_full.
case Hl: (nilp l) => [/= | /=].
- apply GRing.oner_neq0.
- case Hk: (k == 0) => [/= | /=].
+ apply GRing.oner_neq0.
+ rewrite lpoly_sc_mul_shift_aux_equiv.
have: last 1 (lpoly_sc_mul_aux (lpoly_shift l n) k) != 0.
apply lpoly_sc_mul_aux_wf. by rewrite Hk. by rewrite /= /lpoly_shift_aux_full Hl.
Qed.
Definition lpoly_sc_mul_shift (l: lpoly) k n :=
Polynomial (lpoly_sc_mul_shift_aux_full_wf l k n).
Lemma lpoly_sc_mul_shift_spec: forall (l: lpoly) k n,
Poly (lpoly_sc_mul_shift l k n) = k%:P * ('X^n * Poly l).
Proof.
move => l k n; rewrite /= /lpoly_sc_mul_shift_aux_full.
case Hl: (nilp l) => [/= | /=].
- apply (elimT nilP) in Hl. rewrite Hl /=. by rewrite !GRing.mulr0.
- case Hk: (k == 0).
+ apply (elimT eqP) in Hk. subst. by rewrite GRing.mul0r.
+ rewrite lpoly_sc_mul_shift_aux_equiv.
have->: Poly (lpoly_sc_mul_aux (lpoly_shift_aux l n) k) =
k %:P * Poly (lpoly_shift_aux l n). {
have Hnil: ~~(nilp l) by rewrite Hl.
pose proof lpoly_sc_mul_spec as Hmul; move : Hmul => /(_ (Polynomial (lpoly_shift_aux_wf n Hnil)) k) /=.
by rewrite /lpoly_sc_mul_aux_full Hk. }
f_equal. pose proof lpoly_shift_spec as Hspec; move: Hspec. by rewrite /lpoly_shift /= /lpoly_shift_aux_full =>
/( _ l n); rewrite Hl.
Qed.
Lemma lpoly_sc_mul_shift_1: forall (l: lpoly) n,
lpoly_sc_mul_shift l 1 n = lpoly_shift l n.
Proof.
move => l n. apply lpoly_Poly_eq. by rewrite lpoly_sc_mul_shift_spec lpoly_shift_spec GRing.mul1r.
Qed.
End Shift.
Section Monomial.
Definition lpoly_mono_aux (n: nat) : seq F := rcons (nseq n 0) 1.
Lemma lpoly_mono_aux_wf: forall n,
last 1 (lpoly_mono_aux n) != 0.
Proof.
move => n. rewrite /lpoly_mono_aux. rewrite last_rcons. apply GRing.oner_neq0.
Qed.
Definition lpoly_mono (n: nat) : lpoly := Polynomial (lpoly_mono_aux_wf n).
Lemma lpoly_mono_spec: forall n,
Poly (lpoly_mono n) = 'X^n.
Proof.
move => n. by rewrite /= /lpoly_mono_aux /= -polyseqXn polyseqK.
Qed.
End Monomial.
(*Euclidean Division*)
Section Div.
(*Lots of definitions to unfold*)
Definition lpoly_redivp_rec (l: lpoly) :=
let sq := size l in
let cq := last 0 l in
fix loop (k: nat) (qq r : lpoly) (n: nat) {struct n} : nat * lpoly * lpoly :=
if size r < sq then (k, qq, r)
else
let lc := last 0 r in
let qq1 := lpoly_add (lpoly_sc_mul qq cq) (lpoly_sc_mul (lpoly_mono (size r - sq)) lc) in
let r1 := lpoly_add (lpoly_sc_mul r cq) (lpoly_sc_mul_shift l (- lc) (size r - sq)) in
match n with
| 0 => (k.+1, qq1, r1)
| n1.+1 => loop k.+1 qq1 r1 n1
end.
Definition tuple_to_poly (x: (nat * lpoly * lpoly)) : nat * {poly F} * {poly F} :=
match x with
| (n, p1, p2) => (n, Poly p1, Poly p2)
end.
Lemma size_Poly_lpoly: forall (l: lpoly),
size (Poly l) = size l.
Proof.
move => l. f_equal. by rewrite polyseqK.
Qed.
Lemma lead_coef_Poly: forall (l: lpoly),
(lead_coef (Poly l)) = last 0 l.
Proof.
move => l. by rewrite /lead_coef nth_last /= polyseqK.
Qed.
Lemma lpoly_redivp_rec_spec: forall l k qq r n,
tuple_to_poly (lpoly_redivp_rec l k qq r n) = Pdiv.Ring.redivp_rec (Poly l) k (Poly qq) (Poly r) n.
Proof.
move => l k qq r n. move: l k qq r. elim : n => [/= l k qq r | n /= IH l k qq r].
- rewrite !size_Poly_lpoly. case Hsz: (size r < size l).
+ by [].
+ rewrite /tuple_to_poly !lpoly_add_spec !lpoly_sc_mul_spec !lead_coef_Poly !lpoly_mono_spec
lpoly_sc_mul_shift_spec /=. f_equal.
* f_equal. rewrite GRing.mulrC. f_equal. by rewrite mul_polyC.
* rewrite GRing.mulrC. f_equal. by rewrite -!mul_polyC polyCN !GRing.mulrA !GRing.mulNr.
- rewrite !size_Poly_lpoly. case Hsz: (size r < size l).
+ by [].
+ rewrite !lead_coef_Poly IH. f_equal.
* by rewrite !lpoly_add_spec !lpoly_sc_mul_spec !lpoly_mono_spec -!mul_polyC GRing.mulrC.
* rewrite !lpoly_add_spec !lpoly_sc_mul_spec !lpoly_sc_mul_shift_spec -!mul_polyC GRing.mulrC. f_equal.
by rewrite polyCN GRing.mulNr GRing.mulrA.
Qed.
Lemma zero_nil : 0%R = seq_to_lpoly nil.
Proof.
apply /eqP. rewrite eq_sym.
by rewrite -nil_poly /=.
Qed.
Definition lpoly_redivp (p q: lpoly) : nat * lpoly * lpoly :=
if nilp q then (0%N, 0, p) else lpoly_redivp_rec q 0 (seq_to_lpoly nil) p (size p).
Lemma lpoly_zero: forall (l: lpoly),
(Poly l == 0) = nilp l.
Proof.
move => l. by rewrite nil_poly polyseqK.
Qed.
Lemma lpoly_redivp_spec: forall p q,
tuple_to_poly (lpoly_redivp p q) = Pdiv.Ring.redivp (Poly p) (Poly q).
Proof.
move => p q. rewrite /lpoly_redivp locked_withE /Pdiv.Ring.redivp_expanded_def lpoly_zero.
case Hq: (nilp q).
- move: Hq; case : (polyseq q) => [//= Htriv | //=]. rewrite /=. f_equal. f_equal. by rewrite polyseqK.
- move : Hq; case : q => [l Hl]. move: Hl; case : l => [// | h t Hl Hq].
have->: polyseq (Polynomial Hl) = h :: t by [].
rewrite lpoly_redivp_rec_spec. f_equal. by rewrite polyseqK.
Qed.
(*For computability reasons, we don't want to use "==". Luckily we are in a field, so testing
for a unit is easily computable*)
Lemma f_eq_dec : forall (x y : F), { x = y } + { x <> y}.
Proof.
move => x y. apply (decP eqP).
Defined.
Definition lpoly_edivp (p q: lpoly) : nat * lpoly * lpoly :=
let '(k, d, r) := lpoly_redivp p q in
let lc := last 0 q in
if (f_eq_dec lc 0) then (k, d, r) else (0%N, (lpoly_sc_mul d (lc ^- k)), lpoly_sc_mul r (lc ^-k)).
Lemma lpoly_edivp_spec: forall p q,
tuple_to_poly (lpoly_edivp p q) = Pdiv.Field.edivp (Poly p) (Poly q).
Proof.
move => p q. rewrite /lpoly_edivp /Pdiv.Field.edivp locked_withE /Pdiv.Field.edivp_expanded_def !lead_coef_Poly.
rewrite -lpoly_redivp_spec /= /tuple_to_poly.
case Hdiv: (lpoly_redivp p q) => [[k d] r] .
rewrite GRing.unitfE. case: (f_eq_dec (last 0 q) 0) => [Hlast /= | Hlast].
- by rewrite Hlast eq_refl /=.
- rewrite /=. apply (introF eqP) in Hlast. rewrite Hlast !polyseqK /=. f_equal. f_equal.
by rewrite lpoly_sc_mul_spec /= mul_polyC polyseqK.
by rewrite lpoly_sc_mul_spec /= mul_polyC polyseqK.
Qed.
End Div.
End LPoly.
(*We will be working over GF(2), so we can give simpler functions because all leading coefficients are 1. The
code will be more efficient, which is important because this will be run many times in a loop*)
Require Import BoolField.
Require Import PolyField.
Section BoolPolyDiv.
Local Open Scope ring_scope.
Definition F := bool_fieldType.
(*Some facts about the field of booleans*)
Lemma bool_1_0: forall (f: F),
(f != 0) = (f == 1).
Proof.
move => f. by case : f.
Qed.
Lemma bool_lc: forall (l: lpoly F),
~~(nilp l) ->
last 0 l = 1.
Proof.
move => l. case : l => [l /=]. case : l => [// | h t /= Hlast Htriv]. apply /eqP. by rewrite -bool_1_0.
Qed.
Lemma neg_one: GRing.one F = - 1.
Proof.
by [].
Qed.
Definition bool_redivp_rec (l: lpoly F) :=
let sq := size l in
fix loop (qq r : lpoly F) (n: nat) {struct n} : lpoly F * lpoly F :=
if size r < sq then (qq, r)
else
let qq1 := lpoly_add qq (lpoly_mono F (size r - sq)%N) in
let r1 := lpoly_add r (lpoly_shift l (size r - sq)) in
match n with
| 0 => (qq1, r1)
| n1.+1 => loop qq1 r1 n1
end.
(*Last two elts of a tuple*)
Definition last_two {A B C : Type} (x: A * B * C) : B * C :=
match x with
| (a, b, c) => (b, c)
end.
Lemma bool_redivp_rec_spec: forall (l: lpoly F) q r n k,
~~(nilp l) ->
(bool_redivp_rec l q r n) = last_two (lpoly_redivp_rec l k q r n).
Proof.
move => l q r n. move: l q r. elim : n => [/= l q r k Hl | n /= IH l q r k Hl].
- case Hsz: (size r < size l).
+ by [].
+ rewrite /= !bool_lc //. by rewrite !lpoly_sc_mul_1 -neg_one lpoly_sc_mul_shift_1. by apply (larger_not_nil Hl).
- case Hsz: (size r < size l).
+ by [].
+ rewrite /= !bool_lc //. by rewrite !lpoly_sc_mul_1 /= -neg_one lpoly_sc_mul_shift_1 -IH.
by apply (larger_not_nil Hl).
Qed.
Definition bool_edivp (p q : lpoly F) : lpoly F * lpoly F :=
if nilp q then (0, p) else bool_redivp_rec q (seq_to_lpoly nil) p (size p).
Lemma bool_edivp_spec: forall p q,
bool_edivp p q = last_two (lpoly_edivp p q).
Proof.
move => p q. rewrite /bool_edivp /lpoly_edivp /lpoly_redivp /=.
case Hq: (nilp q) => [/= | /=].
- case: (f_eq_dec (last 0 q) 0) => [Hz //= | Hz /=].
move : Hz Hq. case : q => q. by case : q.
- rewrite (bool_redivp_rec_spec _ _ _ 0); last first. by rewrite Hq.
case Hr: (lpoly_redivp_rec q 0 (seq_to_lpoly [::]) p (size p)) => [ [k d] r].
case : (f_eq_dec (last 0 q) 0) => [Hz //= | Hz /=].
rewrite !bool_lc; last first. by rewrite Hq. by rewrite !GRing.expr1n !GRing.invr1 !lpoly_sc_mul_1.
Qed.
(*We need to enumerate all polynomials up to a certain length. This takes a bit of work due to the dependent types*)
Fixpoint seq_of_polyseqs (n: nat) : (seq (seq F)) * (seq (seq F)) :=
match n with
| 0 => ([:: [:: true]], [:: [:: true]])
| n'.+1 => let (leq_seq, eq_seq) := (seq_of_polyseqs n') in
let new_eq_seq := (map (cons true) eq_seq) ++ (map (cons false) eq_seq) in
(leq_seq ++ new_eq_seq, new_eq_seq)
end.
Lemma nil_notin_seq: forall n,
(nil \notin (seq_of_polyseqs n).1) && (nil \notin (seq_of_polyseqs n).2).
Proof.
move => n. elim : n => [//= | n /=].
case Hseq : (seq_of_polyseqs n) => [leq_seq eq_seq]. rewrite /= => /andP[Hle Heq].
rewrite !mem_cat !negb_or Hle /= {1}andbC andbA -(andbA ([::] \notin [seq false :: i | i <- eq_seq]))
andbb andbC andbA andbb. apply /andP. split; apply /mapP => [[x Hx] //].
Qed.
Lemma zero_notin_seq: forall n,
([:: 0] \notin (seq_of_polyseqs n).1) && ([:: 0] \notin (seq_of_polyseqs n).2).
Proof.
move => n. elim : n => [//= | n /=].
case Hseq : (seq_of_polyseqs n) => [leq_seq eq_seq]. rewrite /= => /andP[Hle Heq].
rewrite !mem_cat !negb_or Hle /= {1}andbC andbA -(andbA ([:: 0] \notin [seq false :: i | i <- eq_seq]))
andbb andbC andbA andbb. apply /andP.
pose proof (nil_notin_seq n) as Hnil. move: Hnil; rewrite Hseq /= => /andP[Hnileq Hnilleq].
split. apply /mapP => [[x Hin [Hx]]]. subst. by rewrite Hin in Hnilleq.
by apply /mapP => [[x Hin [Hx]]].
Qed.
Lemma seq_of_polyseqs_last: forall n s,
(s \in (seq_of_polyseqs n).1) || (s \in (seq_of_polyseqs n).2) ->
last 1 s != 0.
Proof.
move => n. elim : n => [//= s | n /=].
- rewrite !in_cons in_nil orbF orbb => /eqP Hs. rewrite Hs. by [].
- case Hseq: (seq_of_polyseqs n) => [leq_seq eq_seq]. rewrite /=. move => IH s.
rewrite !mem_cat -orbA -orbA orbC -orbA (orbC (s \in [seq false :: i | i <- eq_seq])) -!orbA
(orbA (s \in [seq false :: i | i <- eq_seq])) !orbb orbA orbb =>
/orP[/mapP [x Hx Hs] | /orP[/mapP [x Hx Hs] | Hleq]].
+ rewrite Hs /=. apply IH. by rewrite Hx orbT.
+ rewrite Hs /=. have->: (last false x) = (last 1 x). move: Hx Hs. case : x => [//= Hnil | //=].
pose proof (nil_notin_seq n) as Hnil'; move: Hnil'. by rewrite Hseq Hnil /= andbF.
apply IH. by rewrite Hx orbT.
+ apply IH. by rewrite Hleq.
Qed.
Lemma size1P: forall {T: Type} (s: seq T),
reflect (exists (x: T), s = [:: x]) (size s == 1%N) .
Proof.
move => T s. case : s => [/= | h t /=].
- apply ReflectF. by move =>[x Hx].
- case : t => [|h' t' /=].
+ apply ReflectT. by exists h.
+ apply ReflectF. by move => [x Hx].
Qed.
Lemma in_seq_of_polyseqs_snd: forall n s,
s \in (seq_of_polyseqs n).2 = ((last 1 s != 0) && (size s == n.+1)).
Proof.
move => n. elim : n => [//= s | n /=].
- rewrite !in_cons in_nil orbF.
case Hs: (s == [:: true]). apply (elimT eqP) in Hs. by rewrite Hs.
case Hsz: ((size s) == 1%N).
+ apply (elimT (size1P s)) in Hsz. case : Hsz => [x Hsx]. move: Hsx; case : x =>[ //=|->//=].
move => /eqP Hst. by rewrite Hst in Hs.
+ by rewrite andbF.
- case Hseq: (seq_of_polyseqs n) => [leq_seq eq_seq]. rewrite /= => IH s. rewrite !mem_cat.
case Hfst: (s \in [seq true :: i | i <- eq_seq]) =>[/= | /=].
+ move: Hfst => /mapP [x Hx Hs]. subst. by rewrite /= eqSS -IH Hx.
+ case Hsnd: (s \in [seq false :: i | i <- eq_seq]).
* move : Hsnd => /mapP [x Hx Hs]. subst. rewrite /= eqSS.
have->: (last false x = last 1 x). move : {Hfst} Hx. case : x =>[/= Hnil|//].
by pose proof (nil_notin_seq n) as Hnils; move : Hnils; rewrite Hseq /= Hnil andbF.
by rewrite -IH Hx.
* case Hin: (((last 1 s) != 0) && ((size s) == (n.+2))) =>[|//].
move: Hin Hsnd Hfst. case : s => [//= | h].
case : h => [ t /= /andP[Hlast Hsz] Hsnd Hfst | t /= /andP[Hlast Hsz] Hsnd Hfst];
rewrite eqSS in Hsz.
-- have Htin: (t \in eq_seq) by rewrite IH Hlast Hsz. by rewrite map_f in Hfst.
-- have Hlast': (last false t = last 1 t). move {Hsnd Hfst Hsz}. move: Hlast.
by case : t. rewrite Hlast' in Hlast. have Htin: (t \in eq_seq) by rewrite IH Hlast Hsz.
by rewrite map_f in Hsnd.
Qed.
Lemma in_seq_of_polyseqs_fst: forall n s,
s \in (seq_of_polyseqs n).1 = (last 1 s != 0) && (0 < size s <= n.+1).
Proof.
move => n. elim : n => [//= s | n /=].
- pose proof (in_seq_of_polyseqs_snd 0 s) as Hsnd. move: Hsnd; rewrite /=; move ->.
f_equal. by rewrite eq_sym eqn_leq.
- case Hseq: (seq_of_polyseqs n) => [leq_seq eq_seq]. rewrite /= => IH s. rewrite mem_cat.
pose proof (in_seq_of_polyseqs_snd (n.+1) s) as Hsnd. move: Hsnd; rewrite /= Hseq /=.
move ->. rewrite IH /=. rewrite -(andb_orr). f_equal. rewrite (leq_eqVlt _ (n.+2)).
rewrite andb_orr orbC. rewrite (@andb_idl _ (size s == n.+2)) //.
move => /eqP Hsz. by rewrite Hsz.
Qed.
Lemma seq_of_polyseqs_all_last: forall n,
all (fun x => last 1 x != 0) (seq_of_polyseqs n).1.
Proof.
move => n. rewrite all_in => x Hin. apply (@seq_of_polyseqs_last n).
by rewrite Hin.
Qed.
Definition seq_of_lpoly (n: nat) : (seq (lpoly F)) :=
sub_seq (polynomial_subType F) (seq_of_polyseqs_all_last n).
(*Finally we have what we want: an lpoly is in the list iff it is a nonzero polynomial of degree at most n*)
Lemma seq_of_lpoly_in: forall n (l: lpoly F),
(l \in seq_of_lpoly n) = (0 < size l <= n.+1).
Proof.
move => n l. rewrite sub_seq_in /= in_seq_of_polyseqs_fst /=.
case : l => [s Hs /=]. by rewrite Hs.
Qed.
(*Test for irreducibility*)
Lemma size_one: forall (p: {poly F}),
(size p == 1%N) = (p == 1).
Proof.
move => p. rewrite -val_eqE /= polyseq1. case : p => [l /=].
case : l => [//= Htriv | h t /=].
case : t => [//= | h' t' /= Hlast]. by case : h.
have->: ((size t').+2 == 1%N) = false by [].
case Hseq : ([:: h, h' & t'] == [:: 1]) =>[|//].
apply (elimT eqP) in Hseq. by case: Hseq.
Qed.
(*In the boolean field, %= is the same as ==*)
Lemma eqp_eq: forall (p q: {poly F}),
(p %= q) = (p == q).
Proof.
move => p q. case Hq0: (q == 0).
- apply (elimT eqP) in Hq0. subst. by rewrite eqp0.
- case Hdiv: (p %= q).
+ move: Hdiv; rewrite /eqp /dvdp => /andP[/eqP Hdivqp /eqP Hdivpq].
pose proof (divp_eq p q) as Hp.
pose proof (divp_eq q p) as Hq.
move: Hp Hq. rewrite Hdivqp Hdivpq !GRing.addr0 => Hp. rewrite {2} Hp GRing.mulrA.
rewrite GRing.mulrC -{1}(GRing.mulr1 q) => Hq. apply GRing.mulfI in Hq.
have: 1%N = size (q %/ p * (p %/ q)) by rewrite -(size_poly1 F) Hq.
move => /eqP Hsz1. move: Hsz1. rewrite eq_sym size_mul_eq1 => /andP[Hqp1 Hpq1].
move: Hpq1; rewrite size_one => /eqP Hpq1. move: Hp. rewrite Hpq1 GRing.mul1r. move ->.
by rewrite eq_refl. by rewrite Hq0.
+ case Heq: (p == q) =>[|//].
apply (elimT eqP) in Heq. rewrite Heq in Hdiv. by rewrite eqpxx in Hdiv.
Qed.
Lemma propTp: forall (P: Prop),
true * P <-> P.
Proof.
move => P. split. by move => [Htriv p].
move => Hp. by split.
Qed.
Lemma irreducible_poly_factor: forall (p: {poly F}),
1 < size p ->
irreducible_poly p <-> (forall (f: {poly F}), size f < size p -> (f == 1) || ~~ (f %| p)).
Proof.
move => p Hsz. rewrite /irreducible_poly Hsz /= propTp.
split.
- move => Hirred f Hszf.
case Hf1: (f == 1) =>[//|/=]. rewrite -size_one in Hf1.
case Hdiv: (f %| p) =>[|//].
apply Hirred in Hdiv. move: Hdiv; rewrite eqp_eq => /eqP Hfp. subst.
by rewrite ltnn in Hszf. by rewrite Hf1.
- move => Halt q Hszq Hqp. case Hq0: (q == 0).
+ apply (elimT eqP) in Hq0. subst. move: Hqp => /dvd0pP Hp. subst.
by rewrite eqpxx.
+ have: (size q <= size p). apply dvdp_leq. rewrite -size_poly_eq0.
case Hszp: (size p == 0%N) => [|//]. apply (elimT eqP) in Hszp. by rewrite Hszp in Hsz.
by []. rewrite leq_eqVlt => /orP[Hszpq | Hszlt].
* by rewrite -dvdp_size_eqp.
* apply Halt in Hszlt. move: Hszlt => /orP[Hq1 | Hdiv].
move: Hq1; rewrite -size_one => /eqP Hq1. by rewrite Hq1 in Hszq.
by rewrite Hqp in Hdiv.
Qed.
Lemma pred_sum: forall (n m : nat),
n != 0%N ->
m != 0%N ->
(n.-1 + m.-1)%N = (n+m).-2.
Proof.
move => n m Hn Hm. rewrite -matrix.mx'_cast. rewrite (addnC n (m.-1)) -matrix.mx'_cast //.
by rewrite addnC. all: apply pred_ord; by rewrite lt0n.
Qed.
(*We can make the search more efficient by considering only polynomials of degree up to (deg p)/2*)
Lemma irreducible_poly_factor_small: forall (p : {poly F}) n,
1 < size p ->
n < (size p).-1 <= n.*2 ->
irreducible_poly p <-> (forall (f: {poly F}), (size f) <= n.+1 -> (f == 1) || ~~ (f %| p)).
Proof.
move => p n Hp /andP[Hnle Hngt]. rewrite (irreducible_poly_factor Hp).
split; move => Hirred f Hsz.
- apply Hirred. apply (leq_ltn_trans Hsz). by rewrite -ltn_predRL.
- case (orP (ltn_leq_total n.+1 (size f))) => [Hge | Hlt]; last first.
+ by apply Hirred.
+ case Hdiv : (f %| p) =>[//= | /=]; last first. by apply orbT.
have: (f %| p) by []. apply divp_dvd in Hdiv.
rewrite /dvdp => /eqP Hmod.
pose proof (divp_eq p f) as Hpf. move: Hpf; rewrite Hmod GRing.addr0 => Hpf.
have Hf0: (f == 0) = false. { case Hf0: (f == 0) => [|//].
apply (elimT eqP) in Hf0. move: Hpf. rewrite Hf0 GRing.mulr0 => Hp0.
subst. by rewrite size_poly0 in Hsz. }
have Hpf0: (p %/ f == 0) = false. { case Hpf0: (p %/ f == 0)=>[|//].
apply (elimT eqP) in Hpf0. move: Hpf. rewrite Hpf0 GRing.mul0r => Hp0.
subst. by rewrite size_poly0 in Hsz. }
have Hszsum: size p = (size (p %/f) + size f).-1
by rewrite {1}Hpf size_proper_mul // GRing.mulf_eq0 !lead_coef_eq0 Hf0 Hpf0.
case: (orP (ltn_leq_total n.+1 (size (p %/ f)))) => [Hge' | Hlt']; last first.
* apply Hirred in Hlt'. move: Hlt' => /orP [/eqP Hpf1 | Hpfdiv].
-- move: Hpf; rewrite Hpf1 GRing.mul1r => Hpf. subst. by rewrite ltnn in Hsz.
-- by rewrite Hdiv in Hpfdiv.
* rewrite -ltn_predRL in Hge. rewrite -ltn_predRL in Hge'.
have Hn2big: n.*2 < (size f).-1 + (size (p %/ f)).-1. rewrite -addnn. by apply ltn_add2rl.
have: (size p).-1 < (size f).-1 + (size (p %/ f)).-1 by apply (leq_ltn_trans Hngt).
rewrite Hszsum pred_sum. by rewrite addnC ltnn. by rewrite size_poly_eq0 Hf0.
by rewrite size_poly_eq0 Hpf0.
Qed.
Definition isOne (l: lpoly F) :=
match (polyseq l) with
| true :: nil => true
| _ => false
end.
Lemma isOne_spec: forall l,
(isOne l) = (l == 1).
Proof.
move => l. rewrite -size_one /= /isOne.
case : l => [l Hl /=]. move: Hl. case : l => [//= | h t /=]. case : h => [/=|//=].
case : t =>[//= | //=]. by case : t.
Qed.
Definition bool_modp (p q: lpoly F) : lpoly F :=
(bool_edivp p q).2.
Lemma bool_modp_spec: forall p q,
Poly (bool_modp p q) = modp (Poly p) (Poly q).
Proof.
move => p q. rewrite /modp /bool_modp. rewrite bool_edivp_spec /last_two /=.
case Hdiv: (lpoly_edivp p q) => [[k' q'] r' /=].
by rewrite -lpoly_edivp_spec Hdiv /=.
Qed.
Definition bool_dvdp (p q: lpoly F) : bool :=
nilp (bool_modp q p).
Lemma bool_dvdp_spec: forall p q,
(bool_dvdp p q) = ((Poly p) %| (Poly q)).
Proof.
move => p q. rewrite /dvdp /bool_dvdp.
by rewrite -bool_modp_spec /= lpoly_zero.
Qed.
(*Finally, a (computable) irreducibility test for lpolys: it searches for a nontrivial divisor and returns None iff the polynomial is irreducible*)
Definition find_irred (l: lpoly F) (n: nat) : option (lpoly F) :=
find_val_option (fun q => ~~ (isOne q) && (bool_dvdp q l)) (seq_of_lpoly n).
Lemma find_irredP: forall (l: lpoly F) n,
1 < size l ->
n < (size l).-1 <= n.*2 ->
reflect (irreducible_poly (Poly l)) ((find_irred l n == None)).
Proof.
move => l n Hl Hn. case Hfind: (find_irred l n) => [p /= | /=].
- move: Hfind; rewrite /find_irred => Hfind.
have Hin: p \in (seq_of_lpoly n) by apply (find_val_option_some_in Hfind).
apply find_val_option_some in Hfind.
apply ReflectF. rewrite (@irreducible_poly_factor_small _ n); last first.
by rewrite size_Poly_lpoly. by rewrite size_Poly_lpoly.
move => Hall; move: Hfind => /andP[Hone Hdiv].
rewrite seq_of_lpoly_in in Hin. move: Hin => /andP[Hp0 Hpn].
apply Hall in Hpn. move: Hpn => /orP [Hp1 | Hpdiv].
+ rewrite isOne_spec in Hone. by rewrite Hp1 in Hone.
+ rewrite bool_dvdp_spec in Hdiv. rewrite polyseqK in Hdiv. by rewrite Hdiv in Hpdiv.
- apply ReflectT. rewrite (@irreducible_poly_factor_small _ n); last first.
by rewrite size_Poly_lpoly. by rewrite size_Poly_lpoly.
have: (find_irred l n) == None. by apply /eqP. rewrite -isSome_none /find_irred -find_val_option_none
all_in => Hall f Hszf.
case Hf: (f == 0). apply (elimT eqP) in Hf. subst. rewrite dvd0p.
case Hl0 : (Poly l == 0) => [//= | //=]. rewrite lpoly_zero in Hl0. apply (elimT nilP) in Hl0.
move: Hl. by rewrite Hl0 /=. by rewrite orbT.
have Hinf: (f \in (seq_of_lpoly n)). by rewrite seq_of_lpoly_in Hszf size_poly_gt0 Hf.
apply Hall in Hinf. move: Hinf. rewrite negb_and negbK => /orP[Hone | Hdiv].
+ by rewrite -isOne_spec Hone.
+ move: Hdiv; rewrite bool_dvdp_spec polyseqK. move ->. by rewrite orbT.
Qed.
(** Testing for Primitive lpolys*)
(*Similarly, we want a computable method for determining whether a polynomial over GF(2) is primitive. We want to
enumerate 'X^n - 1 = 'X^n + 1 for all 0 < n < b for some bound b. Because this bound will be quite large, we
want to do this as efficiently as possible*)
Definition xn1_seq (n: nat) : seq F := true :: rcons (nseq (n.-1) false) true.
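(*Hypothetical example (illustration only): xn1_seq 4 = [:: true; false; false; false; true],
  the coefficient list, lowest degree first, of x^4 + 1 = x^4 - 1 over GF(2)*)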
Lemma xn1_seq_wf: forall n,
last 1 (xn1_seq n) != 0.
Proof.
move => n. by rewrite /xn1_seq /= last_rcons.
Qed.
Definition xn1_lpoly (n: nat) : lpoly F := Polynomial (xn1_seq_wf n).
Lemma xn1_lpoly_spec: forall n,
n != 0%N ->
Poly (xn1_lpoly n) = 'X^n - 1.
Proof.
move => n Hn. rewrite -polyP => i.
rewrite coef_Poly /= /xn1_seq /= coefB coef1 coefXn.
case Hi0: (i == 0%N).
- apply (elimT eqP) in Hi0. subst. have->: (0%N == n) = false. rewrite eq_sym. by apply negbTE.
by [].
- have Hi: i = i.-1.+1 by rewrite prednK // lt0n Hi0. rewrite {1}Hi /= nth_rcons size_nseq nth_nseq.
case Hin: (i == n) => [/= | /=].
+ apply (elimT eqP) in Hin. subst. by rewrite ltnn eq_refl.
+ case : (i.-1 < n.-1)=>[//|/=].
case Hin': (i.-1 == n.-1) =>[|//]. apply (elimT eqP) in Hin'. apply PeanoNat.Nat.pred_inj in Hin'.
subst. by rewrite eq_refl in Hin. apply /eqP. by rewrite Hi0. by apply /eqP.
Qed.
Definition all_xn1 (n: nat) : seq (lpoly F) :=
map xn1_lpoly (iota 1 (n.-1)).
Lemma all_xn1_in: forall n l,
reflect (exists2 i: nat, (0 < i < n) & (l = xn1_lpoly i)) (l \in (all_xn1 n)).
Proof.
move => n l.
rewrite /all_xn1. case Hn0: (n == 0%N).
- move: Hn0 => /eqP Hn0. subst. rewrite /=. apply ReflectF. move => [x].
by rewrite ltn0 andbF.
- case Hin: (l \in [seq xn1_lpoly i | i <- iota 1 n.-1]).
+ apply ReflectT. move: Hin => /mapP [x Hin Hlx]. subst. exists x. move: Hin.
by rewrite mem_iota addnC addn1 prednK // lt0n Hn0. by [].
+ apply ReflectF. move: Hin => /mapP Hin [x] Hx Hl.
subst. apply Hin. exists x. by rewrite mem_iota addnC addn1 prednK // lt0n Hn0. by [].
Qed.
Definition prim_div_check (l: lpoly F) : option (lpoly F) :=
find_val_option (bool_dvdp l) (all_xn1 (2%N ^ ((size l).-1)).-1).
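(*Hypothetical reading (illustration only): prim_div_check l returns the first
  x^i + 1 with 0 < i < 2^(deg l) - 1 that l divides, and None when no such i
  exists, i.e. when the order of x modulo l is no smaller than 2^(deg l) - 1*)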
Lemma prim_div_check_spec: forall (l: lpoly F),
reflect (forall n, (Poly l) %| 'X^n - 1 -> (n == 0%N) || (((#|F|^((size (Poly l)).-1)).-1) <= n))
(prim_div_check l == None).
Proof.
move => l. case Hcheck : (prim_div_check l ) => [q /= | /=].
- apply ReflectF. move: Hcheck; rewrite /prim_div_check => Hcheck.
have Hinq: q \in (all_xn1 (2 ^ (size l).-1).-1) by apply (find_val_option_some_in Hcheck).
apply find_val_option_some in Hcheck. rewrite card_bool. move => Hall.
apply (elimT (all_xn1_in ((2 ^ (size l).-1).-1) _)) in Hinq.
case : Hinq => [i /andP[Hi0 Hisz] Hqi]. subst.
have Hi0f: (i == 0%N) = false by rewrite eqn0Ngt Hi0.
move : Hcheck; rewrite bool_dvdp_spec xn1_lpoly_spec; last first. by rewrite Hi0f.
move => Hdiv. apply Hall in Hdiv. move: Hdiv => /orP [Hi0t | Hiszbig].
+ by rewrite Hi0t in Hi0f.
+ move: Hiszbig. by rewrite size_Poly_lpoly leqNgt Hisz.
- apply ReflectT. move => n Hdiv.
move: Hcheck; rewrite /prim_div_check => /eqP Hcheck.
move: Hcheck; rewrite -isSome_none -find_val_option_none all_in => Hall.
have: 0 <= n by []. rewrite leq_eqVlt => /orP[Hn0 | Hn0]. by rewrite eq_sym Hn0.
have->/=: (n == 0%N) = false by rewrite eqn0Ngt Hn0. rewrite card_bool size_Poly_lpoly.
case (orP (ltn_leq_total n ((2 ^ (size l).-1).-1))) => [Hin | //].
move: Hall => /(_ (xn1_lpoly n)) /=.
have-> : (xn1_lpoly n \in all_xn1 (2 ^ (size l).-1).-1). apply /all_xn1_in.
exists n. by rewrite Hn0 Hin. by []. move => Hnodiv. apply rem_impl in Hnodiv.
move: Hnodiv. rewrite bool_dvdp_spec xn1_lpoly_spec. by rewrite Hdiv. by apply lt0n_neq0.
Qed.
Definition find_prim (l: lpoly F) (n: nat) : bool :=
((find_irred l n) == None) && (bool_dvdp l (xn1_lpoly ((2^((size l).-1)).-1))) &&
((prim_div_check l) == None).
Lemma find_primP: forall (l: lpoly F) n,
1 < size l ->
n < (size l).-1 <= n.*2 ->
reflect (primitive_poly l) (find_prim l n).
Proof.
move => l n Hszl Hn.
have Hpow0: (2 ^ (size l).-1).-1 != 0%N. { have: 2^ 1 <= 2 ^((size l).-1) by rewrite leq_exp2l // ltn_predRL.
have->: 2 ^ 1 = 2%N by []. move => Hbound. by rewrite -lt0n ltn_predRL. }
case Hprim: (find_prim l n).
- apply ReflectT. move: Hprim; rewrite /find_prim /primitive_poly =>
/andP[/andP [/(find_irredP Hszl Hn) Hirred Hdiv] / prim_div_check_spec Hdivcheck].
split. by rewrite polyseqK in Hirred.
split. rewrite /= card_bool. move: Hdiv. rewrite bool_dvdp_spec polyseqK xn1_lpoly_spec //.
rewrite /=. move: Hdivcheck. by rewrite polyseqK card_bool.
- apply ReflectF. move: Hprim; rewrite /find_prim /primitive_poly => Hfalse [Hirred [Hdiv Halldiv]].
have: (irreducible_poly (Poly l)) by rewrite polyseqK. move => /(find_irredP Hszl Hn) Hirrb.
have Hprimb: (prim_div_check l == None). apply /prim_div_check_spec. rewrite /= polyseqK. apply Halldiv.
move: Hfalse; rewrite Hprimb Hirrb /= andbT bool_dvdp_spec polyseqK xn1_lpoly_spec.
move: Hdiv. by rewrite /= card_bool; move ->. by [].
Qed.
(** Concrete Polynomials*)
(*The following are the polynomials that appear in the FEC code. Only p256 is used, but the
others could be used in principle if certain flags are changed*)
(*1011*)
Definition p8 := seq_to_lpoly [:: true; true; false; true].
(*10011*)
Definition p16 := seq_to_lpoly [:: true; true; false; false; true].
(*100101*)
Definition p32 := seq_to_lpoly [:: true; false; true; false; false; true].
(*1000011*)
Definition p64 := seq_to_lpoly [:: true; true; false; false; false; false; true].
(*10001001*)
Definition p128:= seq_to_lpoly [:: true; false; false; true; false; false; false; true].
(*100011101*)
Definition p256 := seq_to_lpoly [:: true; false; true; true; true; false; false; false; true].
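(*Hypothetical reading (illustration only): the coefficient lists are lowest
  degree first, so p8 = x^3 + x + 1 and p256 = x^8 + x^4 + x^3 + x^2 + 1, the
  latter being the generator commonly used for GF(256) Reed-Solomon arithmetic*)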
Lemma p8_primitive : primitive_poly p8.
Proof.
have Hsz: 1 < size p8 by [].
have Hn: 2 < (size p8).-1 <= 2.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
Lemma p16_primitive: primitive_poly p16.
Proof.
have Hsz: 1 < size p16 by [].
have Hn: 2 < (size p16).-1 <= 2.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
Lemma p32_primitive: primitive_poly p32.
Proof.
have Hsz: 1 < size p32 by [].
have Hn: 3 < (size p32).-1 <= 3.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
Lemma p64_primitive: primitive_poly p64.
Proof.
have Hsz: 1 < size p64 by [].
have Hn: 3 < (size p64).-1 <= 3.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
Lemma p128_primitive: primitive_poly p128.
Proof.
have Hsz: 1 < size p128 by [].
have Hn: 4 < (size p128).-1 <= 4.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
Lemma p256_primitive: primitive_poly p256.
Proof.
have Hsz: 1 < size p256 by [].
have Hn: 4 < (size p256).-1 <= 4.*2 by [].
apply (elimT (find_primP Hsz Hn)). by vm_compute.
Qed.
End BoolPolyDiv.
|
{"author": "verified-network-toolchain", "repo": "Verified-FEC", "sha": "b96e4b3442d0f0611bbcace57c6fff2b229ed4e2", "save_path": "github-repos/coq/verified-network-toolchain-Verified-FEC", "path": "github-repos/coq/verified-network-toolchain-Verified-FEC/Verified-FEC-b96e4b3442d0f0611bbcace57c6fff2b229ed4e2/proofs/Poly/ListPoly.v"}
|
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from dtoolkit.util import multi_if_else
if TYPE_CHECKING:
from typing import Iterable
from dtoolkit._typing import OneDimArray
from dtoolkit._typing import SeriesOrFrame
from dtoolkit._typing import TwoDimArray
def get_inf_range(inf: str = "all") -> list[float]:
return multi_if_else(
[
(inf == "all", [np.inf, -np.inf]),
(inf == "pos", [np.inf]),
(inf == "neg", [-np.inf]),
(inf is not None, ValueError(f"invalid inf option: {inf}")),
],
TypeError("must specify inf"),
)
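# Hypothetical behavior sketch (not part of the original module, and assuming
# multi_if_else raises the matched exception object):
#   get_inf_range("pos")  -> [np.inf]
#   get_inf_range("bad")  -> raises ValueError("invalid inf option: bad")
#   get_inf_range(None)   -> raises TypeError("must specify inf")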
def get_mask(how: str, mask: TwoDimArray, axis: int) -> OneDimArray:
return multi_if_else(
[
(how == "any", mask.any(axis=axis)),
(how == "all", mask.all(axis=axis)),
(how is not None, ValueError(f"invalid how option: {how}")),
],
TypeError("must specify how"),
)
def isin(
df: pd.DataFrame,
values: Iterable | SeriesOrFrame | dict[str, list[str]],
axis: int | str = 0,
) -> pd.DataFrame:
"""
Extend :meth:`~pandas.DataFrame.isin` function. When ``values`` is
:obj:`dict` and ``axis`` is 1, ``values``' key could be index name.
"""
from collections import defaultdict
axis = df._get_axis_number(axis)
if isinstance(values, dict) and axis == 1:
values = defaultdict(list, values)
result = (df.iloc[[r]].isin(values[i]) for r, i in enumerate(df.index))
return pd.concat(result, axis=0)
return df.isin(values)
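# Hypothetical usage sketch (values assumed, not part of the original module):
#   df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   isin(df, {"x": [1], "y": [3]}, axis=1)
#   row "x" is tested against [1], row "y" against [3]; a missing index key
#   falls back to an empty list via defaultdict, so that row matches nothing.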
# based on more_itertools/more.py
def collapse(iterable: Iterable):
def walk(node):
if isinstance(node, (str, bytes)):
yield node
return
try:
tree = iter(node)
except TypeError:
yield node
return
else:
for child in tree:
yield from walk(child)
yield from walk(iterable)
|
{"hexsha": "9a856e97238dce313458d7adc27a2d5d02d6d275", "size": 2034, "ext": "py", "lang": "Python", "max_stars_repo_path": "dtoolkit/accessor/_util.py", "max_stars_repo_name": "Zeroto521/my-data-toolkit", "max_stars_repo_head_hexsha": "bde37f625aa81e65b97648798535f6d931864888", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-09T04:50:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T04:50:58.000Z", "max_issues_repo_path": "dtoolkit/accessor/_util.py", "max_issues_repo_name": "Zeroto521/my-data-toolkit", "max_issues_repo_head_hexsha": "bde37f625aa81e65b97648798535f6d931864888", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 427, "max_issues_repo_issues_event_min_datetime": "2021-06-04T02:40:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:55:52.000Z", "max_forks_repo_path": "dtoolkit/accessor/_util.py", "max_forks_repo_name": "Zeroto521/my-data-toolkit", "max_forks_repo_head_hexsha": "bde37f625aa81e65b97648798535f6d931864888", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-09T09:56:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-09T09:56:44.000Z", "avg_line_length": 25.746835443, "max_line_length": 79, "alphanum_fraction": 0.5860373648, "include": true, "reason": "import numpy", "num_tokens": 501}
|
#!/usr/bin/env python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import division
from __future__ import print_function
import math
import time
import json
import numpy as np
import tensorflow as tf
import sys
class Model(object):
"""The model."""
def __init__(self, config, is_training=True, loss_fct="softmax", test_opti=False, use_fp16=False):
self.config = config
self.loss_fct = loss_fct
self.is_training = is_training
self.use_fp16 = use_fp16
self.embed_dim = config.hidden_size
if hasattr(config, 'embed_dim'):
self.embed_dim = config.embed_dim
self._build_model()
def _build_model(self):
batch_size = self.batch_size
hidden_size = self.hidden_size
vocab_size = self.vocab_size
num_layers = self.num_layers
keep_prob = self.keep_prob
is_training = self.is_training
data_type = self.data_type
embed_dim = self.embed_dim
_inputs = tf.placeholder(tf.int32, [batch_size, None], "inputs")
_targets = tf.placeholder(tf.int32, [batch_size, None], "targets")
# Embedding data
with tf.device("/cpu:0"):
embedding = tf.get_variable(
"embedding", [vocab_size, embed_dim], dtype=data_type)
inputs = tf.nn.embedding_lookup(embedding, _inputs)
# Droupout
if is_training and keep_prob < 1:
inputs = tf.nn.dropout(inputs, keep_prob)
# Creating the cells
import inspect
def lstm_cell():
# With the latest TensorFlow source code (as of Mar 27, 2017),
# the BasicLSTMCell will need a reuse parameter which is unfortunately not
# defined in TensorFlow 1.0. To maintain backwards compatibility, we add
# an argument check here:
size = hidden_size
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.BasicLSTMCell.__init__).args:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True,
reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.BasicLSTMCell(
size, forget_bias=0.0, state_is_tuple=True)
def gru_cell():
size = hidden_size
if 'reuse' in inspect.getargspec(
tf.contrib.rnn.GRUCell.__init__).args:
return tf.contrib.rnn.GRUCell(
size, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.GRUCell(
size)
if self.config.cell == 'gru':
_cell_creator = gru_cell
else:
_cell_creator = lstm_cell
if is_training and keep_prob < 1:
cell_creator = lambda:tf.contrib.rnn.DropoutWrapper(
_cell_creator(), output_keep_prob=keep_prob)
else:
cell_creator = _cell_creator
cell = tf.contrib.rnn.MultiRNNCell([cell_creator() for _ in range(num_layers)], state_is_tuple=True)
_initial_state = cell.zero_state(batch_size, data_type)
# if num_steps == 0 we are in 'sentence mode' aka. dynamic
if self.config.num_steps == 0:
_mask = tf.sign(tf.to_float(_inputs))
else:
_mask = tf.ones([self.batch_size, self.config.num_steps])
_seq_len = tf.reduce_sum(_mask, reduction_indices=1)
# outputs is [bs x ts x hidden_size] (ts may be None)
_outputs, state = tf.nn.dynamic_rnn(cell=cell, inputs=inputs,
initial_state=_initial_state,
sequence_length=_seq_len)
_mask = tf.reshape(_mask, [-1])
# output: [bs*ts x hidden_size]
_output = tf.reshape(_outputs, [-1, hidden_size])
self.inputs = _inputs
self.targets= _targets
self.mask = _mask
self.seq_len = _seq_len
self.output = _output
loss, logits = self.compute_loss()
_cost = tf.reduce_sum(loss) / batch_size
self._final_state = state
self._initial_state = _initial_state
self.loss, self.logits = loss, logits
self.cost = _cost
if logits is not None:
elems = tf.range(vocab_size)
self.choices = tf.multinomial(logits, 1)
if not is_training:
return
self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(_cost, tvars),
self.config.max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(self._lr)
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
self._new_lr = tf.placeholder(
tf.float32, shape=[], name="new_learning_rate")
self._lr_update = tf.assign(self._lr, self._new_lr)
def compute_loss(self):
fct = self.loss_fct
fast_test = self.fast_test
vocab_size = self.vocab_size
hidden_size = self.hidden_size
# TF 1.1
_sequence_loss_by_example = tf.contrib.legacy_seq2seq.sequence_loss_by_example
loss, logits = None, None
if self.is_training or not (fast_test and fct == "softmax"):
self.w = tf.get_variable("w", [vocab_size, hidden_size], dtype=self.data_type)
self.b = tf.get_variable("b", [vocab_size], dtype=self.data_type)
if fct == "softmax":
# Softmax uses transposed weights which is very slow.
# See 'transpose.py' for more information about fast_test
self.w_t = tf.transpose(self.w)
else:
# The fast-test trick uses a model saved with w_t instead of w.
# See 'transpose.py' to transpose your models in order to use fast test.
self.w_t = tf.get_variable("w_t", [hidden_size, vocab_size], dtype=self.data_type)
self.b = tf.get_variable("b", [vocab_size], dtype=self.data_type)
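# Sketch of the trick (hypothetical comment, not in the original file):
# softmax needs logits = output @ W^T + b. Saving the model with w_t = W^T
# precomputed (see transpose.py) avoids a [vocab x hidden] transpose at
# every test-time step.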
if fct == "softmax":
logits = tf.matmul(self.output, self.w_t)+self.b
loss = _sequence_loss_by_example(
[logits],
[tf.reshape(self.targets, [-1])],
[self.mask])
elif fct == "sampledsoftmax":
def _loss_fct(labels, logits):
return tf.nn.sampled_softmax_loss(
self.w,
self.b,
tf.cast(labels, tf.float32),
tf.cast(logits,tf.float32),
self.num_samples,
vocab_size)
loss = _sequence_loss_by_example(
[self.output],
[tf.reshape(self.targets, [-1,1])],
[self.mask],
softmax_loss_function=_loss_fct)
elif fct == "nce":
def _loss_fct(labels, logits):
labels = tf.reshape(labels, [-1, 1])
return tf.nn.nce_loss(self.w,
self.b,
tf.cast(labels, tf.float32),
tf.cast(logits, tf.float32),
self.num_samples,
self.vocab_size,
partition_strategy="div")
loss = _loss_fct(self.targets, self.output)
else:
raise ValueError("Unsupported loss function: %s" % fct)
return loss, logits
def assign_lr(self, session, lr_value):
session.run(self._lr_update, feed_dict={self._new_lr: lr_value})
def save_config(self):
self.config.step = self.step
self.config.epoch = self.epoch
self.config.save()
@property
def initial_state(self):
return self._initial_state
@property
def final_state(self):
return self._final_state
@property
def lr(self):
return self._lr
@property
def train_op(self):
return self._train_op
@property
def data_type(self):
return tf.float16 if self.use_fp16 else tf.float32
@property
def batch_size(self):
return self.config.batch_size
@property
def vocab_size(self):
# vocab_size is increased by two for <eos> and <bos>,
# since the first index 0 is used for padding
return self.config.vocab_size+2
@property
def hidden_size(self):
return self.config.hidden_size
@property
def keep_prob(self):
return self.config.keep_prob
@property
def num_layers(self):
return self.config.num_layers
@property
def num_samples(self):
return self.config.num_samples
@property
def fast_test(self):
return self.config.fast_test
|
{"hexsha": "0c283e71e54d9f598410ad2679e4d0842146e9f7", "size": 8834, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "OhadRubin/laughing-carnival", "max_stars_repo_head_hexsha": "172bfd3b009254cc6e55ec24ca99ec7b45593bfa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2017-02-06T06:01:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T02:04:51.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "OhadRubin/laughing-carnival", "max_issues_repo_head_hexsha": "172bfd3b009254cc6e55ec24ca99ec7b45593bfa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-02-09T02:40:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-04T09:13:43.000Z", "max_forks_repo_path": "model.py", "max_forks_repo_name": "OhadRubin/laughing-carnival", "max_forks_repo_head_hexsha": "172bfd3b009254cc6e55ec24ca99ec7b45593bfa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2017-01-06T10:56:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-16T14:34:18.000Z", "avg_line_length": 30.8881118881, "max_line_length": 104, "alphanum_fraction": 0.6391215757, "include": true, "reason": "import numpy", "num_tokens": 2107}
|
##
# \file landmark_visualizer.py
# \brief Class to create image mask from landmark coordinates. Landmarks
# can also be embedded in image.
#
# \author Michael Ebner (michael.ebner.14@ucl.ac.uk)
# \date June 2018
#
import os
import numpy as np
import scipy.ndimage
import SimpleITK as sitk
import skimage.measure
import pysitk.python_helper as ph
import pysitk.simple_itk_helper as sitkh
IMPLEMENTED_MARKERS = ["dot", "cross", "sphere", "hollow_sphere"]
##
# Class to create image mask from landmark coordinates. Landmarks can also be
# embedded in image.
# \date 2018-06-08 16:54:56-0600
#
class LandmarkVisualizer(object):
def __init__(self, landmarks_nda, direction, origin, spacing, size):
self._landmarks_nda = landmarks_nda
self._direction = direction
self._origin = origin
self._spacing = np.array(spacing)
self._size = size
self._landmark_image_sitk = None
self._get_marker = {
"hollow_sphere": self._get_marker_hollow_sphere,
"sphere": self._get_marker_sphere,
"cross": self._get_marker_cross,
}
def set_landmarks_nda(self, landmarks_nda):
self._landmarks_nda = landmarks_nda
def get_image_sitk(self):
return sitk.Image(self._landmark_image_sitk)
def get_image_nda(self):
return np.array(self._landmark_image_nda)
def build_landmark_image_sitk(self, marker="cross", radius=2):
if marker not in IMPLEMENTED_MARKERS:
raise ValueError("Marker not recognized. "
"Allowed options are: %s" % ", ".join(
IMPLEMENTED_MARKERS))
nda = np.zeros(self._size[::-1], dtype=np.uint8)
foo = sitk.GetImageFromArray(nda)
foo.SetSpacing(self._spacing)
foo.SetDirection(self._direction)
foo.SetOrigin(self._origin)
for i in range(self._landmarks_nda.shape[0]):
landmark = self._landmarks_nda[i, :]
index = foo.TransformPhysicalPointToIndex(landmark)[::-1]
if True in np.isnan(landmark):
continue
if marker == "dot":
nda[index] = i + 1
else:
marker_ = self._get_marker[marker](
radius=radius,
spacing=self._spacing,
)
nda = self._apply_marker(nda, index, marker_, value=i + 1)
self._landmark_image_sitk = sitk.GetImageFromArray(nda)
self._landmark_image_sitk.SetSpacing(self._spacing)
self._landmark_image_sitk.SetDirection(self._direction)
self._landmark_image_sitk.SetOrigin(self._origin)
self._landmark_image_nda = nda
def annotate_landmarks_on_image_sitk(self, image_sitk, value=0):
image_nda = sitk.GetArrayFromImage(image_sitk)
indices = np.where(self._landmark_image_nda > 0)
image_nda[indices] = value
image_landmarks_sitk = sitk.GetImageFromArray(image_nda)
image_landmarks_sitk.CopyInformation(image_sitk)
return sitk.Image(image_landmarks_sitk)
@staticmethod
def _get_marker_hollow_sphere(radius, spacing=np.ones(3), thickness=0.5):
a = radius + np.ceil(thickness)
x = np.linspace(-a, a, int(2 * a + 1))  # num must be an int; a may be a float after np.ceil
xx, yy, zz = np.meshgrid(x, x, x)
marker = np.zeros_like(xx)
values = \
(xx / spacing[0])**2 + \
(yy / spacing[1])**2 + \
(zz / spacing[2])**2
marker[values <= (radius + thickness)**2] = 1
marker[values < (radius - thickness)**2] = 0
return marker
@staticmethod
def _get_marker_sphere(radius, spacing=np.ones(3), thickness=0):
a = radius + np.ceil(thickness)
x = np.linspace(-a, a, int(2 * a + 1))  # num must be an int; a may be a float after np.ceil
xx, yy, zz = np.meshgrid(x, x, x)
marker = np.zeros_like(xx)
values = \
(xx / spacing[0])**2 + \
(yy / spacing[1])**2 + \
(zz / spacing[2])**2
marker[values <= (radius + thickness)**2] = 1
return marker
@staticmethod
def _get_marker_cross(radius, spacing=np.ones(3), thickness=None):
a = [int(np.round(radius / s)) for s in spacing[::-1]]
x = np.arange(2 * a[0] + 1)
y = np.arange(2 * a[1] + 1)
z = np.arange(2 * a[2] + 1)
xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
marker = np.zeros_like(xx)
marker[:, a[1], a[2]] = 1
marker[a[0], :, a[2]] = 1
marker[a[0], a[1], :] = 1
return marker
@staticmethod
def _apply_marker(nda, index, marker, value):
a = int((marker.shape[0] - 1) / 2)
index = np.array(index)
indices = np.array(np.where(marker == 1)) - a
indices = tuple(indices + index[..., np.newaxis])
try:
nda[indices] = value
except IndexError:
# markers extending beyond the image bounds are silently skipped
pass
return nda
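# Hypothetical usage sketch (values assumed, not part of the original module):
#   lv = LandmarkVisualizer(
#       landmarks_nda=np.array([[10., 20., 30.]]),
#       direction=tuple(np.eye(3).ravel()), origin=(0., 0., 0.),
#       spacing=(1., 1., 1.), size=(64, 64, 64))
#   lv.build_landmark_image_sitk(marker="cross", radius=2)
#   mask = lv.get_image_nda()  # uint8 volume; landmark i is drawn with value i+1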
|
{"hexsha": "abd538429da31cfddce3aef4a534599f0ac93382", "size": 4939, "ext": "py", "lang": "Python", "max_stars_repo_path": "simplereg/landmark_visualizer.py", "max_stars_repo_name": "gift-surg/SimpleReg", "max_stars_repo_head_hexsha": "9d9a774f5b7823c2256844c9d0260395604fb396", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-11-10T15:09:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-12T07:48:46.000Z", "max_issues_repo_path": "simplereg/landmark_visualizer.py", "max_issues_repo_name": "gift-surg/SimpleReg", "max_issues_repo_head_hexsha": "9d9a774f5b7823c2256844c9d0260395604fb396", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simplereg/landmark_visualizer.py", "max_forks_repo_name": "gift-surg/SimpleReg", "max_forks_repo_head_hexsha": "9d9a774f5b7823c2256844c9d0260395604fb396", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-03-20T14:13:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-15T01:32:51.000Z", "avg_line_length": 32.0714285714, "max_line_length": 77, "alphanum_fraction": 0.589593035, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1317}
|
from PIL import Image
import os
import numpy as np
from gym_pcgrl.envs.probs.problem import Problem
from gym_pcgrl.envs.helper import get_range_reward, get_tile_locations, calc_certain_tile, get_floor_dist, get_type_grouping, get_changes
from gym_pcgrl.envs.probs.loderunner.engine import get_score
from pdb import set_trace as TT
class LoderunnerProblem(Problem):
def __init__(self):
super().__init__()
self._width = 12
self._height = 8
self._prob = {"solid": 0.03, "brick": 0.23, "ladder": 0.10, "rope": 0.032, "empty": 0.56, "gold":0.02, "enemy":0.05, "player":0.01}
self._border_size = (0,0)
self._min_enemies = 1
self._max_enemies = 3
self._min_gold = 1
self._max_gold = 10
chars_to_tiles = \
{
'.': 'empty',
'B': 'solid',
'b': 'brick',
'#': 'ladder',
'-': 'rope',
'E': 'enemy',
'G': 'gold',
'M': 'player',
}
self.tiles_to_chars = {v: k for k, v in chars_to_tiles.items()}
self._reward_weights = {
"player": 1,
# "enemies": 1,
"enemies": 0,
# "gold": 1,
"gold": 0,
"win": 1,
# "path-length": 2,
"path-length": 0,
}
def get_tile_types(self):
return ["empty", "brick", "ladder", "rope", "solid", "gold", "enemy", "player"]
def adjust_param(self, **kwargs):
super().adjust_param(**kwargs)
self._min_enemies = kwargs.get('min_enemies', self._min_enemies)
self._max_enemies = kwargs.get('max_enemies', self._max_enemies)
self._min_gold = kwargs.get('min_gold', self._min_gold)
self._max_gold = kwargs.get('max_gold', self._max_gold)
rewards = kwargs.get('rewards')
if rewards is not None:
for t in rewards:
if t in self._reward_weights:
self._reward_weights[t] = rewards[t]
def _run_game(self, map):
# string_to_char = dict((s, gameCharacters[i]) for i, s in enumerate(self.get_tile_types()))
lvl= []
for i in range(len(map)):
line = []
for j in range(len(map[i])):
string = map[i][j]
line.append(self.tiles_to_chars[string])
lvl.append(line)
score, dist = get_score(lvl)
return score, dist
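# Hypothetical example (not part of the original file): a one-row map
# [["player", "empty", "gold"]] becomes the character level [['M', '.', 'G']]
# before being scored by the engine.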
def get_stats(self, map):
map_locations = get_tile_locations(map, self.get_tile_types())
map_stats = {
"player": calc_certain_tile(map_locations, ["player"]),
"enemies": calc_certain_tile(map_locations, ["enemy"]),
"gold": calc_certain_tile(map_locations, ["gold"]),
"win": 0,
"path-length": 0
}
# if map_stats["player"] == 1 and map_stats["gold"] > 0:
if map_stats["player"] == 1:
map_stats["win"], map_stats["path-length"] = self._run_game(map)
return map_stats
#TODO: calculate reward as below for NCA
def get_reward(self, new_stats, old_stats):
# a single player and a winnable level are rewarded; the enemy, gold and
# path-length terms are kept so the weighted sum below stays well-defined,
# but they are currently weighted to zero in self._reward_weights
rewards = {
"player": get_range_reward(new_stats["player"], old_stats["player"], 1, 1),
"enemies": get_range_reward(new_stats["enemies"], old_stats["enemies"], self._min_enemies, self._max_enemies),
"gold": get_range_reward(new_stats["gold"], old_stats["gold"], self._min_gold, self._max_gold),
"win": get_range_reward(new_stats["win"], old_stats["win"], 0, 1),
"path-length": get_range_reward(new_stats["path-length"], old_stats["path-length"], np.inf, np.inf),
}
#calculate the total reward
return rewards["player"] * self._reward_weights["player"] +\
rewards["enemies"] * self._reward_weights["enemies"] +\
rewards["gold"] * self._reward_weights["gold"] +\
rewards["win"] * self._reward_weights["win"] +\
rewards["path-length"] * self._reward_weights["path-length"]
def get_episode_over(self, new_stats, old_stats):
return new_stats["win"] == 1 and new_stats["path-length"] >= 20
def get_debug_info(self, new_stats, old_stats):
return {
"player": new_stats["player"],
"enemies": new_stats["enemies"],
"gold": new_stats["gold"],
"win": new_stats["win"],
"path-length": new_stats["path-length"]
}
def render(self, map):
#new_map = self._get_runnable_lvl(map)
if self._graphics == None:
self._graphics = {
"solid": Image.open(os.path.dirname(__file__) + "/loderunner/solid.png").convert('RGBA'),
"brick": Image.open(os.path.dirname(__file__) + "/loderunner/brick.png").convert('RGBA'),
"ladder": Image.open(os.path.dirname(__file__) + "/loderunner/ladder.png").convert('RGBA'),
"rope": Image.open(os.path.dirname(__file__) + "/loderunner/rope.png").convert('RGBA'),
"enemy": Image.open(os.path.dirname(__file__) + "/loderunner/enemy.png").convert('RGBA'),
"gold": Image.open(os.path.dirname(__file__) + "/loderunner/gold.png").convert('RGBA'),
"empty": Image.open(os.path.dirname(__file__) + "/loderunner/empty.png").convert('RGBA'),
"player": Image.open(os.path.dirname(__file__) + "/loderunner/player.png").convert('RGBA')
}
#self._border_size = (0, 0)
img = super().render(map)
#self._border_size = (3, 0)
return img
|
{"hexsha": "3ecd4f1652e285ca9fc84a6d15ab66eee6910a3a", "size": 5757, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_pcgrl/gym_pcgrl/envs/probs/loderunner_prob.py", "max_stars_repo_name": "JiangZehua/control-pcgrl3D", "max_stars_repo_head_hexsha": "f9b04e65e1cbf70b7306f4df251450d83c6fb2be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym_pcgrl/gym_pcgrl/envs/probs/loderunner_prob.py", "max_issues_repo_name": "JiangZehua/control-pcgrl3D", "max_issues_repo_head_hexsha": "f9b04e65e1cbf70b7306f4df251450d83c6fb2be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym_pcgrl/gym_pcgrl/envs/probs/loderunner_prob.py", "max_forks_repo_name": "JiangZehua/control-pcgrl3D", "max_forks_repo_head_hexsha": "f9b04e65e1cbf70b7306f4df251450d83c6fb2be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1214285714, "max_line_length": 139, "alphanum_fraction": 0.5615772104, "include": true, "reason": "import numpy", "num_tokens": 1452}
|
#!/usr/bin/env python3
import numpy as np
from pathlib import Path
from astropy.time import Time
import multiprocessing
from bin import sjd, influx_fetch
from sdssobstools import sdss_paths
try:
import tpmdata
except ImportError:
tpmdata = None
__version__ = "3.0.0"
def get_tpm_packet(out_dict):
tpmdata.tinit()
data = tpmdata.packet(1, 1)
for key, val in data.items():
out_dict[key] = val
return 0
def get_enclosure_state(t_start, t_end, out_dict):
enclosure_path = Path(
sdss_paths.__file__).parent.parent / "flux/enclosure.flux"
with enclosure_path.open('r') as fil:
state = influx_fetch.query(fil.read(), t_start, t_end, interval="5m")[0]
enclosure_hist = ""
last_state = 0
for row in state.records:
if row.get_value() > last_state:
last_state = row.get_value()
t = Time(row.get_time())
enclosure_hist += f"Opened at {t.isot[11:19]}\n"
elif row.get_value() < last_state:
last_state = row.get_value()
t = Time(row.get_time())
enclosure_hist += f"Closed at {t.isot[11:19]}\n"
if enclosure_hist == "":
enclosure_hist = "Closed all night\n"
out_dict["enclosure_hist"] = enclosure_hist
def get_chiller_state(t_start, t_end, out_dict):
chiller_path = Path(
sdss_paths.__file__).parent.parent / "flux/chiller_status.flux"
with chiller_path.open('r') as fil:
table = influx_fetch.query(fil.read(), t_start, t_end, interval="1m")
chiller_vals = {}
for col in table:
for row in col.records:
if row.get_field() not in chiller_vals.keys():
chiller_vals[row.get_field()] = row.get_value()
# print(chiller_vals)
for k in ["FLOW1", "FLOW2", "STATUS_FLUID_FLOW", "FLOW_USER_SETPOINT",
"DISPLAY_VALUE"]:
if k not in chiller_vals.keys():
chiller_vals[k] = np.nan
chiller_output = f"Chiller Flow: {chiller_vals['FLOW1']:.2f} L/min to FPS,"
chiller_output += f" {chiller_vals['FLOW2']:.2f} L/min to GFAs,"
chiller_output += f" {chiller_vals['STATUS_FLUID_FLOW']:.1f}"
chiller_output += f"/{chiller_vals['FLOW_USER_SETPOINT']:.1f} gpm total\n"
chiller_output += f"Chiller Temp: {chiller_vals['DISPLAY_VALUE']:.1f}C\n"
alarms = []
for key, val in chiller_vals.items():
if "ALARM" in key:
if val != 0:
alarms.append((key, val))
if len(alarms) == 0:
chiller_output += "No chiller alarms\n"
else:
chiller_output += "Active chiller alarms:\n"
for name, val in alarms:
chiller_output += f"{name}: {val}\n"
out_dict["chiller_output"] = chiller_output
def query():
t_start = Time(sjd.sjd() - 0.3, format="mjd")
t_end = Time.now()
if tpmdata is None:
raise ConnectionError("Cannot query the tpm without tpmdata installed")
data = multiprocessing.Manager().dict()
encl_thread = multiprocessing.Process(target=get_enclosure_state,
args=(t_start, t_end, data))
chiller_thread = multiprocessing.Process(target=get_chiller_state,
args=(t_end - 15 / 60 / 24, t_end,
data))
tpm_thread = multiprocessing.Process(target=get_tpm_packet, args=(data,))
encl_thread.start()
chiller_thread.start()
tpm_thread.start()
tpm_thread.join(2)
chiller_thread.join(5)
encl_thread.join(2)
if tpm_thread.is_alive():
tpm_thread.kill()
raise ConnectionError("Could not reach TPM")
if chiller_thread.is_alive():
chiller_thread.kill()
raise ConnectionError("Chiller query timeout")
if encl_thread.is_alive():
encl_thread.kill()
raise ConnectionError("Enclosure query timeout")
# print(data.keys())
t = Time(data["ctime"], format="unix")
output = data["enclosure_hist"] + '\n'
output += f"Status at: {t.isot[12:19]}Z\n"
output += (f"Telescope Position: "
f"{data['az_actual_pos']*data['az_spt']/3600:>5.1f},"
f" {data['alt_actual_pos']*data['alt_spt']/3600:>5.1f},"
f" {data['rot_actual_pos']*data['rot_spt']/3600:>5.1f} mount\n")
# epics_data = epics_fetch.get_data(["25m:mcp:instrumentNum"],
# start_time=Time.now().to_datetime(),
# end_time=Time.now().to_datetime())
cart = "FPS" if data["inst_id_0"] == 0 else f"{data['inst_id_0']:.0f}"
output += f"Instrument mounted: {cart}\n"
output += (f"Counterweights at: {data['plc_cw_0']:.1f},"
f" {data['plc_cw_1']:.1f},"
f" {data['plc_cw_2']:.1f},"
f" {data['plc_cw_3']:.1f}\n")
# TODO Check if this is supposed to do something
if data["dewar_sp1_psi"] > 10:
output += f"LN2 autofill systems: Connected and turned on\n"
else:
output += f"LN2 autofill systems: Disconnected\n"
output += (f"180L LN2 dewar scale: SP1 {data['dewar_sp1_lb']:6.1f} lbs,"
f" {data['dewar_sp1_psi']:6.1f} psi\n")
output += data["chiller_output"]
return output
def main():
print(query())
if __name__ == "__main__":
main()
|
{"hexsha": "61730d37eb1e2b3f9f6746a2d072ddf0e7d97ac1", "size": 5309, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/telescope_status.py", "max_stars_repo_name": "StarkillerX42/ObserverTools", "max_stars_repo_head_hexsha": "a3bc48179a1ed445e7f4232426dce8c1c28bb8e4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-13T18:18:01.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-14T22:55:38.000Z", "max_issues_repo_path": "bin/telescope_status.py", "max_issues_repo_name": "StarkillerX42/ObserverTools", "max_issues_repo_head_hexsha": "a3bc48179a1ed445e7f4232426dce8c1c28bb8e4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/telescope_status.py", "max_forks_repo_name": "StarkillerX42/ObserverTools", "max_forks_repo_head_hexsha": "a3bc48179a1ed445e7f4232426dce8c1c28bb8e4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8716216216, "max_line_length": 80, "alphanum_fraction": 0.6035034846, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1467}
|
from sklearn.ensemble import IsolationForest
class IsolationModel:
"""
Simple Isolation Model based on contamination
"""
def __init__(self, data):
self.normalized_data = (data - data.mean()) / data.std()
self.iso = IsolationForest(contamination=.001, behaviour='new')
self.iso.fit(self.normalized_data)
self.iso.predict(self.normalized_data)
def predict_outlier(self, data):
return self.iso.predict(data)
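# Hypothetical usage sketch (file name assumed, not part of the original module):
#   import pandas as pd
#   data = pd.read_csv("DATA.csv")  # OHLCV columns
#   model = IsolationModel(data)
#   flags = model.predict_outlier((data - data.mean()) / data.std())
#   # -1 marks an anomaly, 1 marks an inlier; predict_outlier expects data
#   # normalized the same way the constructor normalizes its input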
from models.isolation_model import IsolationModel
import backtrader as bt
import pandas as pd
import numpy as np
class IsolationStrategy(bt.Strategy):
'''
Explanation:
The isolation forest identifies what it deems to be anomalies:
overbought or oversold opportunities for entry. I append known data
after fitting the isolation forest for the next day, making it an
online unsupervised learning algorithm.
Current Issue: Positioning, Sizing, Exposure
'''
def log(self, txt, dt=None):
''' Logging function fot this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self, data):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataopen = self.datas[0].open
self.datahigh = self.datas[0].high
self.datalow = self.datas[0].low
self.dataclose = self.datas[0].close
self.datavolume = self.datas[0].volume
self.model_data = pd.read_csv(data)
self.buyOut = False
self.sellOut = False
self.orderPosition = 0
self.cooldown = 7
# This is the code that gets copied into the trading system
def next(self):
self.log(self.dataclose[0])
# Construct dataframe to predict
x = pd.DataFrame(
data=[[
self.dataopen[0], self.datahigh[0], self.datalow[0],
self.dataclose[0], self.datavolume[0]
]], columns='Open High Low Close Volume'.split()
)
# Create the model with all known data for normalization
model = IsolationModel(self.model_data)
# Append today's data for tomorrow's normalization
self.model_data = self.model_data.append(x, ignore_index=True)
# Dataframe to help normalize x
mean_to_normalize = pd.DataFrame(data=[[
np.mean(self.model_data['Open']), np.mean(self.model_data['High']),
np.mean(self.model_data['Low']), np.mean(self.model_data['Close']),
np.mean(self.model_data['Volume'])
]], columns='Open High Low Close Volume'.split())
# Dataframe to help normalize x
std_to_normalize = pd.DataFrame(data=[[
np.std(self.model_data['Open']), np.std(self.model_data['High']),
np.std(self.model_data['Low']), np.std(self.model_data['Close']),
np.std(self.model_data['Volume'])
]], columns='Open High Low Close Volume'.split())
# x is normalized as a parameter
normalized_x = (x - mean_to_normalize) / std_to_normalize
"""
# Write updated Data to CSV - To be included in the live system
self.model_data.to_csv('FB.csv', index=False)
"""
# Sell an anomaly above the running mean price
if (model.predict_outlier(normalized_x) == -1) & \
(self.dataclose[0] > np.mean(self.model_data['Close'])):
self.log('SELL CREATE, %.2f' % self.dataclose[0])
if not self.orderPosition == 0:
self.sell(size=1)
self.orderPosition -= 1
# Buy an anomaly below the running mean price, once the cooldown expires
if (model.predict_outlier(normalized_x) == -1) & \
(self.dataclose[0] < np.mean(self.model_data['Close'])) & \
(self.cooldown == 0):
self.log('BUY CREATE, %.2f' % self.dataclose[0])
self.buy(size=1)
self.orderPosition += 1
self.cooldown = 7
if self.cooldown > 0:
self.cooldown -= 1
import backtrader as bt
import pyfolio as pf
def backtesting_engine(symbol, strategy, fromdate, todate, args=None):
"""
Primary function for backtesting, not entirely parameterized
"""
# Backtesting Engine
cerebro = bt.Cerebro()
# Add a Strategy if no Data Required for the model
if args is None:
cerebro.addstrategy(strategy)
# If the Strategy requires a Model and therefore data
elif args is not None:
cerebro.addstrategy(strategy, args)
# Retrieve Data from Alpaca
data = bt.feeds.YahooFinanceData(
dataname=symbol,
fromdate=fromdate, # datetime.date(2015, 1, 1)
todate=todate, # datetime.datetime(2016, 1, 1)
reverse=False
)
# Add Data to Backtesting Engine
cerebro.adddata(data)
# Set Initial Portfolio Value
cerebro.broker.setcash(100000.0)
# Add Analysis Tools
cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe')
cerebro.addanalyzer(bt.analyzers.Returns, _name='returns')
cerebro.addanalyzer(bt.analyzers.SQN, _name='sqn')
cerebro.addanalyzer(bt.analyzers.DrawDown, _name='drawdown')
cerebro.addanalyzer(bt.analyzers.PositionsValue, _name='posval')
cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
# Starting Portfolio Value
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run the Backtesting Engine
backtest = cerebro.run()
# Print Analysis and Final Portfolio Value
print(
'Final Portfolio Value: %.2f' % cerebro.broker.getvalue()
)
print(
'Return: ', backtest[0].analyzers.returns.get_analysis()
)
print(
'Sharpe Ratio: ', backtest[0].analyzers.sharpe.get_analysis()
)
print(
'System Quality Number: ', backtest[0].analyzers.sqn.get_analysis()
)
print(
'Drawdown: ', backtest[0].analyzers.drawdown.get_analysis()
)
print(
'Active Position Value: ', backtest[0].analyzers.posval.get_analysis()
)
print(
'Pyfolio: ', backtest[0].analyzers.pyfolio.get_analysis()
)
# Print Analysis and Final Portfolio Value
pyfoliozer = backtest[0].analyzers.getbyname('pyfolio')
returns, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
# See if we can add regular FB data to compare against returns of algo
pf.create_full_tear_sheet(
returns, positions=positions, transactions=transactions
)
# TODO: Create pipeline: Optimization -> Testing essentially
class BacktestingPipeline:
"""
Pipeline for in sample optimization and out of sample testing
"""
pass
from datetime import datetime
from strategies.isolation_strategy import IsolationStrategy
from tools.backtesting_tools import backtesting_engine
"""
Script for backtesting strategies
"""
if __name__ == '__main__':
# Run backtesting engine
backtesting_engine(
'TICKER', IsolationStrategy, args='DATA.csv',
fromdate=datetime(2018, 1, 1), todate=datetime(2019, 1, 1)
)
|
{"hexsha": "46e2ee302ce3bcbfb4d0ae20e434c27fbd450f5e", "size": 7128, "ext": "py", "lang": "Python", "max_stars_repo_path": "Machine_Learning/sklearn_trading_bot.py", "max_stars_repo_name": "vhn0912/Finance", "max_stars_repo_head_hexsha": "39cf49d4d778d322537531cee4ce3981cc9951f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 441, "max_stars_repo_stars_event_min_datetime": "2020-04-22T02:21:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:00:24.000Z", "max_issues_repo_path": "Machine_Learning/sklearn_trading_bot.py", "max_issues_repo_name": "happydasch/Finance", "max_issues_repo_head_hexsha": "4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-06T15:19:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T18:32:29.000Z", "max_forks_repo_path": "Machine_Learning/sklearn_trading_bot.py", "max_forks_repo_name": "happydasch/Finance", "max_forks_repo_head_hexsha": "4f6c5ea8f60fb0dc3b965ffb9628df83c2ecef35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 111, "max_forks_repo_forks_event_min_datetime": "2020-04-21T11:40:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:26:17.000Z", "avg_line_length": 33.6226415094, "max_line_length": 79, "alphanum_fraction": 0.6370650954, "include": true, "reason": "import numpy", "num_tokens": 1705}
|
import json
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from bambi.models import Model
from bambi.priors import Family, Prior, PriorFactory
from statsmodels.tools.sm_exceptions import PerfectSeparationError
@pytest.fixture(scope="module")
def diabetes_data():
data_dir = join(dirname(__file__), "data")
data = pd.read_csv(join(data_dir, "diabetes.txt"), sep="\t")
data["age_grp"] = 0
data.loc[data["AGE"] > 40, "age_grp"] = 1
data.loc[data["AGE"] > 60, "age_grp"] = 2
return data
def test_prior_class():
prior = Prior("CheeseWhiz", holes=0, taste=-10)
assert prior.name == "CheeseWhiz"
assert isinstance(prior.args, dict)
assert prior.args["taste"] == -10
prior.update(taste=-100, return_to_store=1)
assert prior.args["return_to_store"] == 1
def test_family_class():
prior = Prior("CheeseWhiz", holes=0, taste=-10)
family = Family("cheese", prior, link="ferment", parent="holes")
for name in ["name", "prior", "link", "parent"]:
assert hasattr(family, name)
def test_prior_factory_init_from_default_config():
pf = PriorFactory()
for d in ["dists", "terms", "families"]:
assert hasattr(pf, d)
assert isinstance(getattr(pf, d), dict)
assert "normal" in pf.dists
assert "common" in pf.terms
assert "gaussian" in pf.families
def test_prior_factory_init_from_config():
config_file = join(dirname(__file__), "data", "sample_priors.json")
pf = PriorFactory(config_file)
for d in ["dists", "terms", "families"]:
assert hasattr(pf, d)
assert isinstance(getattr(pf, d), dict)
with open(config_file, "r") as config_fh:
config_dict = json.load(config_fh)
pf = PriorFactory(config_dict)
for d in ["dists", "terms", "families"]:
assert hasattr(pf, d)
assert isinstance(getattr(pf, d), dict)
assert "feta" in pf.dists
assert "hard" in pf.families
assert "yellow" in pf.terms
pf = PriorFactory(dists=config_dict["dists"])
assert "feta" in pf.dists
pf = PriorFactory(terms=config_dict["terms"])
assert "yellow" in pf.terms
pf = PriorFactory(families=config_dict["families"])
assert "hard" in pf.families
def test_prior_retrieval():
config_file = join(dirname(__file__), "data", "sample_priors.json")
pf = PriorFactory(config_file)
prior = pf.get(dist="asiago")
assert prior.name == "Asiago"
assert isinstance(prior, Prior)
assert prior.args["hardness"] == 10
with pytest.raises(KeyError):
assert prior.args["holes"] == 4
family = pf.get(family="hard")
assert isinstance(family, Family)
assert family.link == "grate"
backup = family.prior.args["backup"]
assert isinstance(backup, Prior)
assert backup.args["flavor"] == 10000
prior = pf.get(term="yellow")
assert prior.name == "Swiss"
# Test exception raising
with pytest.raises(ValueError):
pf.get(dist="apple")
with pytest.raises(ValueError):
pf.get(term="banana")
with pytest.raises(ValueError):
pf.get(family="cantaloupe")
def test_auto_scale(diabetes_data):
# By default, should scale everything except custom Prior() objects
priors = {"S1": 0.3, "BP": Prior("Cauchy", alpha=1, beta=17.5)}
model = Model("BMI ~ S1 + S2 + BP", diabetes_data, priors=priors)
model.build(backend="pymc3")
p1 = model.terms["S1"].prior
p2 = model.terms["S2"].prior
p3 = model.terms["BP"].prior
assert p1.name == p2.name == "Normal"
assert 0 < p1.args["sigma"] < 1
assert p2.args["sigma"] > p1.args["sigma"]
assert p3.name == "Cauchy"
assert p3.args["beta"] == 17.5
# With auto_scale off, everything should be flat unless explicitly named in priors
model = Model("BMI ~ S1 + S2 + BP", diabetes_data, priors=priors, auto_scale=False)
model.build(backend="pymc3")
p1_off = model.terms["S1"].prior
p2_off = model.terms["S2"].prior
p3_off = model.terms["BP"].prior
assert p1_off.name == "Normal"
assert p2_off.name == "Flat"
assert 0 < p1_off.args["sigma"] < 1
assert "sigma" not in p2_off.args
assert p3_off.name == "Cauchy"
assert p3_off.args["beta"] == 17.5
def test_complete_separation():
data = pd.DataFrame({"y": [0] * 5 + [1] * 5, "g": ["a"] * 5 + ["b"] * 5})
with pytest.raises(PerfectSeparationError):
Model("y ~ g", data, family="bernoulli").fit()
# No error is raised
priors = {"common": Prior("Normal", mu=0, sigma=10)}
Model("y ~ g", data, family="bernoulli", priors=priors).fit()
def test_response_prior():
data = pd.DataFrame({"y": np.random.randint(3, 10, size=50), "x": np.random.normal(size=50)})
priors = {"sigma": Prior("Uniform", lower=0, upper=50)}
model = Model("y ~ x", data, priors=priors)
assert model.response.prior.args["sigma"] == priors["sigma"]
priors = {"alpha": Prior("Uniform", lower=1, upper=20)}
model = Model("y ~ x", data, family="negativebinomial", priors=priors)
assert model.response.prior.args["alpha"] == priors["alpha"]
priors = {"alpha": Prior("Uniform", lower=0, upper=50)}
model = Model("y ~ x", data, family="gamma", priors=priors)
assert model.response.prior.args["alpha"] == Prior("Uniform", lower=0, upper=50)
priors = {"alpha": Prior("Uniform", lower=0, upper=50)}
model = Model("y ~ x", data, family="gamma", priors=priors)
assert model.response.prior.args["alpha"] == Prior("Uniform", lower=0, upper=50)
def test_set_response_prior():
data = pd.DataFrame({"y": np.random.randint(3, 10, size=50), "x": np.random.normal(size=50)})
priors = {"sigma": Prior("Uniform", lower=0, upper=50)}
model = Model("y ~ x", data)
model.set_priors(priors)
assert model.response.prior.args["sigma"] == Prior("Uniform", lower=0, upper=50)
priors = {"alpha": Prior("Uniform", lower=1, upper=20)}
model = Model("y ~ x", data, family="negativebinomial")
model.set_priors(priors)
assert model.response.prior.args["alpha"] == Prior("Uniform", lower=1, upper=20)
priors = {"alpha": Prior("Uniform", lower=0, upper=50)}
model = Model("y ~ x", data, family="gamma")
model.set_priors(priors)
assert model.response.prior.args["alpha"] == Prior("Uniform", lower=0, upper=50)
def test_response_prior_fail():
data = pd.DataFrame(
{"y": np.random.randint(3, 10, size=50), "sigma": np.random.normal(size=50)}
)
priors = {"sigma": Prior("Uniform", lower=0, upper=50)}
with pytest.raises(ValueError):
Model("y ~ sigma", data, priors=priors)
data.rename(columns={"sigma": "alpha"}, inplace=True)
priors = {"alpha": Prior("Uniform", lower=0, upper=50)}
with pytest.raises(ValueError):
Model("y ~ alpha", data, family="negativebinomial", priors=priors)
with pytest.raises(ValueError):
Model("y ~ alpha", data, family="gamma", priors=priors)
|
{"hexsha": "ff420858a3f3e43c781d9d5c01d1718ec46046cd", "size": 6915, "ext": "py", "lang": "Python", "max_stars_repo_path": "bambi/tests/test_priors.py", "max_stars_repo_name": "Maruff/bambi", "max_stars_repo_head_hexsha": "f38fafb04af7e1eabbcd3d6779aa6c7560c775e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bambi/tests/test_priors.py", "max_issues_repo_name": "Maruff/bambi", "max_issues_repo_head_hexsha": "f38fafb04af7e1eabbcd3d6779aa6c7560c775e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bambi/tests/test_priors.py", "max_forks_repo_name": "Maruff/bambi", "max_forks_repo_head_hexsha": "f38fafb04af7e1eabbcd3d6779aa6c7560c775e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6443298969, "max_line_length": 97, "alphanum_fraction": 0.6458423717, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1961}
|
[STATEMENT]
lemma (in field) feval_eq0:
assumes "in_carrier xs"
and "fnorm e = (n, d, c)"
and "nonzero xs c"
and "peval xs n = \<zero>"
shows "feval xs e = \<zero>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. feval xs e = \<zero>
[PROOF STEP]
using assms fnorm_correct [of xs e]
[PROOF STATE]
proof (prove)
using this:
in_carrier xs
fnorm e = (n, d, c)
nonzero xs c
peval xs n = \<zero>
\<lbrakk>in_carrier xs; nonzero xs (Cond e)\<rbrakk> \<Longrightarrow> feval xs e = peval xs (Num e) \<oslash> peval xs (Denom e)
\<lbrakk>in_carrier xs; nonzero xs (Cond e)\<rbrakk> \<Longrightarrow> peval xs (Denom e) \<noteq> \<zero>
goal (1 subgoal):
1. feval xs e = \<zero>
[PROOF STEP]
by simp
|
{"llama_tokens": 301, "file": null, "length": 2}
|
import cv2 as cv
import numpy as np
from ch7.pose_estimation_2d2d import find_feature_matches, pose_estimation_2d2d, pixel2cam
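# Pinhole intrinsics matrix K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]];
# these values appear to match the TUM RGB-D sample images used throughout
# the book's examples (assumption based on context).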
K = np.array([[520.9, 0, 325.1],
[0, 521.0, 249.7],
[0, 0, 1]])
def triangulation(kp_1, kp_2, ms, r_mat, t_vec):
    """Triangulate matched keypoints into 3D points (first camera frame),
    given the second camera's relative rotation r_mat and translation t_vec."""
T1 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]])
T2 = np.concatenate((r_mat, t_vec), axis=1)
pts_1 = np.array([pixel2cam(kp_1[match.queryIdx].pt, K) for match in ms]).squeeze().transpose()
pts_2 = np.array([pixel2cam(kp_2[match.trainIdx].pt, K) for match in ms]).squeeze().transpose()
pts_4d = cv.triangulatePoints(T1, T2, pts_1, pts_2)
points = pts_4d[:3, :] / pts_4d[3, :]
return points.transpose()
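# Added note: cv.triangulatePoints returns homogeneous 4xN points, so the
# division of rows 0-2 by row 3 above recovers Euclidean coordinates; e.g.
# a homogeneous column (2, 4, 6, 2)^T dehomogenizes to the point (1, 2, 3).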
if __name__ == '__main__':
img_1 = cv.imread('1.png')
img_2 = cv.imread('2.png')
key_points_1, key_points_2, matches = find_feature_matches(img_1, img_2)
    print('Found', len(matches), 'matched point pairs in total')
R, t, E = pose_estimation_2d2d(key_points_1, key_points_2, matches)
points = triangulation(key_points_1, key_points_2, matches, R, t)
for match, point in zip(matches, points):
print('-------------------------------------------------')
pt1_cam = pixel2cam(key_points_1[match.queryIdx].pt, K)
pt1_cam_3d = [point[0] / point[2], point[1] / point[2]]
print('point in the first camera frame: ', pt1_cam.transpose().squeeze())
print('point projected from 3D ', pt1_cam_3d, ', d=', point[2])
pt2_cam = pixel2cam(key_points_2[match.trainIdx].pt, K)
pt2_trans = np.matmul(R, point[:, np.newaxis]) + t
pt2_trans = pt2_trans / pt2_trans[2, 0]
print('point in the second camera frame: ', pt2_cam.transpose().squeeze())
print('point reprojected from second frame: ', pt2_trans.transpose().squeeze())
|
{"hexsha": "f79b81cbe513736552a389416400ad3b65c2731a", "size": 1840, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch7/triangulation.py", "max_stars_repo_name": "hujianhang2996/slambook_python", "max_stars_repo_head_hexsha": "26eabfe5a8d6f3e534452f6ccf5b43af838ffc8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-10-11T03:23:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T21:06:23.000Z", "max_issues_repo_path": "ch7/triangulation.py", "max_issues_repo_name": "hujianhang2996/slambook_python", "max_issues_repo_head_hexsha": "26eabfe5a8d6f3e534452f6ccf5b43af838ffc8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-05-13T04:15:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-14T13:35:04.000Z", "max_forks_repo_path": "ch7/triangulation.py", "max_forks_repo_name": "hujianhang2996/slambook_python", "max_forks_repo_head_hexsha": "26eabfe5a8d6f3e534452f6ccf5b43af838ffc8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-27T05:31:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T05:31:01.000Z", "avg_line_length": 40.8888888889, "max_line_length": 99, "alphanum_fraction": 0.6081521739, "include": true, "reason": "import numpy", "num_tokens": 600}
|
[STATEMENT]
lemma possible_steps_0:
"length i = 1 \<Longrightarrow>
possible_steps drinks 0 r (STR ''select'') i = {|(1, select)|}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length i = 1 \<Longrightarrow> possible_steps drinks 0 r STR ''select'' i = {|(1, select)|}
[PROOF STEP]
apply (simp add: possible_steps_singleton drinks_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length i = 1 \<Longrightarrow> {((origin, dest), t). (origin = 0 \<and> dest = 1 \<and> t = select \<or> origin = 1 \<and> dest = 1 \<and> t = coin \<or> origin = 1 \<and> dest = 1 \<and> t = vend_fail \<or> origin = 1 \<and> dest = 2 \<and> t = vend) \<and> origin = 0 \<and> Label t = STR ''select'' \<and> Arity t = 1 \<and> apply_guards (Guards t) (join_ir i r)} = {((0, 1), select)}
[PROOF STEP]
apply safe
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>a b ba. \<lbrakk>length i = 1; ((0, 1), coin) \<notin> {}; Label coin = STR ''select''; Arity coin = 1; apply_guards (Guards coin) (join_ir i r); 0 = 1\<rbrakk> \<Longrightarrow> coin = select
2. \<And>a b ba. \<lbrakk>length i = 1; ((0, 1), vend_fail) \<notin> {}; Label vend_fail = STR ''select''; Arity vend_fail = 1; apply_guards (Guards vend_fail) (join_ir i r); 0 = 1\<rbrakk> \<Longrightarrow> vend_fail = select
3. \<And>a b ba. \<lbrakk>length i = 1; ((0, 2), vend) \<notin> {}; Label vend = STR ''select''; Arity vend = 1; apply_guards (Guards vend) (join_ir i r); 0 = 1\<rbrakk> \<Longrightarrow> 2 = 1
4. \<And>a b ba. \<lbrakk>length i = 1; ((0, 2), vend) \<notin> {}; Label vend = STR ''select''; Arity vend = 1; apply_guards (Guards vend) (join_ir i r); 0 = 1\<rbrakk> \<Longrightarrow> vend = select
5. \<And>a b ba. length i = 1 \<Longrightarrow> Label select = STR ''select''
6. \<And>a b ba. length i = 1 \<Longrightarrow> Arity select = 1
7. \<And>a b ba. length i = 1 \<Longrightarrow> apply_guards (Guards select) (join_ir i r)
[PROOF STEP]
by (simp_all add: transitions apply_guards_def)
|
{"llama_tokens": 783, "file": "Extended_Finite_State_Machines_examples_Drinks_Machine", "length": 3}
|
[STATEMENT]
lemma realrel_in_real [simp]: "realrel``{(x,y)} \<in> Real"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Dedekind_Real.realrel `` {(x, y)} \<in> Real
[PROOF STEP]
by (simp add: Real_def realrel_def quotient_def, blast)
|
{"llama_tokens": 102, "file": "Dedekind_Real_Dedekind_Real", "length": 1}
|
# Import packages
import numpy as np
import pandas as pd
import os  # for directory operations
import sys
import pprint
# set the directory
os.chdir('/Users/luho/PycharmProjects/model/model_correction/code')
sys.path.append(r"/Users/luho/PycharmProjects/model/cobrapy/code")
pprint.pprint(sys.path)
# Import self-defined helper functions
from mainFunction import *
#input the data
compartment = pd.read_excel('../data/Compartment.xlsx')
compartment_SGD = pd.read_excel('../data/Protein location SGD.xlsx')
# gene compartment annotation for SGD database
"""input the compartment annotation from SGD"""
compartment_SGD['go_type'] = compartment_SGD['go_type'].fillna('NA')
compartment_SGD0 = compartment_SGD[compartment_SGD['go_type'].str.contains("component")]
"""refine the compartment annotation from SGD"""
unique_cp_SGD = compartment_SGD0['go_term'].unique()
unique_cp_SGD = unique_cp_SGD.tolist()
unique_cp_SGD = pd.DataFrame(unique_cp_SGD)
unique_cp_SGD.columns = ['compartment']
unique_cp_SGD.columns = unique_cp_SGD.columns.str.strip()
"""standardize location information from SGD"""
ss0 = len(unique_cp_SGD)
similarTarget0 = getSimilarTarget(compartment[['description']],unique_cp_SGD[['compartment']],ss=ss0)
unique_cp_SGD['similar_target'] = similarTarget0
unique_cp_SGD.head()
"""save the standard location information from uniprot for manual check"""
writer = pd.ExcelWriter('../data/unique_compartment_SGD.xlsx')
unique_cp_SGD.to_excel(writer,'Sheet1')
writer.save()
"""replace the compartment information using standard name from model"""
xls = pd.ExcelFile('../data/Compartment.xlsx')
sgdChange = pd.read_excel(xls, 'Sheet2')
sgdChange['SGD'] = sgdChange['SGD'].str.strip()
sgdChange['model_name'] = sgdChange['model_name'].str.strip()
# For 'membrane', change it from the "uncertain" category into "membrane",
# which helps assign genes to the transport reactions.
for i in range(len(sgdChange['SGD'])):
    if sgdChange.loc[i, 'SGD'] == 'membrane':
        sgdChange.loc[i, 'model_name'] = 'membrane'
compartment_SGD0['compartment'] = singleMapping(sgdChange['model_name'],sgdChange['SGD'],compartment_SGD0['go_term'])
gene_compartmentSGD = pd.DataFrame({
'gene': compartment_SGD0['systematic_name'].str.strip().unique()
})
gene_compartmentSGD['compartment'] = multiMapping(compartment_SGD0['compartment'],compartment_SGD0['systematic_name'],gene_compartmentSGD['gene'])
# gene compartment annotation for uniprot database
"""input the gene annotation from uniprot database"""
xls = pd.ExcelFile('../data/uniprot_location.xlsx')
uniprot_location = pd.read_excel(xls, 'Sheet1')
uniprot_location['Subcellular location [CC]'] = uniprot_location['Subcellular location [CC]'].str.replace('SUBCELLULAR LOCATION: ','')
uniprot_location['Subcellular location [CC]'] = uniprot_location['Subcellular location [CC]'].str.replace(';', ',', regex=False)
uniprot_location['Subcellular location [CC]'] = uniprot_location['Subcellular location [CC]'].str.replace('.', ',', regex=False)  # literal dot; without regex=False the pattern would match every character
uniprot_location['Subcellular location [CC]'] = uniprot_location['Subcellular location [CC]'].str.replace(r'\{.+?\}', '', regex=True)  # strip evidence tags such as {ECO:...}
uniprot_location0 = uniprot_location.iloc[:,1].str.split('Note=', expand=True)
uniprot_location['location'] = uniprot_location0.iloc[:,0]
uniprot_location1 = splitAndCombine(uniprot_location['location'],uniprot_location['gene'],sep0=",")
uniprot_location2 = uniprot_location1[uniprot_location1.V2 != '']
uniprot_location2 = uniprot_location2[uniprot_location2.V2 != ' ']
compartment_uniprot = uniprot_location2[uniprot_location2.V2 != 'NA']
compartment_uniprot.columns = ['gene','compartment']
compartment_uniprot.gene = compartment_uniprot.gene.str.strip()
compartment_uniprot.compartment = compartment_uniprot.compartment.str.strip()
unique_compartment_uniprot = pd.Series(uniprot_location2.V2.unique())
unique_compartment_uniprot = unique_compartment_uniprot.str.strip()
unique_compartment_uniprot = unique_compartment_uniprot.unique()
unique_compartment_uniprot = pd.DataFrame(unique_compartment_uniprot)
unique_compartment_uniprot.columns = ['compartment']
"""standardize location information from uniprot database"""
ss0 = len(unique_compartment_uniprot)
similarTarget0 = getSimilarTarget(compartment[['description']],unique_compartment_uniprot[['compartment']],ss=ss0)
unique_compartment_uniprot['similar_target'] = similarTarget0
unique_compartment_uniprot.head()
"""save the standard location information from uniprot for manual check"""
writer = pd.ExcelWriter('../data/unique_compartment_uniprot.xlsx')
unique_compartment_uniprot.to_excel(writer,'Sheet1')
writer.save()
"""replace the compartment information using standard name from model for uniprot gene location annotation"""
xls = pd.ExcelFile('../data/Compartment.xlsx')
uniChange = pd.read_excel(xls, 'Sheet3')
uniChange['uniprot'] = uniChange['uniprot'].str.strip()
uniChange['model_name'] = uniChange['model_name'].str.strip()
# Same normalisation for the uniprot annotation: map 'Membrane' from the
# "uncertain" category into the standard 'membrane' compartment name.
for i in range(len(uniChange['uniprot'])):
    if uniChange.loc[i, 'uniprot'] == 'Membrane':
        uniChange.loc[i, 'model_name'] = 'membrane'
compartment_uniprot['model_name'] = singleMapping(uniChange['model_name'],uniChange['uniprot'],compartment_uniprot['compartment'])
gene_compartmentUNI = pd.DataFrame({
'gene': compartment_uniprot['gene'].str.strip().unique()
})
gene_compartmentUNI['compartment'] = multiMapping(compartment_uniprot['model_name'],compartment_uniprot['gene'],gene_compartmentUNI['gene'])
saveExcel(gene_compartmentSGD,'../result/gene_compartmentSGD_updated_october.xlsx')
saveExcel(gene_compartmentUNI,'../result/gene_compartmentUNI_updated_october.xlsx')
|
{"hexsha": "86fe87738869cc83a957f99966cc021029530351", "size": 5910, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_correction/code/compartment_collection from uniprot and sgd.py", "max_stars_repo_name": "hongzhonglu/yeast-model-update", "max_stars_repo_head_hexsha": "0268d72320caa61a84c4e11634700cb51ffa9009", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-10T07:23:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T15:27:42.000Z", "max_issues_repo_path": "model_correction/code/compartment_collection from uniprot and sgd.py", "max_issues_repo_name": "hongzhonglu/yeast-model-update", "max_issues_repo_head_hexsha": "0268d72320caa61a84c4e11634700cb51ffa9009", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_correction/code/compartment_collection from uniprot and sgd.py", "max_forks_repo_name": "hongzhonglu/yeast-model-update", "max_forks_repo_head_hexsha": "0268d72320caa61a84c4e11634700cb51ffa9009", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-06-05T09:11:51.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-05T09:11:51.000Z", "avg_line_length": 44.7727272727, "max_line_length": 146, "alphanum_fraction": 0.7727580372, "include": true, "reason": "import numpy", "num_tokens": 1524}
|
# -*- coding: utf-8 -*-
"""
@author: ibackus
"""
# External packages
from matplotlib.colors import LogNorm
from matplotlib.cm import get_cmap
import numpy as np
import pynbody as pb
SimArray = pb.array.SimArray
import os
# Internal modules
import cubehelix
import ffmpeg_writer
import pbmov_utils
# setup colormaps
ch=cubehelix.cmap()
cx4 = cubehelix.cmap(reverse=False, start=0., rot=0.5) #mostly reds
cx3 = cubehelix.cmap(reverse=False, start=0.3, rot=-0.5)# mostly blues
cx_default = ch
def render_movie(sim, cameras, targets, nt, vmin=None, vmax=None, camera_rot=0.0,\
res=500, cmap=cx_default, fps=25, savename='movie.mp4', preview=None, nskip=0, **kwargs):
"""
Renders a movie.
**STATIC ARGUMENTS**
sim : snapshot
A pynbody snapshot
nt : int
        Number of frames
    res : int
        Frame resolution. All frames are square (i.e., resolution = res x res)
cmap : matplotlib colormap or str
Colormap to use
fps : int
Frames per second
savename : str
Filename to save movie to
preview : None or int
If int, then frame number preview is plotted in a window instead of
rendering the whole movie
If None (DEFAULT), movie is rendered
nskip : int
(buggy still) Number of frames to skip. Can be used to render every
nth frame
**kwargs
Additional keyword arguments to pass to the pynbody renderer
(see pynbody.plot.sph.image)
**ANIMATEABLE ARGUMENTS**
NOTE: All the animateable arguments can be passed as static if wanted.
e.g., if ONE camera position is passed, then the camera will be treated
as static.
IF a value is to be animated, it should be provided for every frame
cameras : array
Camera x,y,z position(s). shape (nt, 3) if animated
targets : array
Target x,y,z position(s). shape (nt, 3) if animated
vmin, vmax : float or array
min/max values for color map
camera_rot : float or array
Rotation of the camera (in radians) around the axis connecting camera
and the target.
"""
# Make multiples of animated values if needed. This ensures the values
# are all defined at every frame
cameras = repeat_val(cameras, nt, 1)
targets = repeat_val(targets, nt, 1)
vmin = repeat_val(vmin, nt, 0)
vmax = repeat_val(vmax, nt, 0)
camera_rot = repeat_val(camera_rot, nt, 0)
if preview is not None:
# Just preview a frame and end
i = preview
render_frame(sim, cameras[i], targets[i], vmin=vmin[i], vmax=vmax[i], \
camera_rot=camera_rot[i], res=500, cmap=cmap, preview=True, **kwargs)
return
# we'll be making a video. Initialize a video_writer object
video_writer = ffmpeg_writer.FFMPEG_VideoWriter(savename, (res,res), fps)
# Copy initial positions
pos0 = sim['pos'].copy()
# setup frames to render
irange = range(nt)
if nskip != 0:
irange = irange[0::nskip]
fps = max(int(fps/float(nskip)+0.5), 1)
# Loop through frames and render
for i in irange:
        print('\n{} of {}\n'.format(i + 1, nt))
# render frame
color_im = render_frame(sim, cameras[i], targets[i], pos0=pos0,\
vmin=vmin[i], vmax=vmax[i], camera_rot=camera_rot[i], res=res, cmap=cmap,\
revert_sim_pos=False, **kwargs)
# write to video
video_writer.write_frame(color_im)
# Finalize
video_writer.close()
sim['pos'] = pos0
def repeat_val(x, nt, single_val_dim=0):
"""
Repeats x for every frame if x is constant. If there are multiple values
of x, x is returned (unchanged)
**ARGUMENTS**
x : array or number
array/number to check
nt : int
number of time steps
single_val_dim : int
Number of dimensions a single value would have. 0 for a float, 1
for a 1D array, 2 for a 2D array, etc...
**RETURNS**
x : array
array which is just repeated values of x if x is constant OR x (unchanged)
if x has multiple values
"""
# Check that values has more more dimensions than a single value would
ndim = np.ndim(x)
if ndim - single_val_dim > 1:
        raise ValueError('x has too many dimensions. At most it can have single_val_dim + 1')
if np.ndim(x) > single_val_dim:
# Check that there is more than 1 value
nx = len(x)
if nx > 1:
# There are multiple values. Make sure there are nt of them
if nx != nt:
                raise ValueError('x has multiple entries, but not nt of them')
else:
# x has only a single value but an extra dimension.
x = x[0]
else:
# There's only one entry
nx = 1
if nx == 1:
# if there's only one entry
# Copy x, nt times
x_list = []
for i in range(nt):
x_list.append(x)
# make an array
x = np.array(x_list)
return x
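# --- Usage sketch (added; not part of the original module) ---
# A minimal demonstration of repeat_val, assuming the module's own imports
# (pynbody, cubehelix, ffmpeg_writer) resolve: a scalar is tiled once per
# frame, while an already per-frame array is returned unchanged.
if __name__ == '__main__':
    print(repeat_val(1.5, 4))                           # [1.5 1.5 1.5 1.5]
    cams = np.zeros((4, 3))                             # one xyz position per frame
    print(repeat_val(cams, 4, single_val_dim=1).shape)  # (4, 3), unchanged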
|
{"hexsha": "51dd0b0affbff99c8b74b100ca9bd038f56b944a", "size": 5245, "ext": "py", "lang": "Python", "max_stars_repo_path": "pbmov.py", "max_stars_repo_name": "ibackus/pbmov", "max_stars_repo_head_hexsha": "2903ebfd9b9755e1549e0e58a314fc1a09d173d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pbmov.py", "max_issues_repo_name": "ibackus/pbmov", "max_issues_repo_head_hexsha": "2903ebfd9b9755e1549e0e58a314fc1a09d173d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2015-11-18T20:52:59.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-16T02:15:29.000Z", "max_forks_repo_path": "pbmov.py", "max_forks_repo_name": "ibackus/pbmov", "max_forks_repo_head_hexsha": "2903ebfd9b9755e1549e0e58a314fc1a09d173d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1436781609, "max_line_length": 93, "alphanum_fraction": 0.5958055291, "include": true, "reason": "import numpy", "num_tokens": 1365}
|
import datetime as dt
import logging
import os
from random import uniform, randint, sample
from time import perf_counter
import importlib.util
import time
import numpy as np
import pandas as pd
from mesa import Model
from mesa.datacollection import DataCollector
import pickle
from elecsim.plants.fuel.capacity_factor.capacity_factor_calculations import (
get_capacity_factor,
)
from elecsim.agents.demand.demand import Demand
from elecsim.agents.demand.multi_day_demand import MultiDayDemand
from elecsim.agents.generation_company.gen_co import GenCo
from elecsim.constants import ROOT_DIR
from elecsim.market.electricity.market.power_exchange import PowerExchange
from elecsim.mesa_addons.scheduler_addon import OrderedActivation
from elecsim.plants.plant_costs.estimate_costs.estimate_costs import create_power_plant
from elecsim.plants.plant_type.fuel_plant import FuelPlant
import elecsim.data_manipulation.data_modifications.scenario_modifier as scen_mod
from elecsim.role.market.latest_market_data import LatestMarketData
import elecsim.scenario.scenario_data

# Assumed imports: the original file references PolicyClient and the dropbox
# module below without importing them anywhere.
from ray.rllib.env.policy_client import PolicyClient
import dropbox
logger = logging.getLogger(__name__)
"""Model.py: Model for the electricity landscape world"""
__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "Alexander@Kell.es"
class World(Model):
"""
Model for the electricity landscape world
"""
def __init__(
self,
initialization_year,
scenario_file=None,
fitting_params=None,
long_term_fitting_params=None,
future_price_uncertainty_m=None,
future_price_uncertainty_c=None,
carbon_price_scenario=None,
demand_change=None,
demand_distribution=None,
number_of_steps=8,
total_demand=None,
number_of_agents=None,
market_time_splices=8,
data_folder="ElecSim_Output",
time_run=False,
nuclear_subsidy=None,
highest_demand=None,
log_level="warning",
client_rl=None,
distribution_name=None,
dropbox=None,
gencos_rl=[],
write_data_to_file=True,
rl_port_number=9920,
):
"""
Initialize an electricity market in a particular country. Provides the ability to change scenarios from this constructor.
:param int initialization_year: Year to begin simulation.
        :param list carbon_price_scenario: Scenario containing the carbon price for each year of the simulation.
        :param list demand_change: Scenario containing the change in demand between each year of the simulation.
        :param int number_of_steps: Total number of years to run the scenario.
        :param int total_demand: Total size of the country's demand.
        :param str data_folder: Directory and folder to save run data to.
        :param bool time_run: If True, record and write run timings.
        :param str log_level: Logging verbosity ('warning', 'info' or 'debug').
"""
self.start = perf_counter()
logger.info("start: {}".format(self.start))
# Set up model objects
self.year_number = initialization_year
self.years_from_start = 0
self.step_number = 0
self.unique_id_generator = 0
self.time_run = time_run
self.max_number_of_steps = number_of_steps
self.average_electricity_price = 0
self.market_time_splices = market_time_splices
self.nuclear_subsidy = nuclear_subsidy
self.dropbox = dropbox
self.gencos_rl = gencos_rl
self.write_data_to_file = write_data_to_file
self.set_log_level(log_level)
self.scenario_file = scenario_file
self.overwrite_scenario_file(scenario_file)
self.override_highest_demand(highest_demand)
self.override_carbon_scenario(carbon_price_scenario)
self.override_demand_change(demand_change)
self.demand_distribution_uncertainty = demand_distribution
self.distribution_name = distribution_name
self.override_total_demand(total_demand, number_of_agents)
self.schedule = OrderedActivation(self)
# Import company data including financials and plant data
plant_data = elecsim.scenario.scenario_data.power_plants
financial_data = elecsim.scenario.scenario_data.company_financials
if self.gencos_rl:
self.bidding_client = PolicyClient(
"http://127.0.0.1:{}".format(rl_port_number)
)
# Initialize generation companies using financial and plant data
self.initialize_gencos(financial_data, plant_data, gencos_rl)
self.last_added_plant = None
self.last_added_plant_bids = None
# Create PowerExchange
if self.market_time_splices == 1:
self.PowerExchange = PowerExchange(self, demand_distribution)
self.demand = Demand(
self,
self.unique_id_generator,
elecsim.scenario.scenario_data.segment_time,
elecsim.scenario.scenario_data.segment_demand_diff,
)
elif self.market_time_splices > 1:
self.PowerExchange = PowerExchange(self, demand_distribution)
# self.PowerExchange = HighTemporalExchange(self)
self.demand = MultiDayDemand(
self,
self.unique_id_generator,
elecsim.scenario.scenario_data.multi_year_data,
)
else:
raise ValueError("market_time_splices must be equal to or larger than 1.")
self.running = True
self.beginning_of_year = False
self.continue_investing = 0
self.over_invested = False
self.unique_id_generator += 1
self.schedule.add(self.demand)
self.create_data_loggers(data_folder)
self.client = client_rl
if (
elecsim.scenario.scenario_data.investment_mechanism == "RL"
and self.client is not None
and self.gencos_rl
):
# self.client = PolicyClient("http://rllibserver:9900")
self.eid = self.client.start_episode(training_enabled=True)
self.intial_obs = LatestMarketData(self).get_RL_investment_observations()
# logger.info("self.intial_obs: {}".format(self.intial_obs))
elif elecsim.scenario.scenario_data.investment_mechanism == "future_price_fit":
self.future_price_uncertainty_m = future_price_uncertainty_m
self.future_price_uncertainty_c = future_price_uncertainty_c
if fitting_params is not None:
self.fitting_params = fitting_params
elif long_term_fitting_params is not None:
self.long_term_fitting_params = long_term_fitting_params
self.fitting_params = None
else:
raise ValueError(
"If using future_price_fit you must enter a value for long_term_fitting_params or fitting_params in the constructor of World"
)
def step(self, carbon_price=None):
"""Advance model by one step"""
self.beginning_of_year = False
if self.step_number % self.market_time_splices == 0:
self.start = time.perf_counter()
self.operate_constructed_plants()
if self.step_number != 0:
self.year_number += 1
self.years_from_start += 1
# self.operate_constructed_plants()
self.beginning_of_year = True
# logger.info("year: {}".format(self.year_number))
print("{}:".format(self.year_number), end="", flush=True)
else:
print("{}:".format(self.year_number), end="", flush=True)
obs = self.schedule.step()
self.operate_constructed_plants()
if self.over_invested:
return self.datacollector.get_model_vars_dataframe(), self.over_invested
self.continue_investing = 0
if carbon_price is not None:
elecsim.scenario.scenario_data.carbon_price_scenario[
self.year_number + 1
] = carbon_price
if self.beginning_of_year:
self.dismantle_old_plants()
self.dismantle_unprofitable_plants()
self.average_electricity_price = self.PowerExchange.tender_bids(
self.demand.segment_hours, self.demand.segment_consumption
).accepted_price.mean()
self.PowerExchange.price_duration_curve = []
carbon_emitted = self.get_carbon_emitted(self)
self.settle_gencos_financials()
self.datacollector.collect(self)
self.delete_old_bids()
self.step_number += 1
print(".", end="", flush=True)
if self.write_data_to_file:
self.write_scenario_data()
if isinstance(self.average_electricity_price, np.ndarray):
self.average_electricity_price = self.average_electricity_price[0]
if self.step_number % self.market_time_splices == 0:
end = time.perf_counter()
print("time taken: {}".format(end - self.start))
# get_capacity_factor.cache_clear()
if (
self.step_number == self.max_number_of_steps
and elecsim.scenario.scenario_data.investment_mechanism == "RL"
and self.gencos_rl
):
obs = LatestMarketData(self).get_RL_investment_observations()
self.client.end_episode(self.eid, observation=obs)
del self.client
logger.debug(self.datacollector.get_model_vars_dataframe())
return abs(self.average_electricity_price), abs(carbon_emitted)
# return self.datacollector.get_model_vars_dataframe(), self.over_invested
def initialize_gencos(self, financial_data, plant_data, gencos_rl):
"""
        Creates generation company agents based on financial data and power plants owned. Estimates cost parameters
        of each power plant if data for the power plant is not available.
:param financial_data: Data containing information about generation company's financial status
:param plant_data: Data containing information about generation company's plants owned, start year and name.
"""
financial_data = pd.merge(financial_data, plant_data, on="Company", how="inner")
financial_data = financial_data[["Company", "cash_in_bank"]]
# Initialising generator company data
financial_data.cash_in_bank = financial_data.cash_in_bank.replace("nan", np.nan)
financial_data.cash_in_bank = financial_data.cash_in_bank.fillna(0)
companies_groups = plant_data.groupby("Company")
company_financials = financial_data.groupby("Company")
logger.info("Initialising generation companies with their power plants.")
# Initialize generation companies with their respective power plants
for gen_id, ((name, data), (_, financials)) in enumerate(
zip(companies_groups, company_financials), 0
):
rl_bidding = False
if financials.Company.iloc[0] != name:
raise ValueError(
"Company financials name ({}) and agent name ({}) do not match.".format(
financials.Company.iloc[0], name
)
)
elif financials.Company.iloc[0] in gencos_rl:
rl_bidding = True
gen_co = GenCo(
unique_id=gen_id,
model=self,
difference_in_discount_rate=round(uniform(-0.03, 0.03), 3),
look_back_period=randint(3, 7),
name=name,
money=financials.cash_in_bank.iloc[0],
rl_bidding=rl_bidding,
)
self.unique_id_generator += 1
# Add power plants to generation company portfolio
# parent_directory = os.path.dirname(os.getcwd())
pickle_directory = (
"{}/../elecsim/data/processed/pickled_data/power_plants/".format(
ROOT_DIR
)
)
for plant in data.itertuples():
try:
power_plant = pickle.load(
open(
"{}{}-{}.pickle".format(
pickle_directory, plant.Name, plant.Start_date
),
"rb",
)
)
except (OSError, IOError, FileNotFoundError, EOFError) as e:
logger.info("plant: {}".format(plant))
power_plant = create_power_plant(
plant.Name,
plant.Start_date,
plant.Simplified_Type,
plant.Capacity,
)
pickle.dump(
power_plant,
open(
"{}{}-{}.pickle".format(
pickle_directory, plant.Name, plant.Start_date
),
"wb",
),
)
gen_co.plants.append(power_plant)
logger.debug("Adding generation company: {}".format(gen_co.name))
self.schedule.add(gen_co)
logger.info("Added generation companies.")
    def get_running_plants(self, plants):
        """Yield plants still within their operating lifetime, refreshing the
        construction year of modernised pre-1990 plants and applying known
        retirement dates."""
for plant in plants:
# if plant.name in elecsim.scenario.scenario_data.known_plant_retirements:
if (
plant.construction_year <= 1990
and plant.name != "invested_plant"
and plant.name
not in elecsim.scenario.scenario_data.known_plant_retirements
):
# Reset old plants that have been modernised with new construction year
plant.construction_year = randint(
self.year_number - 15, self.year_number
)
# yield plant
elif plant.name in elecsim.scenario.scenario_data.known_plant_retirements:
plant.construction_year = (
elecsim.scenario.scenario_data.known_plant_retirements[plant.name]
- (
plant.operating_period
+ plant.construction_period
+ plant.pre_dev_period
)
- 1
)
logger.info(
"plant.name: {}, plant.construction_year: {}".format(
plant.name, plant.construction_year
)
)
if (
plant.construction_year
+ plant.operating_period
+ plant.construction_period
+ plant.pre_dev_period
>= self.year_number
):
yield plant
else:
logger.debug(
"Taking the plant '{}' out of service, year of construction: {}".format(
plant.name, plant.construction_year
)
)
def dismantle_old_plants(self):
"""
        Remove plants that are past their lifetime from each agent's plant list
"""
gencos = self.get_gencos()
for genco in gencos:
plants_filtered = list(self.get_running_plants(genco.plants))
genco.plants = plants_filtered
def dismantle_unprofitable_plants(self):
gencos = self.get_gencos()
for genco in gencos:
profitable_plants = list(self.filter_plants_with_no_income(genco.plants))
genco.plants = profitable_plants
    def filter_plants_with_no_income(self, plants):
        """Yield plants that have had an accepted bid within the last seven
        years; plants operating for fewer than seven years always pass."""
for plant in plants:
if (self.year_number > 7) and (
plant.get_year_of_operation() + 7 < self.year_number
):
historic_bids = plant.historical_bids
# logger.info("historic_bids {}".format(historic_bids))
# years_to_look_into = list(range(self.year_number,self.year_number-7,-1))
seven_years_previous = self.year_number - 7
if historic_bids:
if historic_bids[-1].year_of_bid > seven_years_previous:
yield plant
else:
logger.debug(
"Plant {}, type {} is unprofitable. Last accepted bid: {}".format(
plant.name,
plant.plant_type,
historic_bids[-1].year_of_bid,
)
)
for bid in plant.accepted_bids:
del bid
else:
logger.debug(
"Plant {}, type {} is unprofitable.".format(
plant.name, plant.plant_type
)
)
for bid in plant.accepted_bids:
del bid
# bids_to_check = list(filter(lambda x: x.year_of_bid in years_to_look_into, historic_bids))
# total_income_in_previous_years = sum(bid.price_per_mwh for bid in bids_to_check)
# for bids in reversed(historic_bids):
# logger.info("bids.year_of_bid: {}".format(bids.year_of_bid))
# if total_income_in_previous_years > 0:
# yield plant
# else:
# logger.debug("Taking plant: {} out of service.".format(plant.name))
else:
yield plant
def get_profitable_plants(self, plants):
for plant in plants:
if (
self.step_number > 7
and plant.get_year_of_operation() + 7 > self.year_number
):
historic_bids = plant.historical_bids
pass
    def operate_constructed_plants(self, minimum_operation_year=2018):
        """Flag plants as operating once pre-development and construction have
        finished; plants built before minimum_operation_year are assumed to be
        operational already."""
gencos = self.get_gencos()
logger.debug("gencos: {}".format(gencos))
for genco in gencos:
logger.debug("genco plants: {}".format(genco.plants))
for plant in genco.plants:
# logger.debug("plant: {}, year_number: {}, construction year+constructioon_period+predev: {}".format(plant, self.year_number, plant.construction_year + plant.construction_period + plant.pre_dev_period))
if plant.construction_year <= minimum_operation_year:
plant.is_operating = True
elif (plant.is_operating is False) and (
self.year_number
>= plant.construction_year
+ plant.construction_period
+ plant.pre_dev_period
):
plant.is_operating = True
    def overwrite_scenario_file(self, scenario_file):
        """Dynamically import a user-supplied scenario module from its file
        path and use it to overwrite the default scenario data."""
if scenario_file:
split_directory = scenario_file.split("/")
file_name = split_directory[-1]
spec = importlib.util.spec_from_file_location(file_name, scenario_file)
scenario_import = importlib.util.module_from_spec(spec)
spec.loader.exec_module(scenario_import)
scen_mod.overwrite_scenario_file(scenario_import)
def settle_gencos_financials(self):
gencos = self.get_gencos()
for genco in gencos:
genco.settle_accounts()
# genco.delete_old_bids()
def delete_old_bids(self):
gencos = self.get_gencos()
for genco in gencos:
genco.delete_old_bids()
def get_gencos(self):
gencos = [genco for genco in self.schedule.agents if isinstance(genco, GenCo)]
return gencos
def clear_all_bids(self):
gencos = self.get_gencos()
for genco in gencos:
genco.delete_old_bids()
@staticmethod
def get_capacity_of_plants(model, plant_type):
gencos = model.get_gencos()
plants = [
plant
for genco in gencos
for plant in genco.plants
if plant.plant_type == plant_type and plant.is_operating
]
total_capacity = sum(plant.capacity_mw for plant in plants)
return total_capacity
@staticmethod
def get_all_plants(model):
gencos = model.get_gencos()
plants = [plant for genco in gencos for plant in genco.plants]
return plants
@staticmethod
def get_current_carbon_tax(model):
carbon_tax = elecsim.scenario.scenario_data.carbon_price_scenario[
model.years_from_start
]
return carbon_tax
@staticmethod
def get_genco_wealth(model):
gencos = model.get_gencos()
total_wealth = 0
for genco in gencos:
total_wealth += genco.money
return total_wealth
@staticmethod
def get_electricity_cost(model):
return model.average_electricity_price
@staticmethod
def get_carbon_emitted(model):
gencos = model.get_gencos()
bids = World.get_accepted_bids(gencos, FuelPlant)
carbon_emitted = sum(
bid.capacity_bid * bid.plant.fuel.co2_density
for bid in bids
if isinstance(bid.plant, FuelPlant)
)
return carbon_emitted
@staticmethod
def get_accepted_bid_capacity(model, plant_type):
gencos = model.get_gencos()
plants = [
plant
for genco in gencos
for plant in genco.plants
if plant.plant_type == plant_type and plant.is_operating
]
capacity_contributed = sum(
bid.capacity_bid for plant in plants for bid in plant.accepted_bids
)
return capacity_contributed
@staticmethod
def get_accepted_bid_capacity_per_segment_hour(model):
gencos = model.get_gencos()
plants = [
plant for genco in gencos for plant in genco.plants if plant.is_operating
]
# capacity_contributed = [ if bid.segment_hours==]
bids_dataframe = [
bid.to_dict() for plant in plants for bid in plant.accepted_bids
]
return bids_dataframe
@staticmethod
def get_accepted_bids(gencos, plant_type=None):
if plant_type:
bids = list(
accepted_bids
for genco in gencos
for plants in genco.plants
for accepted_bids in plants.accepted_bids
if isinstance(plants, plant_type)
)
else:
bids = list(
accepted_bids
for genco in gencos
for plants in genco.plants
for accepted_bids in plants.accepted_bids
)
return bids
    def stratify_data(self, demand):
        """Scale the power plant fleet towards a target total capacity by
        sampling each fuel group proportionally (stratified sampling with
        replacement)."""
power_plants = elecsim.scenario.scenario_data.power_plants
frac_to_scale = demand / power_plants.Capacity.sum()
stratified_sample = power_plants.groupby(["Fuel"], as_index=False).apply(
lambda x: x.sample(frac=frac_to_scale, replace=True)
)
return stratified_sample
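    # Illustration (added comment): with a 100 GW fleet and a `demand` of
    # 60 GW, frac_to_scale is 0.6, so roughly 60% of each fuel group is
    # sampled (with replacement), preserving the original fuel mix.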
def set_log_level(self, log_level):
if log_level.lower() == "warning":
logging.basicConfig(level=logging.WARNING)
elif log_level.lower() == "info":
logging.basicConfig(level=logging.INFO)
elif log_level.lower() == "debug":
logging.basicConfig(level=logging.DEBUG)
else:
raise ValueError(
"log_level must be warning, info or debug and not {}".format(log_level)
)
def create_data_loggers(self, data_folder):
self.data_folder = data_folder
self.datacollector = DataCollector(
model_reporters={
"contributed_CCGT": lambda m: self.get_accepted_bid_capacity(m, "CCGT"),
"contributed_Coal": lambda m: self.get_accepted_bid_capacity(m, "Coal"),
"contributed_Onshore": lambda m: self.get_accepted_bid_capacity(
m, "Onshore"
),
"contributed_Offshore": lambda m: self.get_accepted_bid_capacity(
m, "Offshore"
),
"contributed_PV": lambda m: self.get_accepted_bid_capacity(m, "PV"),
"contributed_Nuclear": lambda m: self.get_accepted_bid_capacity(
m, "Nuclear"
),
"contributed_Recip_gas": lambda m: self.get_accepted_bid_capacity(
m, "Recip_gas"
),
"contributed_Biomass": lambda m: self.get_accepted_bid_capacity(
m, "Biomass"
),
# "hourly_accepted_bids": lambda m: self.get_accepted_bid_capacity_per_segment_hour(m),
"total_CCGT": lambda m: self.get_capacity_of_plants(m, "CCGT"),
"total_Coal": lambda m: self.get_capacity_of_plants(m, "Coal"),
"total_Onshore": lambda m: self.get_capacity_of_plants(m, "Onshore"),
"total_Offshore": lambda m: self.get_capacity_of_plants(m, "Offshore"),
"total_PV": lambda m: self.get_capacity_of_plants(m, "PV"),
"total_Nuclear": lambda m: self.get_capacity_of_plants(m, "Nuclear"),
"total_Recip_gas": lambda m: self.get_capacity_of_plants(
m, "Recip_gas"
),
"Carbon_tax": lambda m: self.get_current_carbon_tax(m),
"total_genco_wealth": lambda m: self.get_genco_wealth(m),
"Electricity_cost": lambda m: self.get_electricity_cost(m),
"Carbon_emitted": lambda m: self.get_carbon_emitted(m),
}
)
def override_total_demand(self, total_demand, number_of_agents=None):
self.total_demand = total_demand
if total_demand is not None:
elecsim.scenario.scenario_data.power_plants = self.stratify_data(
total_demand
)
demand_modifier = (
elecsim.scenario.scenario_data.power_plants.Capacity.sum()
/ elecsim.scenario.scenario_data.segment_demand_diff[-1]
) / 1.6
logger.info("demand_modifier: {}".format(demand_modifier))
logger.info(
"total available capacity: {}".format(
elecsim.scenario.scenario_data.power_plants.Capacity.sum()
)
)
elecsim.scenario.scenario_data.segment_demand_diff = [
demand_modifier * demand
for demand in elecsim.scenario.scenario_data.segment_demand_diff
]
if number_of_agents is not None:
total_plants = len(elecsim.scenario.scenario_data.power_plants)
fraction_to_replace = total_plants / number_of_agents
company_names = sample(
list(elecsim.scenario.scenario_data.power_plants.Company.unique()),
number_of_agents,
)
company_name_repeated = np.repeat(
company_names, int(fraction_to_replace)
)
company_name_repeated = np.append(
company_name_repeated,
np.array(
["company_{}".format(number_of_agents - 1) for i in range(100)]
),
)
elecsim.scenario.scenario_data.power_plants.Company = (
company_name_repeated[:total_plants]
)
def override_highest_demand(self, highest_demand):
if highest_demand:
elecsim.scenario.scenario_data.initial_max_demand_size = highest_demand
def override_demand_change(self, demand_change):
if demand_change:
elecsim.scenario.scenario_data.yearly_demand_change = demand_change[1:]
self.demand_change_name = str(demand_change[0]).replace(".", "")
else:
self.demand_change_name = "none"
def override_carbon_scenario(self, carbon_price_scenario):
if carbon_price_scenario:
elecsim.scenario.scenario_data.carbon_price_scenario = (
carbon_price_scenario[1:]
)
self.carbon_scenario_name = str(carbon_price_scenario[0]).replace(".", "")
else:
self.carbon_scenario_name = "none"
def write_scenario_data(self):
if self.step_number == self.max_number_of_steps:
parent_directory = os.path.dirname(os.getcwd())
directory = "{}/{}/".format(parent_directory, self.data_folder)
if not os.path.exists(directory):
os.makedirs(directory)
filename = "demand_{}-carbon_{}-datetime_{}-capacity_{}-demand_distribution_{}.csv".format(
self.demand_change_name,
self.carbon_scenario_name,
dt.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
1,
self.distribution_name,
# self.scenario_file.split("/")[-1].split(".")[0],
)
directory_filename = "{}/{}.csv".format(directory, filename)
results_df = self.datacollector.get_model_vars_dataframe()
results_df.to_csv(directory_filename)
class TransferData:
def __init__(self, access_token):
self.access_token = access_token
def upload_file(self, file_from, file_to):
"""upload a file to Dropbox using API v2"""
dbx = dropbox.Dropbox(self.access_token)
with open(file_from, "rb") as f:
dbx.files_upload(f.read(), file_to)
if self.dropbox:
                transferData = TransferData(self.dropbox)  # assumption: self.dropbox holds the access token; the original referenced an undefined `access_token`
file_from = "/{}".format(directory_filename)
file_to = "/{}".format(filename)
# API v2
transferData.upload_file(file_from, file_to)
if self.step_number == self.max_number_of_steps:
end = perf_counter()
            time_elapsed = end - self.start
            self.write_timing_results(end, time_elapsed)
    def write_timing_results(self, end, time_elapsed):
if self.time_run:
timings_data = pd.DataFrame(
{
"time": [time_elapased],
"carbon": [elecsim.scenario.scenario_data.carbon_price_scenario[0]],
"installed_capacity": [
elecsim.scenario.scenario_data.power_plants.Capacity.sum()
],
"datetime": [dt.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")],
}
)
parent_directory = os.path.dirname(os.getcwd())
with open("{}/{}/timing.csv".format(ROOT_DIR, self.data_folder), "a") as f:
timings_data.to_csv(f, header=False)
logger.info("end: {}".format(end))
logger.info(
"time_elapsed: {}, carbon: {}, size: {}".format(
                time_elapsed,
elecsim.scenario.scenario_data.carbon_price_scenario[0],
elecsim.scenario.scenario_data.power_plants.Capacity.sum(),
)
)
|
{"hexsha": "32d3887a3f5f4924217131da1c3c8acbd1090779", "size": 31708, "ext": "py", "lang": "Python", "max_stars_repo_path": "elecsim/model/world.py", "max_stars_repo_name": "alexanderkell/elecsim", "max_stars_repo_head_hexsha": "35e400809759a8e9a9baa3776344e383b13d8c54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-01-18T21:41:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T15:49:40.000Z", "max_issues_repo_path": "elecsim/model/world.py", "max_issues_repo_name": "alexanderkell/elecsim", "max_issues_repo_head_hexsha": "35e400809759a8e9a9baa3776344e383b13d8c54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:37:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T01:00:07.000Z", "max_forks_repo_path": "elecsim/model/world.py", "max_forks_repo_name": "alexanderkell/elecsim", "max_forks_repo_head_hexsha": "35e400809759a8e9a9baa3776344e383b13d8c54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-03T16:45:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-04T07:45:16.000Z", "avg_line_length": 39.4378109453, "max_line_length": 219, "alphanum_fraction": 0.5898511417, "include": true, "reason": "import numpy", "num_tokens": 6448}
|
function [f,g]=idgtreal(coef,g,a,M,varargin)
%IDGTREAL Inverse discrete Gabor transform for real-valued signals
% Usage: f=idgtreal(c,g,a,M);
% f=idgtreal(c,g,a,M,Ls);
%
% Input parameters:
% c : Array of coefficients.
% g : Window function.
% a : Length of time shift.
% M : Number of channels.
% Ls : length of signal.
% Output parameters:
% f : Signal.
%
% `idgtreal(c,g,a,M)` computes the Gabor expansion of the input coefficients
% *c* with respect to the real-valued window *g*, time shift *a* and number of
% channels *M*. *c* is assumed to be the positive frequencies of the Gabor
% expansion of a real-valued signal.
%
% It must hold that `size(c,1)==floor(M/2)+1`. Note that since the
% correct number of channels cannot be deduced from the input, `idgtreal`
% takes an additional parameter as opposed to |idgt|.
%
% The window *g* may be a vector of numerical values, a text string or a
% cell array. See the help of |gabwin| for more details.
%
% `idgtreal(c,g,a,M,Ls)` does as above but cuts or extends *f* to length *Ls*.
%
% `[f,g]=idgtreal(...)` additionally outputs the window used in the
%   transform. This is useful if the window was generated from a description
% in a string or cell array.
%
% For perfect reconstruction, the window used must be a dual window of the
% one used to generate the coefficients.
%
% If *g* is a row vector, then the output will also be a row vector. If *c* is
% 3-dimensional, then `idgtreal` will return a matrix consisting of one column
% vector for each of the TF-planes in *c*.
%
% See the help on |idgt| for the precise definition of the inverse Gabor
% transform.
%
% `idgtreal` takes the following flags at the end of the line of input
% arguments:
%
% 'freqinv' Use a frequency-invariant phase. This is the default
% convention described in the help for |dgt|.
%
% 'timeinv' Use a time-invariant phase. This convention is typically
% used in filter bank algorithms.
%
% Examples:
% ---------
%
%   The following example demonstrates the basic principles for getting
% perfect reconstruction (short version):::
%
% f=greasy; % test signal
% a=32; % time shift
% M=64; % frequency shift
% gs={'blackman',128}; % synthesis window
% ga={'dual',gs}; % analysis window
%
% [c,Ls]=dgtreal(f,ga,a,M); % analysis
%
% % ... do interesting stuff to c at this point ...
%
% r=idgtreal(c,gs,a,M,Ls); % synthesis
%
% norm(f-r) % test
%
% The following example does the same as the previous one, with an
% explicit construction of the analysis and synthesis windows:::
%
% f=greasy; % test signal
% a=32; % time shift
% M=64; % frequency shift
% Ls=length(f); % signal length
%
% % Length of transform to do
% L=dgtlength(Ls,a,M);
%
% % Analysis and synthesis window
% gs=firwin('blackman',128);
% ga=gabdual(gs,a,M,L);
%
% c=dgtreal(f,ga,a,M); % analysis
%
% % ... do interesting stuff to c at this point ...
%
% r=idgtreal(c,gs,a,M,Ls); % synthesis
%
% norm(f-r) % test
%
% See also: idgt, gabwin, gabdual, dwilt
% AUTHOR : Peter L. Søndergaard.
% TESTING: TEST_DGT
% REFERENCE: OK
% Check input parameters.
if nargin<4
error('%s: Too few input parameters.',upper(mfilename));
end;
if ~isnumeric(g) && prod(size(g))==1
error('g must be a vector (you probably forgot to supply the window function as input parameter.)');
end;
% Define initial value for flags and key/value pairs.
definput.keyvals.Ls=[];
definput.keyvals.lt=[0 1];
definput.flags.phase={'freqinv','timeinv'};
[flags,kv,Ls]=ltfatarghelper({'Ls'},definput,varargin);
N=size(coef,2);
W=size(coef,3);
% Make a dummy call to test the input parameters
Lsmallest=dgtlength(1,a,M,kv.lt);
M2=floor(M/2)+1;
if M2~=size(coef,1)
error('Mismatch between the specified number of channels and the size of the input coefficients.');
end;
L=N*a;
if rem(L,Lsmallest)>0
error('%s: Invalid size of coefficient array.',upper(mfilename));
end;
if kv.lt(2)>2
  error('Only rectangular or quincunx lattices are supported.');
end;
if kv.lt(2)~=1 && flags.do_timeinv
  error(['%s: Time-invariant phase for quincunx lattice is not ',...
'supported.'],upper(mfilename));
end
%% ----- step 3 : Determine the window
[g,info]=gabwin(g,a,M,L,kv.lt,'callfun',upper(mfilename));
if L<info.gl
error('%s: Window is too long.',upper(mfilename));
end;
if ~isreal(g)
error('%s: Window must be real-valued.',upper(mfilename));
end;
% Do the actual computation.
f=comp_idgtreal(coef,g,a,M,kv.lt,flags.do_timeinv);
% Cut or extend f to the correct length, if desired.
if ~isempty(kv.Ls)
f=postpad(f,kv.Ls);
else
kv.Ls=L;
end;
f=comp_sigreshape_post(f,Ls,0,[0; W]);
|
{"author": "ltfat", "repo": "ltfat", "sha": "4496a06ad8dddb85cd2e007216b765dc996ef327", "save_path": "github-repos/MATLAB/ltfat-ltfat", "path": "github-repos/MATLAB/ltfat-ltfat/ltfat-4496a06ad8dddb85cd2e007216b765dc996ef327/gabor/idgtreal.m"}
|
"""
This module handles data and provides convenient and efficient access to it.
"""
from __future__ import annotations
import os
import pickle
import sys
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from scipy import sparse
import util.tamer as tamer
from util import utils
from util.constants import *
from util.groups import Groups
from util.utils import GitUtil, SearchOptions, Settings
log = utils.get_logger(__name__)
settings = Settings.get_settings()
class DataHandler:
manuscripts: pd.DataFrame
"""Manuscripts
A dataframe containing all manuscripts with their respective metadata.
The dataframe will have the following structure:
    Per row, there will be metadata for one manuscript. The row indices are integers 0..n.
The dataframe contains the following columns:
- 'shelfmark'
- 'shorttitle'
- 'country'
- 'settlement'
- 'repository'
- 'origin'
- 'date'
- 'Terminus post quem'
- 'Terminus ante quem'
- 'meandate'
- 'yearrange'
- 'support'
- 'folio'
- 'height'
- 'width'
- 'extent'
- 'description'
- 'creator'
- 'id'
- 'full_id'
- 'filename'
"""
person_names: Dict[str, str]
"""Name lookup dictionary
Lookup dictionary mapping person IDs to the full name of the person
"""
person_names_inverse: Dict[str, List[str]]
"""Inverted name lookup dictionary
Dictionary mapping person names to a list of IDs of persons with said name"""
text_matrix: pd.DataFrame
"""Text-Manuscript-Matrix
Sparse matrix with a row per manuscript and a column per text name.
True, if the manuscript contains the text.
Allows for lookups, which manuscripts a particular text is connected to.
""" # TODO: Document the type of ID used for MSs in index/row label
person_matrix: pd.DataFrame
"""Person-Manuscript-Matrix
Sparse matrix with a row per manuscript and a column per person ID.
True, if the manuscript is connected to the person (i.e. the description has the person tagged).
Allows for lookups, which manuscripts a particular person is connected to.
"""
groups: Groups
# CHORE: document
def __init__(self) -> None:
"""DataHandler constructor.
Returns a new instance of a DataHandler.
Should not be called directly, but rather through the factory method `DataHandler.get_handler()`.
"""
log.info("Creating new handler")
self.person_names, self.person_names_inverse = DataHandler._load_persons()
log.info("Loaded Person Info")
self.manuscripts = DataHandler._load_ms_info(persons=self.person_names)
log.info("Loaded MS Info")
self.text_matrix = DataHandler._load_text_matrix(self.manuscripts)
log.info("Loaded Text Info")
self.person_matrix = DataHandler._load_person_matrix(self.manuscripts)
log.info("Loaded Person-MSS-Matrix Info")
self.groups = Groups.from_cache() or Groups()
log.debug(f"Groups loaded: {self.groups}")
self.manuscripts.drop(columns=["content", "soup"], inplace=True)
log.info("Successfully created a Datahandler instance.")
GitUtil.update_handler_state()
# Static Methods
# ==============
@staticmethod
def _from_pickle() -> Optional[DataHandler]:
"""Load datahandler from pickle, if available. Returns None otherwise."""
if os.path.exists(HANDLER_PATH_PICKLE):
try:
prev = sys.getrecursionlimit()
with open(HANDLER_PATH_PICKLE, mode='rb') as file:
sys.setrecursionlimit(prev * 100)
obj = pickle.load(file)
sys.setrecursionlimit(prev)
if isinstance(obj, DataHandler):
obj.groups = Groups.from_cache() or Groups()
log.debug(f"Groups loaded: {obj.groups}")
return obj
except Exception:
log.exception("Cound not load handler from pickle")
return None
@staticmethod
def _load_ms_info(persons: Dict[str, str]) -> pd.DataFrame:
"""Load manuscript metadata"""
df = tamer.deliver_handler_data()
df['soup'] = df['content'].apply(lambda x: BeautifulSoup(x, 'xml', from_encoding='utf-8'))
msinfo = df['soup'].apply(lambda x: tamer.get_msinfo(x, persons))
log.info("Loaded MS Info")
df = df.join(msinfo)
return df
@staticmethod
def _load_text_matrix(df: pd.DataFrame) -> pd.DataFrame:
"""Load the text-manuscript-matrix"""
mss_ids, text_names, coords = tamer.get_text_mss_matrix_coordinatres(df)
r, c = map(list, zip(*coords))
row = np.array(r)
col = np.array(c)
data = np.array([True]*len(row))
matrix = sparse.coo_matrix((data, (row, col)))
df = pd.DataFrame.sparse.from_spmatrix(matrix, index=mss_ids, columns=text_names)
return df
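# Illustration of the matrix construction above (toy values, not part of the
# pipeline): scipy builds the boolean COO matrix from parallel row/column
# coordinate arrays, e.g.
#   row = np.array([0, 0, 1]); col = np.array([0, 2, 1])
#   sparse.coo_matrix((np.array([True] * 3), (row, col))).toarray()
#   -> array([[ True, False,  True],
#             [False,  True, False]])
# pd.DataFrame.sparse.from_spmatrix(...) then attaches the manuscript IDs as
# the index and the text names as the columns.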
@staticmethod
def _load_person_matrix(df: pd.DataFrame) -> pd.DataFrame:
"""Load the person-manuscript-matrix"""
mss_ids, pers_ids, coords = tamer.get_person_mss_matrix_coordinatres(df)
r, c = map(list, zip(*coords))
row = np.array(r)
col = np.array(c)
data = np.array([True]*len(row))
matrix = sparse.coo_matrix((data, (row, col)))
df = pd.DataFrame.sparse.from_spmatrix(matrix, index=mss_ids, columns=pers_ids)
return df
@staticmethod
def _load_persons() -> Tuple[Dict[str, str], Dict[str, List[str]]]:
"""Load person data"""
person_names = tamer.get_person_names()
return person_names, tamer.get_person_names_inverse(person_names)
@staticmethod
def is_cached() -> bool:
"""Check if the data handler should be available from cache."""
return os.path.exists(HANDLER_PATH_PICKLE)
# Class Methods
# =============
@classmethod
def get_handler(cls) -> DataHandler:
"""Get a DataHandler
Factory method to get a DataHandler object.
Returns:
DataHandler: A DataHandler, either loaded from cache or created anew.
"""
log.info("Getting DataHandler")
res: Optional[DataHandler] = cls._from_pickle()
if res:
return res
log.info("Could not get DataHandler from pickle")
res = cls()
res._to_pickle()
log.info("DataHandler ready.")
return res
# Instance Methods
# ================
def _to_pickle(self) -> None:
"""Save the present DataHandler instance as pickle."""
log.info("Saving handler to pickle")
prev = sys.getrecursionlimit()
with open(HANDLER_PATH_PICKLE, mode='wb') as file:
try:
sys.setrecursionlimit(prev * 100)
pickle.dump(self, file)
sys.setrecursionlimit(prev)
except Exception:
log.exception("Failed to pickle the data handler.")
# API Methods
# -----------
def get_all_manuscript_data(self) -> pd.DataFrame:
"""Get the manuscripts dataframe.
Returns:
A dataframe containing all manuscripts with their respective metadata.
The dataframe will have the following structure:
Per row, there will be metadata for one manuscript. The row indices are integers 0..n.
The dataframe contains the following columns:
- 'shelfmark'
- 'shorttitle'
- 'country'
- 'settlement'
- 'repository'
- 'origin'
- 'date'
- 'Terminus post quem'
- 'Terminus ante quem'
- 'meandate'
- 'yearrange'
- 'support'
- 'folio'
- 'height'
- 'width'
- 'extent'
- 'description'
- 'creator'
- 'id'
- 'full_id'
- 'filename'
"""
return self.manuscripts
def search_manuscript_data(self,
full_ids: Optional[Union[List[str], pd.Series, pd.DataFrame]] = None,
ms_ids: Optional[Union[List[str], pd.Series, pd.DataFrame]] = None,
shelfmarks: Optional[Union[List[str], pd.Series, pd.DataFrame]] = None,
filenames: Optional[Union[List[str], pd.Series, pd.DataFrame]] = None) -> Optional[pd.DataFrame]:
"""Search manuscript metadata for certain manuscripts.
Basic search function:
Searches for manuscripts with certain IDs and returns the metadata for the respective manuscripts.
IDs can either be full_id (i.e. a certain catalogue entry),
ms_ids (i.e. a certain manuscript that can have catalogue entries in multiple languages),
shelfmarks (which will possibly yield multiple results per shelfmark),
or filenames (refers to the XML files of the catalogue entry).
Note: Exactly one of the four optional parameters should be passed.
Args:
full_ids (Union[List[str], pd.Series, pd.DataFrame], optional): List/Series/Dataframe of catalogue entry IDs. Defaults to None.
ms_ids (Union[List[str], pd.Series, pd.DataFrame], optional): List/Series/Dataframe of manuscript IDs. Defaults to None.
shelfmarks (Union[List[str], pd.Series, pd.DataFrame], optional): List/Series/Dataframe of shelfmarks. Defaults to None.
filenames (Union[List[str], pd.Series, pd.DataFrame], optional): List/Series/Dataframe of XML file names. Defaults to None.
Returns:
Optional[pd.DataFrame]: A dataframe containing the metadata for the requested manuscripts.
Returns None if no manuscript was found or if no parameters were passed.
"""
log.info(f'Searching for manuscripts: {full_ids}/{ms_ids}/{shelfmarks}/{filenames}')
# full id
if full_ids is not None:
if isinstance(full_ids, list) and full_ids:
return self.manuscripts.loc[self.manuscripts['full_id'].isin(full_ids)]
elif isinstance(full_ids, pd.DataFrame):
if full_ids.empty:
return None
return self.manuscripts.loc[self.manuscripts['full_id'].isin(full_ids['full_id'])]
elif isinstance(full_ids, pd.Series):
if full_ids.empty:
return None
return self.manuscripts.loc[self.manuscripts['full_id'].isin(full_ids)]
# id
elif ms_ids is not None:
if isinstance(ms_ids, list) and ms_ids:
return self.manuscripts.loc[self.manuscripts['id'].isin(ms_ids)]
elif isinstance(ms_ids, pd.DataFrame):
if ms_ids.empty:
return None
return self.manuscripts.loc[self.manuscripts['id'].isin(ms_ids['id'])]
elif isinstance(ms_ids, pd.Series):
if ms_ids.empty:
return None
return self.manuscripts.loc[self.manuscripts['id'].isin(ms_ids)]
# filename
elif filenames is not None:
if isinstance(filenames, list) and filenames:
return self.manuscripts.loc[self.manuscripts['filename'].isin(filenames)]
elif isinstance(filenames, pd.DataFrame):
if filenames.empty:
return None
return self.manuscripts.loc[self.manuscripts['filename'].isin(filenames['filename'])]
elif isinstance(filenames, pd.Series):
if filenames.empty:
return None
return self.manuscripts.loc[self.manuscripts['filename'].isin(filenames)]
# shelfmark
elif shelfmarks is not None:
if isinstance(shelfmarks, list) and shelfmarks:
return self.manuscripts.loc[self.manuscripts['shelfmark'].isin(shelfmarks)]
elif isinstance(shelfmarks, pd.DataFrame):
if shelfmarks.empty:
return None
return self.manuscripts.loc[self.manuscripts['shelfmark'].isin(shelfmarks['shelfmark'])]
elif isinstance(shelfmarks, pd.Series):
if shelfmarks.empty:
return None
return self.manuscripts.loc[self.manuscripts['shelfmark'].isin(shelfmarks)]
# no argument passed
return None
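# Usage sketch (the ID below is hypothetical, for illustration only):
#   handler = DataHandler.get_handler()
#   hits = handler.search_manuscript_data(full_ids=["AM02-0001-en"])
#   if hits is not None:
#       print(hits[["shelfmark", "shorttitle"]])
# Exactly one keyword argument should be given; passing none (or only empty
# inputs) returns None.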
def get_all_texts(self) -> pd.DataFrame:
"""return the text-manuscript-matrix"""
return self.text_matrix
def search_manuscripts_containing_texts(self, texts: List[str], searchOption: SearchOptions) -> List[str]:
"""Search manuscripts containing certain texts
Args:
texts (List[str]): A list of text names
searchOption (SearchOptions): whether to do an AND or an OR search
Returns:
List[str]: A list of `full_id`s of manuscripts containing either one or all of the passed texts, depending on the chosen searchOption.
Returns an empty list, if none were found.
"""
log.info(f'Searching for manuscripts with texts: {texts} ({searchOption})')
if not texts:
log.debug('Searched texts are empty list')
return []
if searchOption == SearchOptions.CONTAINS_ONE:
hits = []
for t in texts:
df = self.text_matrix[self.text_matrix[t] == True]
mss = list(df.index)
hits += mss
res_ = list(set(hits))
_res = self.manuscripts[self.manuscripts['full_id'].isin(res_)]
res = list(set(_res['shelfmark'].tolist()))
if not res:
log.info('no manuscripts found')
return []
log.info(f'Search result: {res}')
return res
else:
hits = []
for t in texts:
df = self.text_matrix[self.text_matrix[t] == True]
s = set(df.index)
hits.append(s)
if not hits:
log.info('no manuscripts found')
return []
intersection = set.intersection(*hits)
res_ = list(intersection)
_res = self.manuscripts[self.manuscripts['full_id'].isin(res_)]
res = list(set(_res['shelfmark'].tolist()))
log.info(f'Search result: {res}')
return res
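# Semantics sketch (text names are hypothetical): with
# SearchOptions.CONTAINS_ONE this is an OR search, so
#   handler.search_manuscripts_containing_texts(
#       ["Njáls saga", "Egils saga"], SearchOptions.CONTAINS_ONE)
# returns shelfmarks of manuscripts containing at least one of the two texts;
# any other option falls through to the AND branch, which intersects the
# per-text hit sets.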
def search_texts_contained_by_manuscripts(self, Inmss: List[str], searchOption: SearchOptions) -> List[str]:
"""Search the texts contained by certain manuscripts.
Search for all texts contained by a given number of manuscripts.
Depending on the search option, either the texts appearing in one of the named manuscripts,
or the texts appearing in all manuscripts will be returned.
Args:
Inmss (List[str]): a list of manuscript full_id strings
searchOption (SearchOptions): whether to do an AND or an OR search
Returns:
List[str]: A list of text names.
"""
log.info(f'Searching for texts contained by manuscripts: {Inmss} ({searchOption})')
if not Inmss:
log.debug('Searched for empty list of mss')
return []
mss_ = self.manuscripts[self.manuscripts['full_id'].isin(Inmss)]
mss = mss_['full_id'].tolist()
df = self.text_matrix.transpose()
if searchOption == SearchOptions.CONTAINS_ONE:
hits = []
for ms in mss:
d = df[df[ms] == True]
texts_found = list(d.index)
hits += texts_found
res = list(set(hits))
if not res:
log.info('no texts found')
return []
log.info(f'Search result: {res}')
return res
else:
sets = []
for ms in mss:
d = df[df[ms] == True]
s = set(d.index)
sets.append(s)
if not sets:
log.info('no texts found')
return []
intersection = set.intersection(*sets)
res = list(intersection)
log.info(f'Search result: {res}')
return res
def get_ms_urls_from_search_or_browse_urls(self, urls: List[str], sharedMode: bool = False) -> Tuple[List[str], pd.DataFrame]:
# CHORE: documentation
# TODO: should probably be moved to tamer, right?
msss: List[pd.DataFrame] = []
for url in urls:
if "/search/results/" in url:
pages = tamer.get_search_result_pages(url)
shelfmarks = tamer.get_shelfmarks_from_urls(pages)
log.info(f"Loaded Shelfmarks: {shelfmarks}")
mss = self.manuscripts[self.manuscripts['shelfmark'].isin(shelfmarks)]
else:
ids = tamer.efnisordResult(url)
mss = self.manuscripts[self.manuscripts['id'].isin(ids)]
msss.append(mss)
if sharedMode: # Looked it over, they don't return the same. I got confused between this branch and stable (stable squishes results in tamer).
res = self.manuscripts
for df in msss:
res = pd.merge(res, df, on='shelfmark', how='inner')
return list(res['shelfmark']), res
else:
all_hits: pd.DataFrame = pd.concat(msss)
unique_hits = all_hits.drop_duplicates().reset_index(drop=True)
return list(unique_hits['shelfmark']), unique_hits
def get_person_name(self, pers_id: str) -> str:
"""Get a person's name, identified by the person's ID"""
return self.person_names.get(pers_id) or ""
def get_person_ids(self, pers_name: str) -> List[str]:
"""Get IDs of all persons with a certain name"""
return self.person_names_inverse[pers_name]
def search_persons_related_to_manuscripts(self, ms_full_ids: List[str], searchOption: SearchOptions) -> List[str]:
log.info(f'Searching for persons related to manuscripts: {ms_full_ids} ({searchOption})')
if not ms_full_ids:
log.debug('Searched for empty list of mss')
return []
df = self.person_matrix.transpose()
if searchOption == SearchOptions.CONTAINS_ONE:
hits = []
for ms in ms_full_ids:
d = df[df[ms] == True]
persons = list(d.index)
hits += persons
res = list(set(hits))
if not res:
log.info('no person found')
return []
log.info(f'Search result: {res}')
return res
else:
sets = []
for ms in ms_full_ids:
d = df[df[ms] == True]
s = set(d.index)
sets.append(s)
if not sets:
log.info('no person found')
return []
intersection = set.intersection(*sets)
res = list(intersection)
log.info(f'Search result: {res}')
return res
def search_manuscripts_related_to_persons(self, person_ids: List[str], searchOption: SearchOptions) -> List[str]:
# CHORE: Document
log.info(f'Searching for manuscript related to people: {person_ids} ({searchOption})')
if not person_ids:
log.debug('Searched for empty list of ppl')
return []
df = self.person_matrix
if searchOption == SearchOptions.CONTAINS_ONE:
hits = []
for pers in person_ids:
d = df[df[pers] == True]
mss = list(d.index)
hits += mss
res = list(set(hits))
if not res:
log.info('no ms found')
return []
log.info(f'Search result: {res}')
return res
else:
sets = []
for pers in person_ids:
d = df[df[pers] == True]
s = set(d.index)
sets.append(s)
if not sets:
log.info('no ms found')
return []
intersection = set.intersection(*sets)
res = list(intersection)
log.info(f'Search result: {res}')
return res
|
{"hexsha": "e942c38ee0116a469e8d1a68b27657ee3b47f2bf", "size": 20506, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/datahandler.py", "max_stars_repo_name": "arbeitsgruppe-digitale-altnordistik/Sammlung-Toole", "max_stars_repo_head_hexsha": "502d6128e55622b760c245b03d973574f0adab4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/datahandler.py", "max_issues_repo_name": "arbeitsgruppe-digitale-altnordistik/Sammlung-Toole", "max_issues_repo_head_hexsha": "502d6128e55622b760c245b03d973574f0adab4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2021-08-29T20:04:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T12:10:47.000Z", "max_forks_repo_path": "util/datahandler.py", "max_forks_repo_name": "arbeitsgruppe-digitale-altnordistik/Sammlung-Toole", "max_forks_repo_head_hexsha": "502d6128e55622b760c245b03d973574f0adab4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6905660377, "max_line_length": 151, "alphanum_fraction": 0.5822686043, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4387}
|
\documentclass{beamer}
%
% Choose how your presentation looks.
%
% For more themes, color themes and font themes, see:
% http://deic.uab.es/~iblanes/beamer_gallery/index_by_theme.html
%
\mode<presentation>
{
\usetheme{default} % or try Darmstadt, Madrid, Warsaw, ...
\usecolortheme{default} % or try albatross, beaver, crane, ...
\usefonttheme{default} % or try serif, structurebold, ...
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{caption}[numbered]
\setbeamertemplate{footline}[frame number]
\setbeamertemplate{itemize items}[circle]
\setbeamertemplate{theorems}[numbered]
\setbeamercolor*{structure}{bg=white,fg=blue}
\setbeamerfont{block title}{size=\normalsize}
}
% \newtheorem{proposition}[theorem]{Proposition}
% \theoremstyle{definition}
% \newtheorem{algorithm}[theorem]{Algorithm}
% \newtheorem{idea}[theorem]{Idea}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{aligned-overset}
\usepackage{alltt}
\usepackage{amsmath}
\usepackage{csquotes}
% \usepackage{multicol}
% \usepackage{stmaryrd}
\usepackage{tabularx}
% \renewcommand\tabularxcolumn[1]{m{#1}}
% \newcolumntype{R}{>{\raggedleft\arraybackslash}X}=
\newcommand\logeq{\mathrel{\vcentcolon\Leftrightarrow}}
\def\code#1{\texttt{\frenchspacing#1}}
\def\padding{\vspace{0.5cm}}
\def\spadding{\vspace{0.25cm}}
\def\b{\textcolor{blue}}
\def\r{\textcolor{red}}
\def\g#1{{\usebeamercolor[fg]{block title example}{#1}}}
% fix for \pause in align
\makeatletter
\let\save@measuring@true\measuring@true
\def\measuring@true{%
\save@measuring@true
\def\beamer@sortzero##1{\beamer@ifnextcharospec{\beamer@sortzeroread{##1}}{}}%
\def\beamer@sortzeroread##1<##2>{}%
\def\beamer@finalnospec{}%
}
\makeatother
\title[Theoretical Computer Science]{Theoretical Computer Science \\ Context-Free Languages}
\author{Jonas Hübotter}
\date{}
\begin{document}
\begin{frame}
\titlepage
\end{frame}
\begin{frame}{Outline}
\tableofcontents[subsubsectionstyle=hide]
\end{frame}
% \AtBeginSection[]
% {
% \begin{frame}[allowframebreaks]{Plan}
% \tableofcontents[currentsection, sectionstyle=show/hide, hideothersubsections]
% \end{frame}
% }
\section{Overview}
\begin{frame}{Overview}
\begin{block}{Representations of context-free languages}\pause
\begin{itemize}
\item Context-Free Grammar (CFG)\pause
\item Pushdown Automaton (PDA)
\end{itemize}
\end{block}
\end{frame}
\section{Context-Free Grammar (CFG)}
\subsection{Variables}
\begin{frame}{Variables}
\begin{definition}
Given a grammar $G = (V, \Sigma, P, S)$, a variable $X \in V$ is
\begin{itemize}
\item \b{generative} if $\exists X \to_G^* w \in \Sigma^*$\pause;
\item \b{reachable} if $\exists S \to_G^* X$\pause; and
\item \b{helpful} if it is generative and reachable.
\end{itemize}
\end{definition}
\end{frame}
\subsection{Inductive Definition}
\begin{frame}{Inductive Definition}
Given a context-free grammar $G = (V, \Sigma, P, S)$ with $V = \{A_1, \dots, A_k\}$,\pause\par
productions $A_i \to w_0 A_{i_1}w_1 \dots w_{n-1} A_{i_n} w_n$\pause\par
correspond to \begin{align*}
&u_1 \in L_G(A_{i_1}) \land \dots \land u_n \in L_G(A_{i_n}) \\
&\implies w_0 u_1 w_1 \dots w_{n-1} u_n w_n \in L_G(A_i).
\end{align*}\pause
Hence, $L(G) = L_G(S)$.\pause\par\spadding
Productions produce words \r{top-down},\par
inductive definition \textit{produces} words \r{bottom-up}.
\end{frame}
\subsection{Decomposition Lemma}
\begin{frame}{Decomposition Lemma}
\begin{lemma}[Decomposition Lemma]
Any derivation of length $n$ of $\beta$ from $\alpha_1 \alpha_2$ may \textit{split} $\beta$ into two separately derivable parts $\beta_1$ and $\beta_2$ at any position.\pause\ Formally:
\begin{align*}
\alpha_1 \alpha_2 \to_G^n \beta \iff& \exists \beta_1, \beta_2, n_1, n_2.\ \beta = \beta_1 \beta_2 \land n = n_1 + n_2\ \land \\
& \alpha_1 \to_G^{n_1} \beta_1 \land \alpha_2 \to_G^{n_2} \beta_2.
\end{align*}
\end{lemma}
\end{frame}
\subsection{Syntax Tree}
\begin{frame}{Syntax Tree}
\begin{definition}
A \b{syntax tree} of a derivation $\to_G$ given $G = (V, \Sigma, P, S)$ is a tree where\pause
\begin{enumerate}
\item every leaf is labeled with a symbol in $\Sigma \cup \{\epsilon\}$\pause;
\item every inner node is labeled with some $A \in V$ such that,\par
if its children are labeled $X_1, \dots, X_n \in V \cup \Sigma \cup \{\epsilon\}$, then $A \to X_1 \dots X_n \in P$\pause; and
\item a leaf labeled $\epsilon$ is an only child of its parent.
\end{enumerate}\pause\spadding
The \b{border} of a syntax tree is the concatenation of the labels of its leaves from left to right.
\end{definition}\pause
\begin{align*}
A \to_G^* w &\iff w \in L_G(A) \\
&\iff \exists\ \text{syntax tree with root } A \text{ and border } w.
\end{align*}
\end{frame}
\begin{frame}{Syntax Tree}
\begin{definition}
\begin{itemize}
\item A CFG $G$ is \b{ambiguous} if $\exists w \in L(G)$ that has two distinct syntax trees.
\item A CFL $L$ is \b{inherently ambiguous} if every CFG $G$ with $L(G) = L$ is ambiguous.
\end{itemize}
\end{definition}
\end{frame}
\subsection{Chomsky Normal Form}
\begin{frame}{Chomsky Normal Form}
\begin{definition}[Chomsky Normal Form]
All productions are of the form $A \to a$ or $A \to BC$ for $a \in \Sigma$ and $A,B,C \in V$.
\end{definition}\pause
\begin{block}{Algorithm to convert a CFG to Chomsky Normal Form ($\mathcal{O}(|P|^2)$)}
\begin{enumerate}
\item replace every $a \in \Sigma$ occurring in a production with length $>1$ by a non-terminal\pause
\item replace $A \to B_1 \dots B_k$ (where $k>2$) with $A \to B_1 C_2,\ C_2 \to B_2 C_3,\ \dots,\ C_{k-1} \to B_{k-1} B_k$\pause
\item remove $\epsilon$-productions (i.e. $A \to \epsilon$)\pause
\item remove chain productions (i.e. $A \to B$)
\end{enumerate}
\end{block}
\end{frame}
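% Added worked example (small grammar chosen for illustration, not from the
% original slides):
\begin{frame}{Chomsky Normal Form: Example}
    \begin{example}
        Consider $S \to aSb \mid ab$.\par
        Step 1 (replace terminals in long productions): $A \to a$, $B \to b$, $S \to ASB \mid AB$.\par
        Step 2 (binarize): $S \to ASB$ becomes $S \to AC$, $C \to SB$.\par
        Resulting grammar in Chomsky normal form: $S \to AC \mid AB$, $C \to SB$, $A \to a$, $B \to b$.
    \end{example}
\end{frame}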
\subsection{Other Normal Forms}
\begin{frame}{Other Normal Forms}
\begin{definition}[Greibach Normal Form]
All productions are of the form $A \to a A_1 \dots A_n$ for $a \in \Sigma$ and $A_1, \dots, A_n \in V$.
\end{definition}\pause
\begin{definition}[Backus-Naur Normal Form]
Allows the use of regular expressions in productions (in addition to symbols).
\end{definition}
\end{frame}
\subsection{Cocke-Younger-Kasami Algorithm (CYK)}
\begin{frame}{Cocke-Younger-Kasami Algorithm (CYK)}
Solves the word problem for CFGs.\pause
\begin{block}{Algorithm ($\mathcal{O}(|w|^3)$)}
Given $G = (V, \Sigma, P, S)$ in Chomsky normal form and $w = a_1 \dots a_n \in \Sigma^*$.\pause\par
Define $V_{ij} = \{A \in V \mid A \to_G^* a_i \dots a_j\}$ for $i \leq j$ as the set of all initial symbols that may be used to derive $a_i \dots a_j$.\pause\par
Then $w \in L_G(A) \iff A \in V_{1n}$.\pause\par\spadding
Recursive definition of $V_{ij}$:
\begin{itemize}
\item base: $V_{ii} = \{A \in V \mid (A \to a_i) \in P\}$\pause
\item step: \begin{align*}
V_{ij} = \{A \in V \mid \substack{\exists i \leq k < j, B \in V_{ik}, C \in V_{(k+1)j}.\ \\ (A \to BC) \in P}\}
\end{align*}
\end{itemize}
\end{block}
\end{frame}
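% Added worked example (tiny grammar chosen for illustration, not from the
% original slides):
\begin{frame}{Cocke-Younger-Kasami Algorithm (CYK): Example}
    \begin{example}
        Let $P = \{S \to AB,\ A \to a,\ B \to b\}$ (already in Chomsky normal form) and $w = ab$.\par
        Base: $V_{11} = \{A\}$, $V_{22} = \{B\}$.\par
        Step ($i=1$, $j=2$, $k=1$): $A \in V_{11}$, $B \in V_{22}$, $(S \to AB) \in P$, so $S \in V_{12}$.\par
        Since $S \in V_{12}$, we conclude $w \in L(G)$.
    \end{example}
\end{frame}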
\section{Pushdown Automaton (PDA)}
\begin{frame}{PDA}
\begin{definition}
A \b{pushdown automaton (PDA)} $M = (Q, \Sigma, \Gamma, q_0, Z_0, \delta, F)$\pause\ consists of
\begin{itemize}
\item a finite set of \b{states} $Q$\pause;
\item a (finite) \b{input alphabet} $\Sigma$\pause;
\item a (finite) \b{stack alphabet} $\Gamma$\pause;
\item an \b{initial state} $q_0 \in Q$\pause;
\item an \b{initial stack element} $Z_0 \in \Gamma$\pause;
\item a (partial) \b{transition function} $\delta: Q \times (\Sigma \cup \{\epsilon\}) \times \Gamma \to 2^{Q \times \Gamma^*}$\pause; and
\item a set of \b{terminal (accepting) states} $F \subseteq Q$.
\end{itemize}
\end{definition}\pause
Graphically, transitions are denoted as $a, Z/\alpha$ where $a \in \Sigma$ is the input, $Z \in \Gamma$ is the top stack element, and $\alpha \in \Gamma^*$ replaces $Z$ in the new stack.
\end{frame}
\begin{frame}{PDA}
\begin{definition}
The \b{configuration} of a PDA $M$ is a triple $(q, w, \alpha)$ where $q \in Q$ is its state, $w \in \Sigma^*$ is its remaining input, and $\alpha \in \Gamma^*$ is its stack.\pause\par\spadding
The \b{initial configuration} of $M$ on input $w \in \Sigma^*$ is $(q_0,w,Z_0)$.
\end{definition}\pause
\begin{definition}
The \b{transition relation} of a PDA $M$ is \begin{align*}
(q,aw,Z\alpha) \to_M (q',w,\beta\alpha) &\quad\text{if } (q',\beta) \in \delta(q,a,Z) \\
(q,w,Z\alpha) \to_M (q',w,\beta\alpha) &\quad\text{if } (q',\beta) \in \delta(q,\epsilon,Z).
\end{align*}
\end{definition}
\end{frame}
\begin{frame}{PDA}
\begin{definition}
PDA $M$ \b{accepts} $w \in \Sigma^*$ with final state if \begin{align*}
(q_0,w,Z_0) \to_M^* (f,\epsilon,\gamma) \quad\text{for } f \in F, \gamma \in \Gamma^*.
\end{align*}
So, $L_F(M) = \{w \in \Sigma^* \mid \exists f \in F, \gamma \in \Gamma^*.\ (q_0,w,Z_0) \to_M^* (f,\epsilon,\gamma)\}$.
\end{definition}\pause
\begin{definition}
PDA $M$ \b{accepts} $w \in \Sigma^*$ with empty stack if \begin{align*}
(q_0,w,Z_0) \to_M^* (q,\epsilon,\epsilon) \quad\text{for } q \in Q.
\end{align*}
So, $L_{\epsilon}(M) = \{w \in \Sigma^* \mid \exists q \in Q.\ (q_0,w,Z_0) \to_M^* (q,\epsilon,\epsilon)\}$.
\end{definition}\pause\padding
\r{Both accepting conditions are equally powerful.}
\end{frame}
\subsection{Lemmas}
\begin{frame}{Lemmas}
\begin{lemma}[Extension Lemma]
Every derivation may occur as a sub-derivation of a larger derivation\pause:
\begin{align*}
(q,u,\alpha) \to_M^n (q',u',\alpha') \implies (q,uv,\alpha\beta) \to_M^n (q',u'v,\alpha'\beta).
\end{align*}
\end{lemma}\pause
\begin{lemma}[Decomposition Lemma]
Every derivation that empties the stack can be divided into sub-derivations that each remove a single symbol from the stack\pause:
Given $(q,w,Z_1 \dots Z_k) \to_M^n (q',\epsilon,\epsilon)$\pause,\par
then $\forall i \in [1,k].\ \exists u_i, p_i, n_i$ such that \begin{align*}
(p_{i-1},u_i,Z_i) \to_M^{n_i} (p_i,\epsilon,\epsilon)
\end{align*} with $w = u_1 \dots u_k$, $q = p_0$, $q' = p_k$, and $n = \sum_{i=1}^k n_i$.
\end{lemma}
\end{frame}
\subsection{CFG $\to$ PDA}
\begin{frame}{CFG $\to$ PDA}
Given CFG $G = (V,\Sigma,P,S)$,\pause
\begin{enumerate}
\item bring all productions into the form \begin{align*}
A \to b B_1 \dots B_k \quad\text{for } b \in \Sigma \cup \{\epsilon\}
\end{align*}\pause
\item define the PDA $M = (\{q\},\Sigma,V,q,S,\delta)$\pause\ with \begin{align*}
\delta(q,b,A) = \{(q,\beta) \mid (A \to b \beta) \in P\}.
\end{align*}
\end{enumerate}\pause
Then, $L(G) = L_{\epsilon}(M)$.
\end{frame}
\subsection{PDA $\to$ CFG}
\begin{frame}{PDA $\to$ CFG}
Given PDA $M = (Q,\Sigma,\Gamma,q_0,Z_0,\delta,F)$,\pause\ define CFG $G = (V,\Sigma,P,S)$.\pause\par\spadding
We define $V = Q \times \Gamma \times Q \cup \{S\}$ where each $[q,Z,p] \in V$ describes all possibilities of going from state $q \in Q$ to state $p \in Q$ while $Z \in \Gamma$ is the top stack element.\pause\par\spadding
We define the productions $P$ as \begin{itemize}
\item $\forall q \in Q.\ S \to [q_0,Z_0,q]$\pause\ and
\item $\forall (r_0, Z_1 \dots Z_k) \in \delta(q,b,Z).\ \forall r_1, \dots, r_k \in Q.$\pause \begin{align*}
[q,Z,r_k] \to b [r_0,Z_1,r_1][r_1,Z_2,r_2]\dots[r_{k-1},Z_k,r_k].
\end{align*}
\end{itemize}\pause\padding
We observe that \begin{align*}
[q,Z,r_k] \to_G^* w \iff (q,w,Z) \to_M^* (r_k,\epsilon,\epsilon).
\end{align*}\pause
So, $L(G) = L_{\epsilon}(M)$.
\end{frame}
\subsection{Deterministic Pushdown Automaton (DPDA)}
\section{Closure Properties}
\begin{frame}{Closure Properties}
\begin{theorem}
Given context-free languages $L, L_1, L_2$, the following are also context-free languages:\pause
\begin{itemize}
\item $L_1 L_2$\pause;
\item $L_1 \cup L_2$\pause; and
\item $L^*$.
\end{itemize}
\end{theorem}\pause
\begin{theorem}
If $L$ is a deterministic context-free language, then $\bar{L}$ is also deterministic context-free.
\end{theorem}
\end{frame}
\section{Pumping Lemma}
\begin{frame}{Pumping Lemma}
\begin{lemma}[Pumping Lemma for context-free languages]
Let $L \subseteq \Sigma^*$ be context-free.\pause\ Then there exists some $n > 0$ such that every $z \in L$ with $|z| \geq n$ can be decomposed into $z = uvwxy$\pause\ such that
\begin{itemize}
\item $vx \neq \epsilon$\pause;
\item $|vwx| \leq n$\pause; and
\item $\forall i \geq 0.\ uv^iwx^iy \in L$.
\end{itemize}
\end{lemma}\pause
\r{A necessary condition for context-free languages.}
\end{frame}
\begin{frame}{Pumping Lemma}
\begin{example}[proof structure]
Assume $L$ is context-free.\par
Let $n > 0$ be a Pumping Lemma number.\pause\par
Choose $z \in L$ with $|z| \geq n$.\par
Define $z = uvwxy$ with $vx \neq \epsilon$ and $|vwx| \leq n$.\pause\par
Then, $\forall i \geq 0.\ uv^iwx^iy \in L$.\pause\par
Now, use the last statement to find a contradiction separating all possible cases for $v$ and $x$.
\end{example}
\end{frame}
\end{document}
|
{"hexsha": "d763a68ffd2c92a9ac8cee3062ca182297f67050", "size": 14165, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "materials/context_free_languages.tex", "max_stars_repo_name": "jonhue/teaching-theo", "max_stars_repo_head_hexsha": "d7dd92d81f05db0a82b36f1532fa76e356dffc23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-15T12:03:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T12:03:09.000Z", "max_issues_repo_path": "materials/context_free_languages.tex", "max_issues_repo_name": "jonhue/teaching-theo", "max_issues_repo_head_hexsha": "d7dd92d81f05db0a82b36f1532fa76e356dffc23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "materials/context_free_languages.tex", "max_forks_repo_name": "jonhue/teaching-theo", "max_forks_repo_head_hexsha": "d7dd92d81f05db0a82b36f1532fa76e356dffc23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4714285714, "max_line_length": 225, "alphanum_fraction": 0.6208259795, "num_tokens": 4778}
|
import numpy as np
from random import random
def crop_square(image, coordinates, ratio=1, keep_area_threshold=0.5):
"""random crop a image into a square image and change the
original coordinates to new coordinates. Some coordinates will be last
if it is at outside of the cropped area.
Args:
image (ndarray): numpy image, should be [height, width, channel]
coordinates (tuple): a tuple of coordinates list, should be
([top, left, bottom, right], ...)
ratio (float, optional): defaults to 1. crop size as a fraction of the
shorter edge of the image
keep_area_threshold (float, optional): defaults to 0.5. the minimum
fraction of a ground-truth bounding box's area that must remain inside
the crop for the box to be kept.
Returns:
tuple: (cropped_image, new_coordinates), noticed that new_coordinates
may be an empty list.
"""
size = image.shape[:2]
short_size = np.min(size)
square_size = int(short_size * ratio)
n_top = int((image.shape[0] - square_size) * random())
n_left = int((image.shape[1] - square_size) * random())
n_bottom = n_top + square_size
n_right = n_left + square_size
cropped_image = image[n_top:n_bottom, n_left:n_right]
new_coordinates = []
for coordinate in coordinates:
width = coordinate[3] - coordinate[1]
height = coordinate[2] - coordinate[0]
n_width = max(min(coordinate[3], n_right) - max(coordinate[1], n_left), 0)
n_height = max(min(coordinate[2], n_bottom) - max(coordinate[0], n_top), 0)
# there are some all zero coordinates in wider face
if (width * height) == 0:
continue
area_in_crop = (n_width * n_height) / (width * height)
if area_in_crop < keep_area_threshold:
continue
new_coordinates.append([
max(coordinate[0] - n_top, 0),
max(coordinate[1] - n_left, 0),
max(coordinate[2] - n_top, 0),
max(coordinate[3] - n_left, 0),
*coordinate[4:]
])
return cropped_image, new_coordinates
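# Usage sketch (dummy data, for illustration only):
#   image = np.zeros((200, 300, 3), dtype=np.uint8)
#   boxes = ([50, 60, 120, 140],)  # [top, left, bottom, right]
#   cropped, new_boxes = crop_square(image, boxes)
#   cropped.shape  # (200, 200, 3) for ratio=1, at a random horizontal offset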
def random_horizontal_flip(image, coordinates):
"""randomly horizontal flip a image and its coodinates
Args:
image (ndarray): numpy image, should be [height, width, channel]
coordinates (tuple): a tuple of coordinates list, should be
([top, left, bottom, right], ...)
Returns:
tuple: (image, new_coordinates), noticed that new_coordinates
may be an empty list.
"""
if random() > 0.5:
return image, coordinates
image = image[:, ::-1, :]
new_coordinates = []
for coordinate in coordinates:
new_coordinates.append([
coordinate[0],
# mirror horizontally: the old right edge becomes the new left edge
image.shape[1] - coordinate[3],
coordinate[2],
image.shape[1] - coordinate[1],
*coordinate[4:]
])
return image, new_coordinates
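# Coordinate check for the flip above (dummy 100-pixel-wide image): a box with
# left=20, right=50 maps to left=100-50=50, right=100-20=80, so the
# [top, left, bottom, right] ordering is preserved after mirroring.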
|
{"hexsha": "c31fedd79a1cb449f865121dcfe30583a5caba6a", "size": 2960, "ext": "py", "lang": "Python", "max_stars_repo_path": "imageaug.py", "max_stars_repo_name": "87ZGitHub/sfd.pytorch", "max_stars_repo_head_hexsha": "66108ab35d8b1c1601c326b151141d9115a1409e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 124, "max_stars_repo_stars_event_min_datetime": "2018-07-08T14:36:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-08T14:01:20.000Z", "max_issues_repo_path": "imageaug.py", "max_issues_repo_name": "87ZGitHub/sfd.pytorch", "max_issues_repo_head_hexsha": "66108ab35d8b1c1601c326b151141d9115a1409e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-07-09T07:17:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-11T12:26:13.000Z", "max_forks_repo_path": "imageaug.py", "max_forks_repo_name": "87ZGitHub/sfd.pytorch", "max_forks_repo_head_hexsha": "66108ab35d8b1c1601c326b151141d9115a1409e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2018-07-09T04:51:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-06T15:40:45.000Z", "avg_line_length": 35.6626506024, "max_line_length": 83, "alphanum_fraction": 0.6155405405, "include": true, "reason": "import numpy", "num_tokens": 711}
|
import argparse
import glob
import pickle
import random
import time
import sys
import torch.optim as optim
import torch.nn as nn
import torch
import numpy as np
from transformers import *
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from models import BERT_NN, BERT_NN_SEP
from loss_functions import SoftmaxLoss
def tensor_to_numpy(x):
''' Need to cast before calling numpy()
'''
# return (Variable(x).data).cpu().numpy()
return x.data.type(torch.DoubleTensor).cpu().numpy()
def seq_padding(input_ids, max_seq_length, pad_token_id):
#pad_token = 0
if len(input_ids) < max_seq_length:
padding_length = max_seq_length - len(input_ids)
else:
padding_length = 0
input_ids = input_ids[:max_seq_length]
input_mask = [1] * len(input_ids) + [0] * padding_length
input_ids = input_ids + [pad_token_id] * padding_length
# print(input_ids)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
return input_ids, input_mask
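# Example (hypothetical token ids and pad id, for illustration only):
#   seq_padding([101, 7592, 102], max_seq_length=5, pad_token_id=0)
#   -> ([101, 7592, 102, 0, 0], [1, 1, 1, 0, 0])
# Sequences longer than max_seq_length are truncated and get an all-ones mask.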
def prepare_input_tensor(input_ids_list, keyword_idx_list, sentence_max_length, pad_token_id):
input_lenList = [len(input_ids) for input_ids in input_ids_list]
# use given sentence_max_length
max_len = sentence_max_length
"""
# use batch longest length
max_len = max(input_lenList)
#max_len = min([sentence_max_length, max_len])
"""
padded_input_ids_list = []
mask_list = []
for input_ids in input_ids_list:
padded_input_ids, mask = seq_padding(input_ids, max_len, pad_token_id)
padded_input_ids_list.append(padded_input_ids)
mask_list.append(mask)
output_mask_list = []
key_len_list = []
for keyword_idx in keyword_idx_list:
output_mask_list.append([0.0] * (keyword_idx[0]) + [1.0] * (
keyword_idx[1]-keyword_idx[0]) + [0.0] * (max_len-keyword_idx[1]))
key_len_list.append(float(keyword_idx[1]-keyword_idx[0]))
return (torch.tensor(padded_input_ids_list),
torch.tensor(mask_list),
torch.tensor(output_mask_list),
torch.tensor(key_len_list))
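# Shape sketch (toy numbers, for illustration only): for input id lists
# [[5, 6, 7], [8, 9]] with keyword spans [(1, 3), (0, 1)],
# sentence_max_length=4 and pad_token_id=0, this returns
#   padded ids    [[5, 6, 7, 0], [8, 9, 0, 0]]
#   input masks   [[1, 1, 1, 0], [1, 1, 0, 0]]
#   keyword masks [[0., 1., 1., 0.], [1., 0., 0., 0.]]
#   keyword lens  [2., 1.]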
class Experiment:
def __init__(self, args, tokenizer):
self.args = args
self.exp_log = open("exp_log_" + args.loss + ".txt", 'w', 1)
self.env = pickle.load(open("env.pkl", "rb"))
self.train_set = self.env['train']
self.dev_set = self.env['dev']
self.test_set = self.env['test']
if(self.args.toy is True):
print("Using toy mode...")
random.shuffle(self.train_set)
random.shuffle(self.dev_set)
random.shuffle(self.test_set)
self.train_set = self.train_set[:500]
self.dev_set = self.dev_set[:100]
self.test_set = self.test_set[:100]
classes_freq = [1 for i in range(0, self.args.class_num)]
for instance in self.train_set:
classes_freq[instance["class"]] += 1
classes_freq_sum = sum(classes_freq)
#classes_weight = [math.log(float(classes_freq_sum)/float(freq)) for freq in classes_freq]
print(self.args.scale_weight)
self.exp_log.write(str(self.args.scale_weight) + "\n")
if self.args.scale_weight is True:
classes_weight = [float(classes_freq_sum)/float(freq)
for freq in classes_freq]
else:
classes_weight = [1.0 for freq in classes_freq]
#classes_weight = [2.0, 1.0]
self.classes_weight = torch.from_numpy(
np.array(classes_weight, dtype='float32'))
print("classes_freq:", classes_freq)
self.exp_log.write("classes_freq: " + str(classes_freq) + "\n")
print("classes_weight:", classes_weight)
self.exp_log.write("classes_weight: " + str(classes_weight) + "\n")
# if self.args.experiment == "BERT":
if self.args.sep_encoder is True:
self.mdl = BERT_NN_SEP(args)
else:
# !!!!!!!!!!!!!! should not mix gloss/context and context/context pairs in training !!!!!!!!!!!!!!!
self.mdl = BERT_NN(args)
self.encoder1_flag = self.args.encoder1_flag
self.encoder2_flag = self.args.encoder2_flag
# loss functions
if self.args.loss == "CrossEntropy":
if "rep" in self.args.concat:
concat_rep = True
else:
concat_rep = False
if "diff" in self.args.concat:
concat_difference = True
else:
concat_difference = False
if "mul" in self.args.concat:
concat_multiplication = True
else:
concat_multiplication = False
self.criterion = SoftmaxLoss(
args=self.args,
classes_weight=self.classes_weight,
concatenation_sent_rep=concat_rep,
concatenation_sent_difference=concat_difference,
concatenation_sent_multiplication=concat_multiplication)
if self.args.cuda is True:
# self.mdl = nn.DataParallel(self.mdl) # !!!!!
self.mdl.to(self.args.device)
self.classes_weight = self.classes_weight.to(self.args.device)
self.criterion.to(self.args.device)
def select_optimizer(self):
if self.args.optimizer == "Adam":
parameters = filter(lambda p: p.requires_grad,
self.mdl.parameters())
parameters = list(parameters) + list(self.criterion.parameters())
self.optimizer = optim.Adam(parameters, lr=self.args.learning_rate)
#self.optimizer = optim.AdamW(parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
elif self.args.optimizer == "AdamW":
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self.mdl.named_parameters() if not any(
nd in n for nd in no_decay)], 'weight_decay': self.args.weight_decay},
{'params': [p for n, p in self.mdl.named_parameters() if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': self.criterion.parameters()}
]
self.optimizer = AdamW(
optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
self.schedule = get_linear_schedule_with_warmup(
self.optimizer, num_warmup_steps=10000, num_training_steps=len(self.train_set)*self.args.epochs)
def make_batch(self, x, i, batch_size):
'''
:param x: input sentences
:param i: select the ith batch (-1 to take all)
:return: sentences, targets, actual_batch_size
'''
batch = x[int(i * batch_size):int((i + 1) * batch_size)]
if self.encoder1_flag == "gloss":
(input1_ids_tensor, input1_mask_tensor,
input1_output_mask_tensor, input1_key_len_tensor) = prepare_input_tensor(
[instance['input1_ids'] for instance in batch],
[instance["query1_idx"] for instance in batch],
self.args.gloss_max_length,
self.args.PAD_id)
else:
(input1_ids_tensor, input1_mask_tensor,
input1_output_mask_tensor, input1_key_len_tensor) = prepare_input_tensor(
[instance['input1_ids'] for instance in batch],
[instance["query1_idx"] for instance in batch],
self.args.sentence_max_length,
self.args.PAD_id)
(input2_ids_tensor, input2_mask_tensor,
input2_output_mask_tensor, input2_key_len_tensor) = prepare_input_tensor(
[instance['input2_ids'] for instance in batch],
[instance["query2_idx"] for instance in batch],
self.args.sentence_max_length,
self.args.PAD_id)
targets = torch.LongTensor(
np.array([instance['class'] for instance in batch], dtype=np.int32).tolist())
if self.args.cuda is True:
input1_ids_tensor = input1_ids_tensor.to(self.args.device)
input2_ids_tensor = input2_ids_tensor.to(self.args.device)
input1_mask_tensor = input1_mask_tensor.to(self.args.device)
input2_mask_tensor = input2_mask_tensor.to(self.args.device)
input1_output_mask_tensor = input1_output_mask_tensor.to(
self.args.device)
input2_output_mask_tensor = input2_output_mask_tensor.to(
self.args.device)
input1_key_len_tensor = input1_key_len_tensor.to(self.args.device)
input2_key_len_tensor = input2_key_len_tensor.to(self.args.device)
targets = targets.to(self.args.device)
actual_batch_size = input1_ids_tensor.size(0)
return {"targets": targets,
"input1_ids_tensor": input1_ids_tensor, "input2_ids_tensor": input2_ids_tensor,
"input1_mask_tensor": input1_mask_tensor, "input2_mask_tensor": input2_mask_tensor,
"input1_output_mask_tensor": input1_output_mask_tensor,
"input2_output_mask_tensor": input2_output_mask_tensor,
"input1_key_len_tensor": input1_key_len_tensor,
"input2_key_len_tensor": input2_key_len_tensor,
"actual_batch_size": actual_batch_size}
def train_batch(self, i, batch_size):
self.mdl.train()
self.criterion.train()
# self.mdl.zero_grad()
self.optimizer.zero_grad()
# if self.args.experiment == "BERT":
#loss = self.criterion(output, targets)
batch = self.make_batch(self.train_set, i, batch_size)
emb1 = self.mdl(batch["input1_ids_tensor"], batch["input1_mask_tensor"],
batch["input1_output_mask_tensor"], batch["input1_key_len_tensor"], self.encoder1_flag)
emb2 = self.mdl(batch["input2_ids_tensor"], batch["input2_mask_tensor"],
batch["input2_output_mask_tensor"], batch["input2_key_len_tensor"], self.encoder2_flag)
loss = self.criterion(emb1, emb2, batch["targets"])
loss.backward()
nn.utils.clip_grad_norm_(parameters=list(self.mdl.parameters(
))+list(self.criterion.parameters()), max_norm=self.args.max_grad_norm)
self.optimizer.step()
if self.args.optimizer == "AdamW":
self.schedule.step()
# return loss.data[0]
return loss.item()
def train(self):
"""
This is the main train function
"""
print(self.args)
self.exp_log.write(str(self.args) + "\n")
if len(self.train_set) % self.args.batch_size == 0:
num_batches = int(len(self.train_set) / self.args.batch_size)
else:
num_batches = int(len(self.train_set) / self.args.batch_size) + 1
print("len(self.train_set)", len(self.train_set))
self.exp_log.write("len(self.train_set): " +
str(len(self.train_set)) + "\n")
print("num_batches:", num_batches)
self.exp_log.write("num_batches: " + str(num_batches) + "\n")
self.select_optimizer()
final_results_strList = []
for epoch in range(1, self.args.epochs+1):
self.mdl.train()
self.criterion.train()
print("epoch: ", epoch)
self.exp_log.write("epoch: " + str(epoch) + "\n")
#t0 = time.clock()
t0 = time.perf_counter()
random.shuffle(self.train_set)
print(
"========================================================================")
self.exp_log.write("=====================================\n")
losses = []
for i in tqdm(range(num_batches), bar_format='{l_bar}{bar:20}{r_bar}{bar:-10b}'):
loss = self.train_batch(i, self.args.batch_size)
if(loss is None):
continue
losses.append(loss)
#t1 = time.clock()
t1 = time.perf_counter()
print("[Epoch {}] Train Loss={} T={}s".format(
epoch, np.mean(losses), t1-t0))
if len(self.dev_set) != 0:
print("Evaluate on dev set...")
self.exp_log.write("Evaluate on dev set...\n")
avg_P, avg_R, avg_F, results_str = self.test(epoch, "dev")
print(results_str)
self.exp_log.write(results_str + "\n")
if len(self.test_set) != 0:
print("Evaluate on test set...")
self.exp_log.write("Evaluate on test set...\n")
avg_P, avg_R, avg_F, results_str = self.test(epoch, "test")
print(results_str)
self.exp_log.write(results_str + "\n")
if "transfer_eval" in self.args.eval_dataset:
torch.save(
{
"args": self.args,
"mdl": self.mdl.state_dict(),
"criterion": self.criterion.state_dict()
},
"epoch" + str(epoch) + "_model_" + self.args.loss + ".pt")
def predict(self):
exp_log_output = open("eval_trained_model.txt", "w")
if len(self.dev_set) != 0:
print("Evaluate on dev set...")
exp_log_output.write("Evaluate on dev set...\n")
avg_P, avg_R, avg_F, results_str = self.test("saved_model", "dev")
print(results_str)
exp_log_output.write(results_str + "\n")
if len(self.test_set) != 0:
print("Evaluate on test set...")
exp_log_output.write("Evaluate on test set...\n")
avg_P, avg_R, avg_F, results_str = self.test("saved_model", "test")
print(results_str)
exp_log_output.write(results_str + "\n")
exp_log_output.close()
def test(self, epoch, data_flag):
if data_flag == "dev":
dataset = self.dev_set
output_file = open("valid_TruePred_" + str(epoch) +
"_" + self.args.loss + ".txt", "w")
elif data_flag == "test":
dataset = self.test_set
output_file = open("test_TruePred_" + str(epoch) +
"_" + self.args.loss + ".txt", "w")
all_probs, all_preds, acc, avg_P, avg_R, avg_F, results_str = self.evaluate(dataset)
if output_file is None:
return avg_P, avg_R, avg_F, results_str
for i, instance in enumerate(dataset):
output_file.write(str(instance["instance_id"])
+ "\t" + str(instance["keyword"])
+ "\t" + str(instance["class"])
+ "\t" + str(all_preds[i])
+ "\t" + str(all_probs[i])
+ "\t" + instance["input1_text"]
+ "\t" + instance["input2_text"] + "\n")
output_file.close()
return avg_P, avg_R, avg_F, results_str
def evaluate(self, x):
self.mdl.eval()
self.criterion.eval()
BS = self.args.batch_size // 2
if BS == 0:
BS = 1
if len(x) % BS == 0:
num_batches = int(len(x) / BS)
else:
num_batches = int(len(x) / BS) + 1
all_probs = []
all_preds = []
all_targets = []
for instance in x:
all_targets.append(instance["class"])
for i in range(num_batches):
batch = self.make_batch(x, i, BS)
emb1 = self.mdl(batch["input1_ids_tensor"], batch["input1_mask_tensor"],
batch["input1_output_mask_tensor"], batch["input1_key_len_tensor"],
self.encoder1_flag)
emb2 = self.mdl(batch["input2_ids_tensor"], batch["input2_mask_tensor"],
batch["input2_output_mask_tensor"], batch["input2_key_len_tensor"],
self.encoder2_flag)
"""
emb1 = self.mdl(input1_ids_tensor, query1_idx)
emb2 = self.mdl(input2_ids_tensor, query2_idx)
"""
output = self.criterion(emb1, emb2, targets=None)
if self.args.loss == "CrossEntropy":
output = nn.functional.softmax(output, dim=1)
# print(actual_batch_size)
all_probs += tensor_to_numpy(output).tolist()
if self.args.loss == "CrossEntropy":
for probs in all_probs:
all_preds.append(probs.index(max(probs)))
# print("len(all_targets):", len(all_targets), "len(all_preds):", len(all_preds))
confusion_matrix = {}
matches = 0
for i in range(len(all_targets)):
if all_targets[i] == all_preds[i]:
matches += 1
string = str(all_targets[i]) + " --> " + str(all_preds[i])
if string in confusion_matrix:
confusion_matrix[string] += 1
else:
confusion_matrix[string] = 1
acc = float(matches) / float(len(all_targets))
print("accuracy:", acc)
print("confusion_matrix[target --> pred]:", confusion_matrix)
labelList = [label for label in range(0, self.args.class_num)]
results = precision_recall_fscore_support(
all_targets, all_preds, average=None, labels=labelList)
#results = precision_recall_fscore_support(all_targets, all_preds, average=None)
results_str = []
results_str.append("accuracy: " + str(acc))
results_str.append("\t".join([str(l) for l in labelList]))
results_str.append("\t".join(["%0.4f" % p for p in results[0]]))
results_str.append("\t".join(["%0.4f" % r for r in results[1]]))
results_str.append("\t".join(["%0.4f" % f for f in results[2]]))
results_str = "\n".join(results_str)
avg = precision_recall_fscore_support(
all_targets, all_preds, average='macro')
avg_P, avg_R, avg_F = avg[0], avg[1], avg[2]
# print(results_str)
return all_probs, all_preds, acc, avg_P, avg_R, avg_F, results_str
def model_main(args, tokenizer):
exp = Experiment(args, tokenizer)
if args.exp_mode == "train":
print("Training...")
exp.train()
torch.save({
"args": exp.args,
"mdl": exp.mdl.state_dict(),
"criterion": exp.criterion.state_dict()},
"trained_model_" + args.loss + ".pt")
elif args.exp_mode == "twoStageTune":
print("Two stages tuning...")
checkpoint = torch.load("pretrained_model_" + args.loss + ".pt")
exp.mdl.load_state_dict(checkpoint["mdl"])
exp.criterion.load_state_dict(checkpoint["criterion"])
exp.train()
elif args.exp_mode == "eval":
print("Evaluating...")
checkpoint = torch.load("trained_model_" + args.loss + ".pt")
#exp.args = checkpoint["args"]
exp.mdl.load_state_dict(checkpoint["mdl"])
exp.criterion.load_state_dict(checkpoint["criterion"])
exp.predict()
|
{"hexsha": "c6e62745fad7f42592fb10ed1da87dda7e93af7c", "size": 19207, "ext": "py", "lang": "Python", "max_stars_repo_path": "BERT_model_span/train.py", "max_stars_repo_name": "tencent-ailab/EMNLP21_SemEq", "max_stars_repo_head_hexsha": "8a0a863e20193f5a7ae1ace0fa6624f3cc35aa3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-11-11T05:09:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T01:49:06.000Z", "max_issues_repo_path": "BERT_model_span/train.py", "max_issues_repo_name": "tencent-ailab/EMNLP21_SemEq", "max_issues_repo_head_hexsha": "8a0a863e20193f5a7ae1ace0fa6624f3cc35aa3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-01T02:30:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-01T02:30:50.000Z", "max_forks_repo_path": "BERT_model_span/train.py", "max_forks_repo_name": "tencent-ailab/EMNLP21_SemEq", "max_forks_repo_head_hexsha": "8a0a863e20193f5a7ae1ace0fa6624f3cc35aa3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-11T05:26:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T07:50:47.000Z", "avg_line_length": 39.9313929314, "max_line_length": 112, "alphanum_fraction": 0.5827562868, "include": true, "reason": "import numpy", "num_tokens": 4223}
|
include("header.jl")
struct M370; layer; end;
@testset "serialize" begin
M1 = RNN(2,3)
M2 = M1 |> cpucopy
@test typeof(M2.w.value) <: Array
@test M2.w.value == M1.w.value
if gpu() >= 0
M3 = M2 |> gpucopy
@test typeof(M3.w.value) <: KnetArray
@test M3.w.value == M2.w.value
# 370-1
m = M370(param(5,5,1,1))
mcpu = m |> cpucopy
@test Knet.save("foo.jld2","mcpu",mcpu) === nothing
# 370-2
@test conv4(mcpu.layer,randn(Float32,20,20,1,1)) isa Array
end
end
|
{"hexsha": "af8cd84bbe9264055489d3ba9bfae1bb9b1ee069", "size": 564, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/serialize.jl", "max_stars_repo_name": "petershintech/Knet.jl", "max_stars_repo_head_hexsha": "9ed953d568f2ce94265bcc9663a671ac8364d8b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-26T00:46:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-26T00:46:11.000Z", "max_issues_repo_path": "test/serialize.jl", "max_issues_repo_name": "petershintech/Knet.jl", "max_issues_repo_head_hexsha": "9ed953d568f2ce94265bcc9663a671ac8364d8b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/serialize.jl", "max_forks_repo_name": "petershintech/Knet.jl", "max_forks_repo_head_hexsha": "9ed953d568f2ce94265bcc9663a671ac8364d8b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6923076923, "max_line_length": 66, "alphanum_fraction": 0.5265957447, "num_tokens": 204}
|
[STATEMENT]
lemma fps_inverse_mult: "inverse (f * g :: 'a::field fps) = inverse f * inverse g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inverse (f * g) = inverse f * inverse g
[PROOF STEP]
by (simp add: fps_inverse_mult_divring)
|
{"llama_tokens": 97, "file": null, "length": 1}
|
--{-# LANGUAGE BangPatterns #-}
module NeuralNetworks where
import Util
import Data.List
import System.Random
import Numeric.LinearAlgebra
import Numeric.LinearAlgebra.Util
import Numeric.GSL.Minimization
import Control.Parallel (par,pseq)
import Debug.Trace
import System.IO
import System.Directory
readThetaList :: [Int] -> IO [Matrix Double]
readThetaList layers = do
let readThetaListInternal fname = do
contents <- readFile fname
let thetas = contents `seq` read contents :: [Matrix Double]
return thetas
let generateThetaList = do
rgen <- getStdGen
return $ randomInitializeAllWeights rgen layers
let fileName = show layers
fexist <- doesFileExist fileName
if fexist
then
readThetaListInternal fileName
else
generateThetaList
writeThetaList :: [Int] -> [Matrix Double] -> IO ()
writeThetaList layers thetas = do
let filename = show layers
let contents = show thetas
writeFile filename contents
{- RANDINITIALIZEWEIGHTS Randomly initialize the weights of a layer with L_in
incoming connections and L_out outgoing connections
W = RANDINITIALIZEWEIGHTS(L_in, L_out) randomly initializes the weights
of a layer with L_in incoming connections and L_out outgoing
connections.
Note that W should be set to a matrix of size(L_out, 1 + L_in) as
the first row of W handles the "bias" terms -}
randomInitializeWeights :: RandomGen g => g -> Int -> Int -> Matrix Double
randomInitializeWeights gen l_in l_out = randommatrix * 2 * epsilon_init - epsilon_init
where
randomlist = randoms gen :: [Double]
epsilon_init = 0.12
randommatrix = (l_out><(1+l_in)) randomlist
randomInitializeAllWeights :: RandomGen g => g -> [Int] -> [Matrix Double]
randomInitializeAllWeights gen (outputs:[]) = []
randomInitializeAllWeights gen (inputs:outputs:rest) = (randomInitializeWeights gen inputs outputs):(randomInitializeAllWeights gen (outputs:rest))
{- Implements the neural network cost function for a neural network which performs
classification.
[J, grad] = NNCOSTFUNCTON(nn_params, hidden_layer_size, num_labels, ...
X, y, lambda) computes the cost and gradient of the neural network. The
parameters for the neural network are "unrolled" into the vector
nn_params and need to be converted back into the weight matrices.
The returned parameter grad should be a "unrolled" vector of the
partial derivatives of the neural network.
NOTE: cost does not work on NNs with no hidden layers. TODO fix this if
I ever need it.
-}
costFunction :: Vector Double -> [Int] -> Matrix Double -> Matrix Double -> Double -> (Double, Vector Double)
costFunction thetaVector layers x targetYsMatrix lambda = (j_reg, gradientVector)
where
m = fromIntegral $ rows targetYsMatrix
thetaList = reshapeVector layers thetaVector
(inputMatrices, activationMatrices, output_layer) = forwardPropagate x thetaList
--j = trace (show $ -((1/m) * sumElements (targetYsMatrix * (log output_layer) + (1 - targetYsMatrix) * (log (1 - output_layer))))) (-((1/m) * ( sumElements (targetYsMatrix * (log output_layer) + (1 - targetYsMatrix) * (log (1 - output_layer))))))
--j = trace (show $ ((targetYsMatrix * (log output_layer) + (1 - targetYsMatrix) * (log (1 - output_layer))))) (-((1/m) * ( sumElements (targetYsMatrix * (log output_layer) + (1 - targetYsMatrix) * (log (1 - output_layer))))))
j = -((1/m) * ( sumElements (targetYsMatrix * (log output_layer) + (1 - targetYsMatrix) * (log (1 - output_layer)))))
j_reg = j + (lambda/(2*m)) * ( sum ( map (\ theta -> sumElements ((dropColumns 1 theta) ^ 2)) thetaList ))
deltaList = backPropagate thetaList inputMatrices activationMatrices output_layer targetYsMatrix
gradientVector = createGradientVector thetaList deltaList
where
-- create list of gradient matrices and then flatten them into a single vector
createGradientVector :: [Matrix Double] -> [Matrix Double] -> Vector Double
createGradientVector tlist dlist = flattenConcat $ iterate tlist dlist
where
iterate [] [] = []
iterate (t:ts) (d:ds) = grad:(iterate ts ds)
where
-- eliminate the j = 0 column from theta for purposes of regularization
tmpTheta = padLeft (dropColumns 1 t) 0
-- regularized gradients
grad = (1/(scalar m)) * d + (scalar (lambda/m)) * tmpTheta
-- |Performs forward propagation on a neural network, returning the resulting output layer
forwardPropagate :: Matrix Double -> [Matrix Double] -> ([Matrix Double],[Matrix Double],Matrix Double)
--forwardPropagate inputMatrix _ | trace ("Forward propagate input matrix dimensions " ++ (show (rows inputMatrix)) ++ "x" ++ (show (cols inputMatrix)) ) False = undefined
forwardPropagate inputMatrix thetaList = (inputMatrices, activationMatrices, output_layer)
where
resultList' = doForwardProp inputMatrix thetaList
--add x to the start of the input list, for both input and activation... testing
resultList = (inputMatrix,inputMatrix):resultList'
(inputMatrices, activationMatrices) = unzip resultList
output_layer = last activationMatrices
doForwardProp input [] = []
doForwardProp input (t:thetas) = (initial_value,activation):(doForwardProp activation thetas)
where
input_with_bias = addBias input
initial_value = input_with_bias <> (ctrans t)
activation = sigmoid initial_value
-- |Performs backpropagation on a neural network, returning the resulting Delta matrices
backPropagate :: [Matrix Double] -> [Matrix Double] -> [Matrix Double] -> Matrix Double -> Matrix Double -> [Matrix Double]
backPropagate thetaList inputMatrices activationMatrices output_layer targetYs = output_deltas `par` doBackProp 0 deltaList
where
deltaList = makeZeroDeltaList thetaList
m = rows targetYs
-- reverse the lists we'll be iterating through, since we're going backwards from the output layer
revInputs = reverse inputMatrices
revActivations = reverse activationMatrices
revThetaList = reverse thetaList
output_deltas = (head revActivations) - targetYs
--for each example in training set:
doBackProp index ds | index >= m = ds
| otherwise = doBackProp (index +1) updatedDs
where
-- reverse the lists we'll be iterating through, since we're going backwards from the output layer
revDs = reverse ds
--do output layer separately:
output_delta = ctrans $ extractRows [index] output_deltas
--do the rest of the layers:
updatedDs = reverse $ backPropagateOneExample revThetaList output_delta (tail revInputs) (tail revActivations) revDs
backPropagateOneExample _ _ _ _ [] = []
backPropagateOneExample (thetaI:thetas) previous_delta (inputI:inputs) (activationI:activations) (deltaI:deltas) = current_delta `pseq` updatedDelta:(backPropagateOneExample thetas current_delta inputs activations deltas)
where
current_delta_with_bias = (ctrans thetaI) <> previous_delta * (sigmoidGradient (ctrans (addBias (extractRows [index] inputI))))
current_delta = updatedDelta `par` dropRows 1 current_delta_with_bias
-- capital Delta accumulation
updatedDelta = deltaI + previous_delta <> addBias (extractRows [index] activationI)
-- |Makes a list of 0-filled Delta matrices from a list of theta matrices
makeZeroDeltaList :: [Matrix Double] -> [Matrix Double]
makeZeroDeltaList [] = []
makeZeroDeltaList (m:ms) = (zeros (rows m) (cols m)):(makeZeroDeltaList ms)
-- |Reshapes a vector into a list of weight matrices for a network of the given number and size of layers
reshapeVector :: [Int] -> Vector Double -> [Matrix Double]
reshapeVector layers vector = iterate layers (toList vector)
where
iterate _ [] = []
iterate [] _ = []
iterate (x:[]) _ = []
iterate (l_in:l_out:xs) vlist = m:(iterate (l_out:xs) (drop msize vlist))
where
msize = l_out * (l_in + 1)
m = reshape (l_in + 1) (fromList (take msize vlist))
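-- Worked example (illustrative): for layers [2,3,1] the flat vector is split
-- into a 3x3 matrix (3 hidden units, 2 inputs + bias) followed by a 1x4 matrix
-- (1 output unit, 3 inputs + bias), consuming 9 + 4 = 13 elements in total.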
-- |The prediction function
predict :: [Matrix Double] -> Matrix Double -> [Double] -> Matrix Double
predict [] input targetMapping = selectTarget
where
maxIndex xs = snd $ maximum $ zip xs targetMapping
selectTarget = asColumn $ fromList $ map maxIndex $ toLists input
predict (theta:thetas) input targetMapping = predict thetas layer targetMapping
where
layer = sigmoid $ (addBias input) <> (ctrans theta)
{-
Minimization methods:
ConjugateFR
ConjugatePR
VectorBFGS
VectorBFGS2
SteepestDescent
-}
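{-
Minimal usage sketch (illustrative; `gen`, `x` and `ys` are assumed to be in
scope): train a 400-25-10 network for 50 iterations with lambda = 1.0.

  let thetas0 = randomInitializeAllWeights gen [400, 25, 10]
      (solution, path) = minimizeVDGeneric VectorBFGS2 x ys [400, 25, 10] 50 1.0 1e-9 thetas0
-}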
minimizeVDGeneric method x targetYsMatrix layers num_iters lambda precision initial_thetas = minimizeVD method precision num_iters first_step tol traceCostFunc gradientFunc (flattenConcat initial_thetas)
where
first_step = precision * 100
tol = 0.1 -- see http://www.gnu.org/software/gsl/manual/html_node/Multimin-Algorithms-with-Derivatives.html
costFunc thetaVect = fst $ costGradFunction thetaVect
gradientFunc thetaVect = snd $ costGradFunction thetaVect
costGradFunction t = (costFunction t layers x targetYsMatrix lambda)
traceGradientFunc thetaVect = traceShow ((sumElements grad)) grad
where grad = snd $ costGradFunction thetaVect
traceCostFunc thetaVect = traceShow cost cost
where cost = fst $ costGradFunction thetaVect
--memoizedCostGradFunction = unsafeMemoize costGradFunction
-- Generate learning curves
learningCurvesNeuralNetwork::
RandomGen g => MinimizeMethodD ->
Matrix Double ->
Matrix Double ->
Matrix Double ->
Matrix Double ->
[Int] ->
Double ->
Int ->
Double ->
g ->
Int ->
[(Double,Double,Int)]
learningCurvesNeuralNetwork method x y xval yval layers lambda iter precision gen step_size = learningCurves trainFunc costFunc x y xval yval step_size
where
initial_thetas = randomInitializeAllWeights gen layers
trainFunc tx ty = asColumn $ fst $ minimizeVDGeneric method tx ty layers iter lambda precision initial_thetas
costFunc tx ty tt = fst $ costFunction (flatten tt) layers tx ty 0
|
{"hexsha": "81b8e4850a093d9356831e035c402f227941e210", "size": 10969, "ext": "hs", "lang": "Haskell", "max_stars_repo_path": "src/NeuralNetworks.hs", "max_stars_repo_name": "thade/haskellml", "max_stars_repo_head_hexsha": "4d24f70323d8fbe1044732e3f4f99ac2c1cb6db8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-04-13T06:27:51.000Z", "max_stars_repo_stars_event_max_datetime": "2016-10-23T14:54:06.000Z", "max_issues_repo_path": "src/NeuralNetworks.hs", "max_issues_repo_name": "thade/haskellml", "max_issues_repo_head_hexsha": "4d24f70323d8fbe1044732e3f4f99ac2c1cb6db8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/NeuralNetworks.hs", "max_forks_repo_name": "thade/haskellml", "max_forks_repo_head_hexsha": "4d24f70323d8fbe1044732e3f4f99ac2c1cb6db8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.7355769231, "max_line_length": 254, "alphanum_fraction": 0.664235573, "num_tokens": 2520}
|
#include "baldr/graphreader.h"
#include "baldr/rapidjson_utils.h"
#include "filesystem.h"
#include <boost/program_options.hpp>
#include <boost/property_tree/ptree.hpp>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include "config.h"
namespace bpo = boost::program_options;
namespace bpt = boost::property_tree;
int main(int argc, char** argv) {
std::string config, bbox;
std::string inline_config;
std::string config_file_path;
bpo::options_description options("valhalla_expand_bounding_box " VALHALLA_VERSION "\n"
"\n"
" Usage: valhalla_expand_bounding_box [options]\n"
"\n"
"Finds all the nodes in the bounding box and then expands "
"the bounding box by the shape of the edges that leave the nodes."
"\n"
"\n");
auto adder = options.add_options();
adder("help,h", "Print this help message.");
adder("version,v", "Print the version of this software.");
adder("config,c", bpo::value<std::string>(&config_file_path),
"Path to the json configuration file.");
adder("inline-config,i", bpo::value<std::string>(&inline_config), "Inline json config.");
adder(
"bounding-box,b", bpo::value<std::string>(&bbox),
"Bounding box to expand. The format is lower left lng/lat and upper right lng/lat or min_x,min_y,max_x,max_y");
bpo::positional_options_description pos_options;
pos_options.add("bounding-box", 1);
bpo::variables_map vm;
try {
bpo::store(bpo::command_line_parser(argc, argv).options(options).positional(pos_options).run(),
vm);
bpo::notify(vm);
} catch (std::exception& e) {
std::cerr << "Unable to parse command line options because: " << e.what() << "\n"
<< "This is a bug, please report it at " PACKAGE_BUGREPORT << "\n";
return EXIT_FAILURE;
}
if (vm.count("help")) {
std::cout << options << "\n";
return EXIT_SUCCESS;
}
if (vm.count("version")) {
std::cout << "valhalla_expand_bounding_box " << VALHALLA_VERSION << "\n";
return EXIT_SUCCESS;
}
if (!vm.count("bounding-box")) {
std::cout << "You must provide a bounding box to expand.\n";
return EXIT_FAILURE;
}
// Read the config file
boost::property_tree::ptree pt;
if (vm.count("inline-config")) {
std::stringstream ss;
ss << inline_config;
rapidjson::read_json(ss, pt);
} else if (vm.count("config") && filesystem::exists(config_file_path)) {
rapidjson::read_json(config_file_path, pt);
} else {
std::cerr << "Configuration is required\n\n" << options << "\n\n";
return EXIT_FAILURE;
}
// configure logging
boost::optional<boost::property_tree::ptree&> logging_subtree =
pt.get_child_optional("mjolnir.logging");
if (logging_subtree) {
auto logging_config =
valhalla::midgard::ToMap<const boost::property_tree::ptree&,
std::unordered_map<std::string, std::string>>(logging_subtree.get());
valhalla::midgard::logging::Configure(logging_config);
}
std::stringstream ss(bbox);
std::vector<float> result;
while (ss.good()) {
std::string substr;
getline(ss, substr, ',');
result.push_back(std::stof(substr));
}
if (result.size() != 4) {
std::cout << "You must provide a valid bounding box to expand.\n";
return EXIT_FAILURE;
}
valhalla::midgard::AABB2<valhalla::midgard::PointLL> bb{{result[0], result[1]},
{result[2], result[3]}};
valhalla::baldr::GraphReader reader(pt.get_child("mjolnir"));
bb = reader.GetMinimumBoundingBox(bb);
if (!bb.minpt().IsValid() || !bb.maxpt().IsValid())
return EXIT_FAILURE;
std::cout << std::fixed << std::setprecision(6) << bb.minx() << "," << bb.miny() << "," << bb.maxx()
<< "," << bb.maxy() << std::endl;
return EXIT_SUCCESS;
}
|
{"hexsha": "b1d4691e6fccf3167d2f122d15acfc5f54a472c9", "size": 4008, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/valhalla_expand_bounding_box.cc", "max_stars_repo_name": "CesarHerreraG/valhalla", "max_stars_repo_head_hexsha": "0f481c6e751f0b3f7320d6ac41f32949dd2c5152", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/valhalla_expand_bounding_box.cc", "max_issues_repo_name": "CesarHerreraG/valhalla", "max_issues_repo_head_hexsha": "0f481c6e751f0b3f7320d6ac41f32949dd2c5152", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/valhalla_expand_bounding_box.cc", "max_forks_repo_name": "CesarHerreraG/valhalla", "max_forks_repo_head_hexsha": "0f481c6e751f0b3f7320d6ac41f32949dd2c5152", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5517241379, "max_line_length": 117, "alphanum_fraction": 0.6052894212, "num_tokens": 1002}
|
# coding: utf-8
# # Separating Flowers
# This notebook explores a classic Machine Learning Dataset: the Iris flower dataset
#
# ## Tutorial goals
# 1. Explore the dataset
# 2. Build a simple predictive modeling
# 3. Iterate and improve your score
#
# How to follow along:
#
# git clone https://github.com/dataweekends/pyladies_intro_to_data_science
#
# cd pyladies_intro_to_data_science
#
# ipython notebook
# We start by importing the necessary libraries:
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# ### 1) Explore the dataset
# #### Numerical exploration
#
# - Load the csv file into memory using Pandas
# - Describe each attribute
# - is it discrete?
# - is it continuous?
# - is it a number?
# - Identify the target
# - Check if any values are missing
#
# Load the csv file into memory using Pandas
# In[ ]:
df = pd.read_csv('iris-2-classes.csv')
# What's the content of ```df``` ?
# In[ ]:
df.head(3)
# Describe each attribute (is it discrete? is it continuous? is it a number? is it text?)
# In[ ]:
df.info()
# Are the features continuous or discrete?
# In[ ]:
df.describe()
# #### Identify the target
# What are we trying to predict?
# ah, yes... the type of Iris flower!
# In[ ]:
df['iris_type'].value_counts()
# Check if any values are missing
# In[ ]:
df.info()
# #### Mental notes so far:
#
# - Dataset contains 100 entries
# - 1 Target column (```iris_type```)
# - 4 Numerical Features
# - No missing values
# #### Visual exploration
# - plot the distribution of the Sepal Length feature
# - check the influence of Sepal Length on the target
# Plot the distribution of Sepal Length
# In[ ]:
df['sepal_length_cm'].plot(kind='hist', figsize=(10,6))
plt.title('Distribution of Sepal Length', size = '20')
plt.xlabel('Sepal Length (cm)', size = '20')
plt.ylabel('Number of flowers', size = '20')
# check the influence of Sepal Length
# In[ ]:
df[df['iris_type']=='virginica']['sepal_length_cm'].plot(kind='hist', bins = 10, range = (4,7),
figsize=(10,6), alpha = 0.3, color = 'b')
df[df['iris_type']=='versicolor']['sepal_length_cm'].plot(kind='hist', bins = 10, range = (4,7),
figsize=(10,6), alpha = 0.3, color = 'g')
plt.title('Distribution of Sepal Length', size = '20')
plt.xlabel('Sepal Length (cm)', size = '20')
plt.ylabel('Number of flowers', size = '20')
plt.legend(['Virginica', 'Versicolor'])
plt.show()
# Check the influence of two features combined
# In[ ]:
plt.scatter(df[df['iris_type']== 'virginica']['petal_length_cm'].values,
df[df['iris_type']== 'virginica']['sepal_length_cm'].values, label = 'Virginica', c = 'b')
plt.scatter(df[df['iris_type']== 'versicolor']['petal_length_cm'].values,
df[df['iris_type']== 'versicolor']['sepal_length_cm'].values, label = 'Versicolor', c = 'r')
plt.legend(['virginica', 'versicolor'], loc = 2)
plt.title('Scatter plot', size = '20')
plt.xlabel('Petal Length (cm)', size = '20')
plt.ylabel('Sepal Length (cm)', size = '20')
plt.show()
# Ok, so, the flowers seem to have different characteristics
#
# Let's build a simple model to test that
# Define a new target column called "target" that is 1 if iris_type = 'virginica' and 0 otherwise
# In[ ]:
df['target'] = df['iris_type'].map({'virginica': 1, 'versicolor': 0})
print(df[['iris_type', 'target']].head(2))
print()
print(df[['iris_type', 'target']].tail(2))
# Define simplest model as benchmark
# The simplest model is a model that predicts 0 for everybody, i.e. all versicolor.
#
# How good is it?
# In[ ]:
actual_versicolor = len(df[df['target'] == 0])
total_flowers = len(df)
ratio_of_versicolor = actual_versicolor / float(total_flowers)
print "If I predict every flower is versicolor, I'm correct %0.1f %% of the time" % (100 * ratio_of_versicolor)
df['target'].value_counts()
# We need to do better than that
# Define features (X) and target (y) variables
# In[ ]:
X = df[['sepal_length_cm', 'sepal_width_cm',
'petal_length_cm', 'petal_width_cm']]
y = df['target']
# Initialize a Decision Tree model
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model
# Split the features and the target into a Train and a Test subsets.
#
# Ratio should be 70/30
# In[ ]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
# Train the model
# In[ ]:
model.fit(X_train, y_train)
# Calculate the model score
# In[ ]:
my_score = model.score(X_test, y_test)
print "Classification Score: %0.2f" % my_score
# Print the confusion matrix for the decision tree model
# In[ ]:
from sklearn.metrics import confusion_matrix
y_pred = model.predict(X_test)
print "\n=======confusion matrix=========="
print confusion_matrix(y_test, y_pred)
# ### 3) Iterate and improve
#
# Now you have a basic pipeline. How can you improve the score? Try:
# - changing the parameters of the model
# check the documentation here:
# http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
#
# - changing the model itself
# check examples here:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
#
# - try separating 3 classes of flowers using the ```iris.csv``` dataset provided
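# A minimal sketch of the "change the model" suggestion (not part of the
# original tutorial): swap in a Random Forest and compare scores, assuming the
# train/test split from above is still in memory.
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(n_estimators=100, random_state=0)
rf_model.fit(X_train, y_train)
print("Random Forest Score: %0.2f" % rf_model.score(X_test, y_test))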
|
{"hexsha": "579e087a9f93daf1bc57dfcf53b9779b0d051431", "size": 5598, "ext": "py", "lang": "Python", "max_stars_repo_path": "Iris Flowers Workshop.py", "max_stars_repo_name": "Dataweekends/pyladies_intro_to_data_science", "max_stars_repo_head_hexsha": "6c3d503d15b361d7f71f26adc451c1bb886429f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-11-01T23:25:08.000Z", "max_stars_repo_stars_event_max_datetime": "2015-12-10T05:56:07.000Z", "max_issues_repo_path": "Iris Flowers Workshop.py", "max_issues_repo_name": "Dataweekends/pyladies_intro_to_data_science", "max_issues_repo_head_hexsha": "6c3d503d15b361d7f71f26adc451c1bb886429f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Iris Flowers Workshop.py", "max_forks_repo_name": "Dataweekends/pyladies_intro_to_data_science", "max_forks_repo_head_hexsha": "6c3d503d15b361d7f71f26adc451c1bb886429f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-10-31T18:34:37.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-02T03:58:29.000Z", "avg_line_length": 22.6639676113, "max_line_length": 111, "alphanum_fraction": 0.6666666667, "include": true, "reason": "import numpy", "num_tokens": 1494}
|
# import modules
import numpy as np
import wave
def readWave(filename):
wr = wave.open(filename, 'r')
params = wr.getparams() # wr = wave_read, Get Parameters
nchannels = params[0] # Number of Channels
sampwidth = params[1] # Quantization Bit Number (Byte Number)
rate = params[2] # Sampling Frequency
nframes = params[3] # Length of the Signal
frames = wr.readframes(nframes) # Data of the Signal
wr.close()
# by Bit Number
if sampwidth == 1:
data = np.frombuffer(frames, dtype=np.uint8)
data = (data - 128) / 128
elif sampwidth == 2: # 16 bit Quantization
data = np.frombuffer(frames, dtype=np.int16) / 32768
elif sampwidth == 3:
a8 = np.frombuffer(frames, dtype=np.uint8)
tmp = np.zeros((nframes * nchannels, 4), dtype=np.uint8)
tmp[:, 1:] = a8.reshape(-1, 3)
data = tmp.view(np.int32)[:, 0] / 2147483648
elif sampwidth == 4:
data = np.frombuffer(frames, dtype=np.int32) / 2147483648
    data = np.reshape(data, (-1, nchannels)).T # reshape the ndarray to (channels, frames)
if nchannels==1: # For Monoral
data = np.reshape(data, (nframes))
return data
def readGetWave(filename):
wr = wave.open(filename, 'r')
params = wr.getparams()
nchannels = params[0] # Number of Channels
sampwidth = params[1] # Quantization Bit Number
rate = params[2] # Sampling Frequency
nframes = params[3] # Length of the Signal
frames = wr.readframes(nframes) # Data of the Signal
wr.close()
# by Bit Number
if sampwidth == 1:
data = np.frombuffer(frames, dtype=np.uint8)
data = (data - 128) / 128
elif sampwidth == 2:
data = np.frombuffer(frames, dtype=np.int16) / 32768
elif sampwidth == 3:
a8 = np.frombuffer(frames, dtype=np.uint8)
tmp = np.zeros((nframes * nchannels, 4), dtype=np.uint8)
tmp[:, 1:] = a8.reshape(-1, 3)
data = tmp.view(np.int32)[:, 0] / 2147483648
elif sampwidth == 4:
data = np.frombuffer(frames, dtype=np.int32) / 2147483648
data = np.reshape(data, (-1, nchannels)).T
if nchannels==1:
data = np.reshape(data, (nframes))
return params, data
def writeWave(file_name, data, params=(1, 3, 48000)):
"""
Write .wav File
Args:
-------------------
filename: fullpass to write the .wav at (string)
data: data to convert to .wav (numpy array (float64))
params: (number of channels, samp width, framerate)
"""
if data.ndim == 1: # Dimension Number of the Data
nchannels = 1
        data = np.reshape(data, [data.shape[0], 1]) # reshape data into a single column
else:
nchannels = data.shape[0]
data = data.T
    audio = wave.Wave_write(file_name) # open file_name for writing
    # set parameters; params=(1, 2, 44100) is a typical default
audio.setnchannels(params[0])
audio.setsampwidth(params[1])
audio.setframerate(params[2])
data = (data*(2**(8*params[1]-1)-1)).reshape(data.size, 1)
if params[1] == 1:
data = data + 128
        frames = data.astype(np.uint8).tobytes()
elif params[1] == 2:
        frames = data.astype(np.int16).tobytes()
elif params[1] == 3:
a32 = np.asarray(data, dtype = np.int32)
a8 = ( a32.reshape(a32.shape + (1,)) >> np.array([0, 8, 16]) ) & 255
        frames = a8.astype(np.uint8).tobytes()
elif params[1] == 4:
        frames = data.astype(np.int32).tobytes()
    audio.writeframes(frames) # write the output frames
    audio.close() # close the file
return
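# Minimal round-trip sketch (illustrative, not part of the original module):
# write one second of a 440 Hz sine tone to disk and read it back.
#
#   fs = 48000
#   t = np.arange(fs) / fs
#   tone = 0.5 * np.sin(2 * np.pi * 440 * t)
#   writeWave('tone.wav', tone, params=(1, 3, fs))
#   recovered = readWave('tone.wav')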
def getInfo(filename):
wr = wave.open(filename)
params = wr.getparams()
wr.close()
return params
|
{"hexsha": "503248e38b1bd2705221726c43eba2a8a9bdcc8d", "size": 3639, "ext": "py", "lang": "Python", "max_stars_repo_path": "iowave.py", "max_stars_repo_name": "animolopez/module", "max_stars_repo_head_hexsha": "588b8de7211bef29b85282a33c9313f90a505f71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "iowave.py", "max_issues_repo_name": "animolopez/module", "max_issues_repo_head_hexsha": "588b8de7211bef29b85282a33c9313f90a505f71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "iowave.py", "max_forks_repo_name": "animolopez/module", "max_forks_repo_head_hexsha": "588b8de7211bef29b85282a33c9313f90a505f71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4910714286, "max_line_length": 76, "alphanum_fraction": 0.5998900797, "include": true, "reason": "import numpy", "num_tokens": 1126}
|
# -*- coding: utf-8 -*-
"""
A collection of functions that use oTherm APIs to retrieve data from an oTherm instance. The typical application
is to first retrieve the *site* data. Then, using the *site* dataclass object, retrieve information about the:
- *weather_station*,
- *thermal_load*,
- *monitoring_system*, and
- *heat_pump_data*.
The tools also contain scripts for:
 - Retrieving the specifications for any oTherm monitoring system by the name of the monitoring system, and
 - Retrieving heat pump performance data from a local SQLite database (*note*: the SQLite database is not part
 of the oTherm database).
.. note::
The names and types of data elements used in the analyses differ from the oTherm data model specification.
The *dataclass* objects use for analysis are constructed from json objects returned from the oTherm database.
However, because the *dataclass* objects represent a single instance, the data elements are reorganized into
a simpler representation than the original json response.
Example
-------
The input typically consists of a site_name and start and end dates. The functions can be called from analyses
modules. For example ::
site_name = 'GES649'
start_date = '2015-01-01'
end_date = '2021-01-01'
#Get site information
site = get_site_info(site_name)
#Get equipment information and dataframe of heat pump operating data
equipment, hp_data = get_equipment_data(site.id, start_date, end_date, site.timezone)
#Get monitoring system information and measurement specifications
equip_monitoring_system = get_equipment_monitoring_system(equipment.id)
#Get weather data for station
wx_data = get_weather_data(site.weather_station.nws_id, site.timezone, start_date, end_date)
#Get thermal source specifications
source_specs = get_source_specs(site)
"""
import pandas as pd
import requests
import numpy as np
import psycopg2
import configuration
from dataclasses import dataclass
from dacite import from_dict
from typing import Optional
import time
import pprint
def get_site_info(site_name, db):
"""
    Retrieves site information from an oTherm instance by site name

    :param str site_name: name of oTherm site
    :param str db: key into ``configuration.db_info`` identifying the oTherm instance
    :return:
        The **site** object is a dataclass with the following fields ::

            @dataclass
            class Site:
                id: int
                name: str
                city: str
                state: str
                timezone: str
                description: str
                application: str
                thermal_load: str
                weather_station_nws_id: str

        To access data elements, use the dot syntax. For example, the weather station ID is accessed by

        >>> site.weather_station_nws_id
        'KPSM'
"""
@dataclass
class Site:
id: int
name: str
city: str
state: str
timezone: str
description: str
application: str
thermal_load: str
weather_station_nws_id: str
if db == 'localhost':
site_url = "https://localhost:8000/api/site/?name=%s" % (site_name)
site_response = requests.get(site_url)
else:
site_url = "https://%s/api/site/?name=%s" % (configuration.db_info[db]['baseurl'], site_name)
site_response = requests.get(site_url, headers=configuration.db_info[db]['header'])
site_dict = site_response.json()[0]
try:
site = from_dict(data_class=Site, data=site_dict)
return site
except Exception as e:
print('Error with site data: \n ', e)
def get_thermal_load(site_name, db):
"""
@dataclass
class ThermalLoad:
uuid: str
name: str
description: Optional[str]
conditioned_area: float
heating_design_load: float
cooling_design_load: float
heating_design_oat: float
cooling_design_oat: float
To access data elements, use the dot syntax. For example, the Weather Station ID, is accessed by
"""
"""sphinx-ThermalLoad-begin"""
@dataclass
class ThermalLoad:
uuid: str
name: str
description: Optional[str]
conditioned_area: float
heating_design_load: float
cooling_design_load: float
heating_design_oat: float
cooling_design_oat: float
"""sphinx-ThermalLoad-end"""
#@dataclass
#class SiteLoad:
# id: int
# name: str
# city: str
# state: str
# thermal_load: ThermalLoad
if db == 'localhost':
thermal_load_url = "https://localhost:8000/api/thermal_load/?id=%s" % (site_name)
thermal_load_response = requests.get(thermal_load_url)
else:
thermal_load_url = "https://%s/api/thermal_load/?id=%s" % (configuration.db_info[db]['baseurl'], site_name)
thermal_load_response = requests.get(thermal_load_url, headers=configuration.db_info[db]['header'])
#print(thermal_load_url)
thermal_load_dict = thermal_load_response.json()[0]
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(thermal_load_dict['thermal_load'])
try:
thermal_load = from_dict(data_class=ThermalLoad, data=thermal_load_dict['thermal_load'])
return thermal_load
except Exception as e:
print('Error with thermal_load data: \n ', e)
def get_equipment(site_id, db):
"""
    Uses the 'requests' library to read the equipment table for a specific site
    :param int site_id: The site_id in the PostgreSQL database. Can be obtained from *site.id*
    :param str db: key into ``configuration.db_info`` identifying the oTherm instance
:return: Equipment dataclass contains equipment information in the following fields::
@dataclass
class Equipment:
id: int
uuid: str
model: str
description: Optional[str]
no_flowmeter_flowrate: float
type: int
site: int
manufacturer: int
"""
@dataclass
class Equipment:
id: int
uuid: str
model: str
description: Optional[str]
no_flowmeter_flowrate: Optional[float]
type: int
site: int
manufacturer: int
if db == 'localhost':
equip_url = "https://localhost:8000/api/equipment/?site=%s&start_date=%s&end_date=%s" % (site_id)
equip_response = requests.get(equip_url)
else:
equip_url = "https://%s/api/equipment/?site=%s" % (configuration.db_info[db]['baseurl'], site_id)
equip_response = requests.get(equip_url, headers=configuration.db_info[db]['header'])
print(equip_url)
    #Limitation: only gets the first piece of equipment at a site.
equipment_dict = equip_response.json()[0]
#print(equipment_dict)
equipment = from_dict(data_class=Equipment, data=equipment_dict)
return equipment
def get_equipment_data(site_id, start_date, end_date, timezone, db):
"""
    Uses the 'requests' library to read heat pump operating data from the otherm influx database and returns a pandas dataframe.
The data DataFrame returned includes all records for the equipment at a site. At present, the script is limited
to a single piece of equipment at a site.
:param int site_id: The site_id in the PostgreSQL database. Can be obtained from *site.id*
:param str start_date: start date (e.g. 2018-1-1)
:param str end_date: end date (e.g. 2018-12-31)
    :param str timezone: (e.g. 'US/Eastern')
    :param str db: key into ``configuration.db_info`` identifying the oTherm instance
    :return:
*pandas.DataFrame* containing heat pump operating data over the specified time range. The DataFrame contains
all fields stored for the piece of equipment in the influxDB database.
    .. note:: The index of the *DataFrame* is set to the ``time`` field and localized according to the ``site.timezone`` attribute
"""
if db == 'localhost':
equip_url = "https://localhost:8000/api/equipment_data/?site=%s&start_date=%s&end_date=%s" % (site_id, start_date,
end_date)
equip_response = requests.get(equip_url)
else:
equip_url = "https://%s/api/equipment_data/?site=%s&start_date=%s&end_date=%s" % (configuration.db_info[db]['baseurl'],
site_id, start_date, end_date)
print(equip_url)
equip_response = requests.get(equip_url, headers=configuration.db_info[db]['header'])
print(equip_response)
print(site_id, start_date, end_date, db, configuration.db_info[db]['header'])
    #Limitation: only gets the first piece of equipment at a site.
try:
hp_data = pd.DataFrame.from_dict(equip_response.json()[0]['heat_pump_metrics'])
hp_data.set_index(pd.to_datetime(hp_data['time']), inplace=True)
hp_data['time_elapsed'] = hp_data.index.to_series().diff().dt.seconds.div(3600, fill_value=0)
hp_data.tz_convert(timezone)
hp_data['heat_flow_rate'] = 900*hp_data['sourcefluid_flowrate']*(hp_data['source_supplytemp'] -
hp_data['source_returntemp'])
except Exception as e:
print('Error with heat pump data: \n ', e)
return
# need to filter for when heat pump is on, otherwise NaN
return hp_data
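# Illustrative follow-up (an assumption, not part of the original module): before
# computing runtime statistics one might keep only rows where the heat pump is
# actually running, e.g.
#   on_cycles = hp_data[hp_data['heatpump_power'] > 0]
# where 'heatpump_power' is a hypothetical field name in the influxDB measurement.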
def get_equipment_monitoring_system(equip_id, db):
"""
Retrieves the equipment monitoring system and specifications
    :param int equip_id: oTherm equipment id, e.g. *equipment.id*
    :param str db: key into ``configuration.db_info`` identifying the oTherm instance
:return:
Dataclass object with equipment monitoring system specifications ::
@dataclass
class MonitoringSysInfo:
id: int
name: Optional[str]
description: Optional[str]
specs: list
@dataclass
class EquipmentMonitor:
id: int
start_date: str
end_date: Optional[str]
equip_id: int
monitoring_system_spec: int
info: MonitoringSysInfo
To access data elements, use the dot syntax. For example, the *list* containing the monitoring system specifications
can be accessed by
>>> monitoring_system.info.specs
`[{'measurement_spec': {'name': 'HPP VA W 8% EP', 'description': 'Heat pump power, volt-amps, electrical panel', ...`
The monitoring system specifications is a list of measurements performed by the monitoring system, each measurement
has its own set of specifications. See oTherm documentation for more details.
    The list can be searched for individual measurement specifications with ``utilities.get_measurement_specs``
"""
@dataclass
class MonitoringSysInfo:
id: int
name: Optional[str]
description: Optional[str]
specs: list
@dataclass
class EquipmentMonitor:
id: int
start_date: str
end_date: Optional[str]
equip_id: int
monitoring_system_spec: Optional[int]
info: MonitoringSysInfo
if db == 'localhost':
equipment_monitoring_system_url = "http://localhost:8000/api/equipment_monitoring/?equip_id=%s" % (equip_id)
equip_mon_response = requests.get(equipment_monitoring_system_url)
else:
equipment_monitoring_system_url = "http://%s/api/equipment_monitoring/?equip_id=%s" %(configuration.db_info[db]['baseurl'], equip_id)
equip_mon_response = requests.get(equipment_monitoring_system_url, headers=configuration.db_info[db]['header'])
equipment_monitoring_system_dict = equip_mon_response.json()[0]
equipment_monitoring_system_dict['info'] = equipment_monitoring_system_dict.pop('monitoring_sys_info')
equipment_monitoring_system_dict['info']['specs'] = equipment_monitoring_system_dict['info'].pop('monitoring_system_specs')
try:
monitoring_system = from_dict(data_class=EquipmentMonitor, data=equipment_monitoring_system_dict)
except Exception as e:
print('Error with monitoring system data: \n ', e)
return monitoring_system
def get_weather_data(nws_id, timezone, start_date, end_date, db):
"""
Parameters
----------
nws_id : str
National Weather Station 4 character station identifier
timezone : str
Timezone of site, such as *'US/Eastern'*
start_date : str
Beginning date of request, such as *'2015-01-01'*
end_date : str
        End date of request
    db : str
        Key into ``configuration.db_info`` identifying the oTherm instance
Returns
-------
pandas.DataFrame
The returned DataFrame contains weather station data over the specified time range and contains all \
fields stored for the weather station.
    .. note:: The index of the *DataFrame* is set to the ``time`` field and localized according to the ``site.timezone`` attribute
"""
weather_url = "https://%s/api/weather_station/?nws_id=%s&start_date=%s&end_date=%s" % (configuration.db_info[db]['baseurl'],
nws_id, start_date, end_date)
wx_response = requests.get(weather_url, headers=configuration.db_info[db]['header'])
try:
wx_data = pd.DataFrame.from_dict(wx_response.json()[0]['weather_data'])
wx_data.set_index(pd.to_datetime(wx_data['time']), inplace=True)
wx_data['time_elapsed'] = wx_data.index.to_series().diff().dt.seconds.div(3600, fill_value=0)
wx_data.tz_convert(timezone)
    except Exception as e:
        print('Error with weather data: \n ', e)
        return None
    return wx_data
def get_source_specs(site, db):
"""
Retrieves the source specifications.
    :param site: oTherm site object, as returned by :func:`get_site_info`
    :param str db: key into ``configuration.db_info`` identifying the oTherm instance
:return:
Dataclass object with source specifications ::
@dataclass
class SourceSpec:
site: str
site_id: int
source_name: str
source_type: str
description: str
freeze_protection: Optional[float]
grout_type: Optional[str]
formation_conductivity: Optional[float]
formation_type: Optional[str]
grout_conductivity: Optional[float]
antifreeze: Optional[str]
pipe_dimension_ratio: Optional[str]
n_pipes_in_circuit: Optional[int]
n_circuits: Optional[int]
total_pipe_length: Optional[float]
To access data elements, use the dot syntax.
.. note:: While the oTherm data model supports multiple types of sources, this db_reader tool only supports
the vertical loop spec at present.
"""
# currently limited to vertical loop source specs
@dataclass
class SourceSpec:
site: str
site_id: int
source_name: str
source_type: str
description: str
freeze_protection: Optional[float]
grout_type: Optional[str]
formation_conductivity: Optional[float]
formation_type: Optional[str]
grout_conductivity: Optional[float]
antifreeze: Optional[str]
pipe_dimension_ratio: Optional[str]
n_pipes_in_circuit: Optional[int]
n_circuits: Optional[int]
total_pipe_length: Optional[float]
source_spec_url = "http://%s/api/thermal_source/?site=%s" % (configuration.db_info[db]['baseurl'] , site.id)
source_spec_response = requests.get(source_spec_url, headers=configuration.db_info[db]['header'])
otherm_spec_dict = source_spec_response.json()[0]
source_spec_dict = {}
source_spec_dict.update({'site': site.name, 'site_id': site.id})
source_spec_dict.update({'source_name': otherm_spec_dict['name']})
source_spec_dict.update(otherm_spec_dict['source_info']['source_type'])
source_spec_dict.update(otherm_spec_dict['source_info']['source_spec_info'])
antifreeze = source_spec_dict.pop('antifreeze_info')
source_spec_dict.update({'antifreeze': antifreeze['name']})
ghex_specs = source_spec_dict.pop('ghex_specs')
source_spec_dict.update({'pipe_dimension_ratio': ghex_specs['dimension_ratio'],
'n_pipes_in_circuit': ghex_specs['n_pipes_in_circuit'],
'n_circuits': ghex_specs['n_circuits'],
'total_pipe_length': ghex_specs['total_pipe_length']
})
source_spec_dict.pop('id')
source_spec_dict['source_type'] = source_spec_dict.pop('name')
source_spec = from_dict(data_class=SourceSpec, data=source_spec_dict)
return source_spec, otherm_spec_dict
def get_mfr_data(parameters):
ge = psycopg2.connect("dbname='mgf_performance_data' user='gxi' host='45.55.41.135' password='geoSTTR!'")
#TODO: sql query will need to be more specific as all pd is in one table so need to SELECT * FROM ... WHERE ....
sql = """SELECT * FROM \"%s\"""" % parameters
ds_data = pd.read_sql(sql, ge)
ge.close()
return ds_data
def get_monitoring_system(name, db):
"""
Similar to :func:`get_equipment_monitoring_system` but returns monitoring_system attributes for a given monitoring
system by name rather than equipment being monitored. This function requires the exact name of the monitoring
system, as specified in the oTherm database
Parameters
----------
name : str
        The name of the monitoring system
    db : str
        Key into ``configuration.db_info`` identifying the oTherm instance
Returns
-------
dict
        All specifications of a monitoring system in the oTherm database. Refer to oTherm documentation for details.
For more explanation of the parameters and return values, see :func:`get_equipment_monitoring_system`
"""
mon_sys_url = "https://%s/api/monitoring_system/?name=%s" % (configuration.db_info[db]['baseurl'], name)
mon_sys_response = requests.get(mon_sys_url, headers=configuration.db_info[db]['header'])
mon_sys_json = mon_sys_response.json()[0]
mon_sys_response.close()
return mon_sys_json
if __name__ == '__main__':
site_name = '111693'
start_date = '2016-01-01'
end_date = '2022-04-20'
timezone = 'US/Eastern'
db = 'otherm_cgb'
#db = 'othermdev'
#db = 'localhost'
site = get_site_info(site_name, db)
equipment = get_equipment(site.id, db)
#print(equipment)
hp_data = get_equipment_data(site.id, start_date, end_date, site.timezone, db)
#thermal_load = get_thermal_load(site.id, db)
#equip_monitoring_system = get_equipment_monitoring_system(equipment.id)
#nws_id = site.weather_station_nws_id
#wx_data = get_weather_data(nws_id, timezone, start_date, end_date)
#wx_data = get_weather_data(site.weather_station_nws_id, site.timezone, start_date, end_date)
#monitoring_system_dict = get_monitoring_system(equip_monitoring_system.info.name)
#source_spec, otherm_source = get_source_specs(site)
# --- writing weather station data to csv files ---------------------------
# station_data = pd.read_csv('../temp_files/NWS_stations_2.csv', header=0)
# for nws_id in station_data['nws_id']:
# print(nws_id)
# wx_data = get_weather_data(nws_id, timezone, start_date, end_date)
# if wx_data is not None:
# print(len(wx_data))
# outputfile = nws_id +'_data.csv'
# wx_data.to_csv('../temp_files/weather_data/' + outputfile)
|
{"hexsha": "92a951ffc91ed587ef4f218d46366660abc60f75", "size": 19717, "ext": "py", "lang": "Python", "max_stars_repo_path": "db_tools/otherm_db_reader.py", "max_stars_repo_name": "otherm/gshp-analysis", "max_stars_repo_head_hexsha": "746070b10a05985c31f06acd5e052ac3a7bf4924", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-14T09:07:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T17:39:47.000Z", "max_issues_repo_path": "db_tools/otherm_db_reader.py", "max_issues_repo_name": "otherm/gshp-analysis", "max_issues_repo_head_hexsha": "746070b10a05985c31f06acd5e052ac3a7bf4924", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "db_tools/otherm_db_reader.py", "max_forks_repo_name": "otherm/gshp-analysis", "max_forks_repo_head_hexsha": "746070b10a05985c31f06acd5e052ac3a7bf4924", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.530647986, "max_line_length": 141, "alphanum_fraction": 0.6497438758, "include": true, "reason": "import numpy", "num_tokens": 4467}
|
import numpy as np
import scipy.sparse as sp
from fdfdpy.constants import ETA_0, EPSILON_0, DEFAULT_MATRIX_FORMAT
def sig_w(l, dw, m=4, lnR=-12):
# helper for S()
sig_max = -(m+1)*lnR/(2*ETA_0*dw)
return sig_max*(l/dw)**m
def S(l, dw, omega, L0):
# helper for create_sfactor()
return 1 - 1j*sig_w(l, dw)/(omega*EPSILON_0*L0)
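# The two helpers above implement the standard polynomial-graded PML profile:
# sigma(l) = sigma_max * (l / dw)**m with sigma_max = -(m + 1) * lnR / (2 * eta0 * dw),
# chosen so a wave traversing the PML and back is attenuated to roughly exp(lnR);
# S(l) is the resulting complex coordinate-stretching factor.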
def create_sfactor(wrange, L0, s, omega, Nw, Nw_pml):
# used to help construct the S matrices for the PML creation
sfactor_array = np.ones(Nw, dtype=np.complex128)
if Nw_pml < 1:
return sfactor_array
hw = np.diff(wrange)[0]/Nw
dw = Nw_pml*hw
for i in range(0, Nw):
        if s == 'f':
if i <= Nw_pml:
sfactor_array[i] = S(hw * (Nw_pml - i + 0.5), dw, omega, L0)
elif i > Nw - Nw_pml:
sfactor_array[i] = S(hw * (i - (Nw - Nw_pml) - 0.5), dw, omega, L0)
        if s == 'b':
if i <= Nw_pml:
sfactor_array[i] = S(hw * (Nw_pml - i + 1), dw, omega, L0)
elif i > Nw - Nw_pml:
sfactor_array[i] = S(hw * (i - (Nw - Nw_pml) - 1), dw, omega, L0)
return sfactor_array
def S_create(omega, L0, N, Npml, xrange,
yrange=None, matrix_format=DEFAULT_MATRIX_FORMAT):
# creates S matrices for the PML creation
M = np.prod(N)
if np.isscalar(Npml):
Npml = np.array([Npml])
if len(N) < 2:
N = np.append(N, 1)
Npml = np.append(Npml, 0)
Nx = N[0]
Nx_pml = Npml[0]
Ny = N[1]
Ny_pml = Npml[1]
# Create the sfactor in each direction and for 'f' and 'b'
s_vector_x_f = create_sfactor(xrange, L0, 'f', omega, Nx, Nx_pml)
s_vector_x_b = create_sfactor(xrange, L0, 'b', omega, Nx, Nx_pml)
s_vector_y_f = create_sfactor(yrange, L0, 'f', omega, Ny, Ny_pml)
s_vector_y_b = create_sfactor(yrange, L0, 'b', omega, Ny, Ny_pml)
# Fill the 2D space with layers of appropriate s-factors
Sx_f_2D = np.zeros(N, dtype=np.complex128)
Sx_b_2D = np.zeros(N, dtype=np.complex128)
Sy_f_2D = np.zeros(N, dtype=np.complex128)
Sy_b_2D = np.zeros(N, dtype=np.complex128)
for i in range(0, Ny):
Sx_f_2D[:, i] = 1/s_vector_x_f
Sx_b_2D[:, i] = 1/s_vector_x_b
for i in range(0, Nx):
Sy_f_2D[i, :] = 1/s_vector_y_f
Sy_b_2D[i, :] = 1/s_vector_y_b
# Reshape the 2D s-factors into a 1D s-array
Sx_f_vec = Sx_f_2D.reshape((-1,))
Sx_b_vec = Sx_b_2D.reshape((-1,))
Sy_f_vec = Sy_f_2D.reshape((-1,))
Sy_b_vec = Sy_b_2D.reshape((-1,))
# Construct the 1D total s-array into a diagonal matrix
Sx_f = sp.spdiags(Sx_f_vec, 0, M, M, format=matrix_format)
Sx_b = sp.spdiags(Sx_b_vec, 0, M, M, format=matrix_format)
Sy_f = sp.spdiags(Sy_f_vec, 0, M, M, format=matrix_format)
Sy_b = sp.spdiags(Sy_b_vec, 0, M, M, format=matrix_format)
return (Sx_f, Sx_b, Sy_f, Sy_b)
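# Minimal usage sketch (illustrative values): build the PML stretch matrices for
# a 100 x 80 grid with 10-cell PMLs on every side.
#
#   import numpy as np
#   Sx_f, Sx_b, Sy_f, Sy_b = S_create(omega=2*np.pi*2e14, L0=1e-6,
#                                     N=np.array([100, 80]), Npml=np.array([10, 10]),
#                                     xrange=np.array([0.0, 5.0]),
#                                     yrange=np.array([0.0, 4.0]))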
|
{"hexsha": "a8f3563287a6ccad894613f90c52609d7c433af3", "size": 2901, "ext": "py", "lang": "Python", "max_stars_repo_path": "fdfdpy/pml.py", "max_stars_repo_name": "fancompute/python-fdfd", "max_stars_repo_head_hexsha": "49d3682a9cface0e2ce32932f4dbfc36adff9fef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-06-14T21:03:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T19:31:02.000Z", "max_issues_repo_path": "fdfdpy/pml.py", "max_issues_repo_name": "ianwilliamson/python-fdfd", "max_issues_repo_head_hexsha": "49d3682a9cface0e2ce32932f4dbfc36adff9fef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2018-07-13T14:53:50.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-28T00:59:45.000Z", "max_forks_repo_path": "fdfdpy/pml.py", "max_forks_repo_name": "ianwilliamson/python-fdfd", "max_forks_repo_head_hexsha": "49d3682a9cface0e2ce32932f4dbfc36adff9fef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-06-14T21:03:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T22:55:32.000Z", "avg_line_length": 32.2333333333, "max_line_length": 83, "alphanum_fraction": 0.5977249224, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1067}
|
#=
# 420: Discontinuous Quantities
([source code](SOURCE_URL))
Test jumping species and quantity handling
=#
module Example420_DiscontinuousQuantities
using Printf
using VoronoiFVM
using SparseArrays
using ExtendableGrids
using GridVisualize
using LinearAlgebra
function main(;N=5, Plotter=nothing,unknown_storage=:sparse)
XX=collect(0:0.1:1)
xcoord=XX
for i=1:N-1
xcoord=glue(xcoord,XX.+i)
end
grid2=simplexgrid(xcoord)
for i=1:N
cellmask!(grid2,[i-1],[i],i)
end
for i=1:N-1
bfacemask!(grid2,[i],[i],i+2)
end
params=zeros(2,num_cellregions(grid2))
for i=1:num_cellregions(grid2)
params[1,i]=i
params[2,i]=10*i
end
system=VoronoiFVM.System(grid2,unknown_storage=unknown_storage)
    ## First, we introduce a continuous quantity which we name "cspec". Note that the "species number" can be assigned automatically if not given explicitly.
cspec=ContinuousQuantity(system,1:N;ispec=1,id=1)
    ## A discontinuous quantity can be introduced as well. By default, each region gets a new species number. This can be overwritten by the user.
dspec=DiscontinuousQuantity(system,1:N; regionspec=[2+i%2 for i=1:N],id=2)
    # check 1D array access with quantities
carrierList = [cspec dspec]
numberCarriers = length(carrierList)
params2=zeros(1, numberCarriers)
for icc ∈ carrierList
params2[icc] = 2
end
for i=1:numberCarriers
@assert params2[i] == 2
end
    # check 2D array access with quantities
for i=1:num_cellregions(grid2)
@assert params[cspec,i] == i
@assert params[dspec,i] == 10*i
end
for i=1:num_cellregions(grid2)
params[cspec,i] = -i
params[dspec,i] = -10*i
end
for i=1:num_cellregions(grid2)
@assert params[1,i] == -i
@assert params[2,i] == -10*i
end
    ## For both quantities, we define simple diffusion fluxes:
function flux(f,u,edge)
f[dspec]=u[dspec,1]-u[dspec,2]
f[cspec]=u[cspec,1]-u[cspec,2]
end
d1=1
q1=0.2
function breaction(f,u,bnode)
# left outer boundary value for dspec
if bnode.region == 1
f[dspec] = u[dspec] + 0.5
end
        ## Define a thin layer interface condition for `dspec` and an interface source for `cspec`.
if bnode.region>2
react=(u[dspec,1]-u[dspec,2])/d1
f[dspec,1]= react
f[dspec,2]= -react
f[cspec]=-q1*u[cspec]
end
end
physics!(system,VoronoiFVM.Physics(
flux=flux,
breaction=breaction
))
## Set boundary conditions
boundary_dirichlet!(system,dspec,2,0.1)
boundary_dirichlet!(system,cspec,1,0.1)
boundary_dirichlet!(system,cspec,2,1.0)
subgrids=VoronoiFVM.subgrids(dspec,system)
U=solve(unknowns(system,inival=0),system)
dvws=VoronoiFVM.views(U,dspec,subgrids,system)
cvws=VoronoiFVM.views(U,cspec,subgrids,system)
vis=GridVisualizer(resolution=(600,300),Plotter=Plotter)
for i=1:length(dvws)
scalarplot!(vis,subgrids[i],dvws[i],flimits=(-0.5,1.5),clear=false,color=:red)
scalarplot!(vis,subgrids[i],cvws[i],flimits=(-0.5,1.5),clear=false,color=:green)
end
reveal(vis)
norm(system,U,2)
end
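## Minimal usage sketch (illustrative; assumes a GridVisualize-compatible
## plotting backend such as PyPlot is installed):
##   using PyPlot
##   Example420_DiscontinuousQuantities.main(N=5, Plotter=PyPlot)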
function test()
testval=7.812799873197911
main(unknown_storage=:sparse) ≈ testval &&
main(unknown_storage=:dense) ≈ testval
end
end
|
{"hexsha": "1e1012034564759390a43f66a2d8b1aa4cbecd99", "size": 3442, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/Example420_DiscontinuousQuantities.jl", "max_stars_repo_name": "PatricioFarrell/VoronoiFVM.jl", "max_stars_repo_head_hexsha": "690943ff455c91f16d114ad52cc83f2e8fa84e58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 80, "max_stars_repo_stars_event_min_datetime": "2019-11-18T05:04:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:11:01.000Z", "max_issues_repo_path": "examples/Example420_DiscontinuousQuantities.jl", "max_issues_repo_name": "PatricioFarrell/VoronoiFVM.jl", "max_issues_repo_head_hexsha": "690943ff455c91f16d114ad52cc83f2e8fa84e58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2019-11-19T18:12:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T15:39:14.000Z", "max_forks_repo_path": "examples/Example420_DiscontinuousQuantities.jl", "max_forks_repo_name": "PatricioFarrell/VoronoiFVM.jl", "max_forks_repo_head_hexsha": "690943ff455c91f16d114ad52cc83f2e8fa84e58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2019-08-29T16:46:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T14:10:29.000Z", "avg_line_length": 23.5753424658, "max_line_length": 158, "alphanum_fraction": 0.6487507263, "num_tokens": 1106}
|
import asyncio
import json
import time
from dataclasses import dataclass
from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple
import numpy as np
from scanpointgenerator import CompoundGenerator
from bluefly import detector, motor, pmac
from bluefly.core import ConfigDict, Device, RemainingPoints, Status
from bluefly.detector import DatumFactory, DetectorDevice, FilenameScheme
class FlyLogic:
async def scan(
self,
detectors: Sequence[DetectorDevice],
points: RemainingPoints,
offset: int,
callback: Callable[[str, int], None],
):
"""Scan the given points, putting them at offset into file. Progress updates
should call callback """
raise NotImplementedError(self)
async def stop(self, detectors: Sequence[DetectorDevice]):
"""Stop where you are, without retracing or closing files"""
raise NotImplementedError(self)
# Based on:
# https://github.com/NSLS-II/sirepo-bluesky/blob/7173258e7570904295bfcd93d5bca3dcc304c15c/sirepo_bluesky/sirepo_flyer.py
class FlyDevice(Device):
"""Generic fly scan device that wraps some custom routines"""
def __init__(
self, detectors: Sequence[DetectorDevice], logic: FlyLogic,
):
assert detectors, "Need at least one detector"
self._detectors = detectors
self._logic = logic
self._generator = CompoundGenerator(generators=[])
self._when_configured = time.time()
self._when_triggered = time.time()
self._when_updated = time.time()
self._start_offset = 0
self._completed_steps = 0
self._total_steps = 0
self._watchers: List[Callable] = []
self._complete_task: Optional[asyncio.Task] = None
self._pause_task: Optional[asyncio.Task] = None
self._resuming = False
self._factories: Dict[str, DatumFactory] = {}
self._scheme = FilenameScheme.get_instance()
def configure(self, d: Dict[str, Any]) -> Tuple[ConfigDict, ConfigDict]:
old_config = self.read_configuration()
self._when_configured = time.time()
self._generator = d["generator"]
new_config = self.read_configuration()
return old_config, new_config
def read_configuration(self) -> ConfigDict:
return dict(
generator=dict(
value=json.dumps(self._generator.to_dict()),
timestamp=self._when_configured,
)
)
def describe_configuration(self) -> ConfigDict:
return dict(
generator=dict(source="user supplied parameter", dtype="string", shape=[])
)
def stage(self) -> List[Device]:
self._factories.clear()
return [self]
def unstage(self) -> List[Device]:
# TODO: would be good to return a Status object here
asyncio.create_task(self._unstage())
return [self]
async def _unstage(self):
det_coros = [det.logic.close() for det in self._detectors]
await asyncio.gather(self._scheme.done_using_prefix(), *det_coros)
def collect(self) -> Generator[Dict[str, ConfigDict], None, None]:
for factory in self._factories.values():
# TODO: add completed_steps in here
# TODO: what happens about rewind?
yield from factory.collect_datums()
def collect_asset_docs(self):
for factory in self._factories.values():
yield from factory.collect_asset_docs()
def describe_collect(self) -> Dict[str, ConfigDict]:
assert self.name
dsets = {name: factory.describe() for name, factory in self._factories.items()}
return {
self.name: dict(
completed_steps=dict(source="progress", dtype="number", shape=[]),
),
**dsets,
}
def kickoff(self) -> Status:
status = Status(self._kickoff())
return status
async def _kickoff(self):
if self._resuming:
# Resuming where we last left off
self._resuming = False
else:
# Start from the beginning
self._completed_steps = 0
self._generator.prepare()
self._total_steps = self._generator.size
self._when_triggered = time.time()
if not self._factories:
# beginning of the scan, open the file
self._start_offset = 0
file_prefix = await self._scheme.current_prefix()
coros = []
for det in self._detectors:
assert det.name
coros.append(det.logic.open(file_prefix + det.name))
resources = await asyncio.gather(*coros)
for det, resource in zip(self._detectors, resources):
assert det.name
self._factories[det.name] = DatumFactory(det.name, resource)
def pause(self):
# TODO: would be good to return a Status object here
assert self._complete_task, "Trigger not called"
self._complete_task.cancel()
self._pause_task = asyncio.create_task(self._logic.stop(self._detectors))
def resume(self):
        assert self._pause_task and self._pause_task.done(), "You didn't wait for pause to finish"
self._resuming = True
def complete(self) -> Status:
self._complete_task = asyncio.create_task(self._complete())
status = Status(self._complete_task, self._watchers.append)
return status
async def _complete(self):
completed_at_start = self._completed_steps
points = RemainingPoints(self._generator, completed_at_start)
        queue: asyncio.Queue[Tuple[str, int]] = asyncio.Queue()
async def update_watchers():
steps: Dict[str, int] = {
det.name: completed_at_start for det in self._detectors
}
last_updated: Dict[str, float] = {
det.name: time.time() for det in self._detectors
}
while self._completed_steps < self._total_steps:
oldest_det = time.time() - min(last_updated.values())
# Allow the oldest detector to be up to 60s + exposure behind
timeout = 60 + self._generator.duration - oldest_det
name, step = await asyncio.wait_for(queue.get(), timeout)
factory = self._factories[name]
factory.register_collections(np.arange(steps[name], step))
steps[name] = step
new_completed_steps = min(steps.values())
if new_completed_steps > self._completed_steps:
self._completed_steps = new_completed_steps
self._when_updated = time.time()
for watcher in self._watchers:
watcher(
name=self.name,
current=self._completed_steps,
initial=0,
target=self._total_steps,
unit="",
precision=0,
time_elapsed=self._when_updated - self._when_triggered,
fraction=self._completed_steps / self._total_steps,
)
await asyncio.gather(
self._logic.scan(
self._detectors,
points,
self._start_offset + self._completed_steps,
lambda name, steps: queue.put_nowait(
(name, steps + completed_at_start)
),
),
update_watchers(),
)
self._start_offset += self._total_steps
@dataclass
class PMACMasterFlyLogic(FlyLogic):
pmac: pmac.PMAC
motors: List[motor.MotorDevice]
async def scan(
self,
detectors: Sequence[DetectorDevice],
points: RemainingPoints,
offset: int,
callback: Callable[[str, int], None],
):
# Prepare the motors and arm detectors
period, num = points.constant_duration, points.remaining
tracker, _, _ = await asyncio.gather(
pmac.build_initial_trajectory(self.pmac, self.motors, points),
pmac.move_to_start(self.pmac, self.motors, points.peek_point()),
detector.arm_detectors_triggered(detectors, num, offset, period),
)
# Kick off pmac, then show the progress of detectors
await asyncio.gather(
pmac.keep_filling_trajectory(self.pmac, tracker),
detector.collect_detectors(detectors, num, callback),
)
async def stop(self, detectors: Sequence[DetectorDevice]):
await asyncio.gather(
detector.stop_detectors(detectors), pmac.stop_trajectory(self.pmac)
)
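# Minimal wiring sketch (illustrative; device names are assumptions): compose
# the flyer from a PMAC and motors, then drive it from a bluesky fly plan:
#
#   logic = PMACMasterFlyLogic(pmac=my_pmac, motors=[motor_x, motor_y])
#   flyer = FlyDevice([my_detector], logic)
#   flyer.configure(dict(generator=my_compound_generator))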
|
{"hexsha": "4d9d2f4bf1f51a8453be30564a2dccf1c24416d8", "size": 8813, "ext": "py", "lang": "Python", "max_stars_repo_path": "bluefly/fly.py", "max_stars_repo_name": "dls-controls/bluefly", "max_stars_repo_head_hexsha": "5f461998a3f629a5f07e8733ab937a0302fa92f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bluefly/fly.py", "max_issues_repo_name": "dls-controls/bluefly", "max_issues_repo_head_hexsha": "5f461998a3f629a5f07e8733ab937a0302fa92f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bluefly/fly.py", "max_forks_repo_name": "dls-controls/bluefly", "max_forks_repo_head_hexsha": "5f461998a3f629a5f07e8733ab937a0302fa92f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-16T22:08:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T22:08:11.000Z", "avg_line_length": 37.9870689655, "max_line_length": 120, "alphanum_fraction": 0.6036536934, "include": true, "reason": "import numpy", "num_tokens": 1818}
|
[STATEMENT]
lemma \<L>_proj_2_reg_collapse:
"\<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` (\<L> \<A>) - {None})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
[PROOF STEP]
have "\<Q>\<^sub>r (fmap_funs_reg snd (trim_reg \<A>)) |\<subseteq>| ta_reachable (ta (fmap_funs_reg snd (trim_reg \<A>)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Q>\<^sub>r (fmap_funs_reg snd (trim_reg \<A>)) |\<subseteq>| ta_reachable (ta (fmap_funs_reg snd (trim_reg \<A>)))
[PROOF STEP]
by (auto simp: fmap_funs_reg_def)
[PROOF STATE]
proof (state)
this:
\<Q>\<^sub>r (fmap_funs_reg snd (trim_reg \<A>)) |\<subseteq>| ta_reachable (ta (fmap_funs_reg snd (trim_reg \<A>)))
goal (1 subgoal):
1. \<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
[PROOF STEP]
note [simp] = \<L>_collapse_automaton'[OF this]
[PROOF STATE]
proof (state)
this:
\<L> (collapse_automaton_reg (fmap_funs_reg snd (trim_reg \<A>))) = the ` (gcollapse ` \<L> (fmap_funs_reg snd (trim_reg \<A>)) - {None})
goal (1 subgoal):
1. \<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
[PROOF STEP]
by (auto simp: proj_2_reg_def fmap_funs_\<L> \<L>_trim)
[PROOF STATE]
proof (state)
this:
\<L> (proj_2_reg \<A>) = the ` (gcollapse ` map_gterm snd ` \<L> \<A> - {None})
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 805, "file": "Regular_Tree_Relations_RRn_Automata", "length": 7}
|
#!/usr/bin/python3
'''Advent of Code 2018 Day 11 solution'''
from typing import List, Tuple
import numpy
Grid = List[List[int]]
def cellpower(x: int, y: int, serial: int) -> int:
'''Calculate the "power" of a cell'''
if not x or not y:
return 0
rack = x + 10
return (int(((rack * y + serial) * rack) / 100) % 10) - 5
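# Worked example from the puzzle statement: cellpower(3, 5, 8) == 4
# (rack id 13; (13*5 + 8) * 13 = 949 -> hundreds digit 9 -> 9 - 5 = 4).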
def generategrid(serial: int) -> Grid:
'''Generate a 300x300 grid for a given serial. (0 is ignored)'''
grid: Grid = numpy.empty((301, 301), dtype=int)
for x in range(0, 301):
for y in range(0, 301):
grid[x][y] = cellpower(x, y, serial)
return grid
def generatesat(grid: Grid) -> Grid:
'''Generate a Summed Area Table for a given grid'''
satgrid: Grid = numpy.empty((301, 301), dtype=int)
satgrid[0][0] = grid[0][0]
for z in range(1, 301):
satgrid[0][z] = satgrid[0][z-1]+grid[0][z]
satgrid[z][0] = satgrid[z-1][0]+grid[z][0]
for x in range(1, 301):
for y in range(1, 301):
satgrid[x][y] = satgrid[x-1][y]+satgrid[x][y-1]-satgrid[x-1][y-1]+grid[x][y]
return satgrid
def squarepower(sat: Grid, x: int, y: int, size: int) -> int:
    '''Calculate the sum of the square of size `size` starting at (x, y), using a SAT'''
return sat[x+size-1][y+size-1] - sat[x-1][y+size-1] - sat[x+size-1][y-1] + sat[x-1][y-1]
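# The four-term lookup is the standard summed-area-table identity: the sum over
# the size x size square anchored at (x, y) equals
# SAT(x+s-1, y+s-1) - SAT(x-1, y+s-1) - SAT(x+s-1, y-1) + SAT(x-1, y-1).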
def runpart1(serial: int) -> Tuple[int, int]:
'''Solve part 1'''
grid = generategrid(serial)
sat = generatesat(grid)
maxx = 0
maxy = 0
maxpower = 0
for x in range(1, 299):
for y in range(1, 299):
sp = squarepower(sat, x, y, 3)
if sp > maxpower:
maxx = x
maxy = y
maxpower = sp
return (maxx, maxy)
def runpart2(serial: int) -> Tuple[int, int, int]:
'''Solve part 2'''
grid = generategrid(serial)
sat = generatesat(grid)
maxsize = 0
maxpower = 0
maxx = 0
maxy = 0
for size in range(1, 301):
for x in range(1, 300-size):
for y in range(1, 300-size):
sp = squarepower(sat, x, y, size)
if sp > maxpower:
maxx = x
maxy = y
maxpower = sp
maxsize = size
return (maxx, maxy, maxsize)
def run() -> Tuple[Tuple[int, int], Tuple[int, int, int]]:
'''Main'''
return (runpart1(1788), runpart2(1788))
if __name__ == '__main__':
print(run())
|
{"hexsha": "18f1b20ead8c5a280009099aad02a1af54cad581", "size": 2472, "ext": "py", "lang": "Python", "max_stars_repo_path": "aoc2018/day11.py", "max_stars_repo_name": "zoeimogen/AoC2018", "max_stars_repo_head_hexsha": "d50e1c483e58067f0f73e04318997410d53fcf15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-12-03T18:42:07.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-03T18:42:07.000Z", "max_issues_repo_path": "aoc2018/day11.py", "max_issues_repo_name": "zoeimogen/AoC2018", "max_issues_repo_head_hexsha": "d50e1c483e58067f0f73e04318997410d53fcf15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aoc2018/day11.py", "max_forks_repo_name": "zoeimogen/AoC2018", "max_forks_repo_head_hexsha": "d50e1c483e58067f0f73e04318997410d53fcf15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4285714286, "max_line_length": 92, "alphanum_fraction": 0.53802589, "include": true, "reason": "import numpy", "num_tokens": 802}
|
C include 'VICMAIN_FOR'
C
Subroutine ABLE86(IND,UNIT,BUF)
INTEGER*4 UNIT !Input unit number of image
INTEGER*4 BUF(*) !Array of label items returned
INTEGER IND
Real*4 XYZ
Character*20 CXYZ
Integer*4 SIZE,OFF,IJK
INTEGER*4 INSTANCE(30)
CHARACTER*9 TASKS(30)
Character*32 BLANKS
Character*7200 ALABEL
Character*3 CMon
REAL*4 R999
CHARACTER*100 SSS
INTEGER*4 IR999
CHARACTER*40 ETYPE ! Encoding type, Integer Cosine Transform?
LOGICAL PHASEII
EQUIVALENCE(R999,IR999)
DATA BLANKS /'! '/, R999 /-999/
SIZE = BUF(1) ! Caller passes length of BUF
If (SIZE.LT.1) Then
Call Xvmessage(' ***ABLE86 -size is less than 1', ' ')
CALL Abend
ENDIF
If (SIZE.GT.83) SIZE=83
IND = 0 ! INDicator initially normal
c
C ....Initialize all label items in BUF(*) as invalid
BUF(1) = 0
Do I=2,SIZE ! Typical SIZE ~= 50 for FLIGHT Label
BUF(I) = -999
EndDO
IF (SIZE.GE.83) BUF(83) = 0
c
c Defaulted Floats and Char arrays
c
If (SIZE.GE. 3) BUF(3) = IR999 ! Exposure, Float
If (SIZE.GE.17) Call MVCL(BLANKS,BUF(17),12) ! Target name
If (SIZE.GE.20) BUF(20)= IR999 ! IOF, Float
If (SIZE.GE.21) BUF(21)= IR999 ! CNV, Float
If (SIZE.GE.22) BUF(22)= IR999 ! Sol_Range, Float
If (SIZE.GE.23) Call MVCL(BLANKS,BUF(23),20)
If (SIZE.GE.28) Call MVCL(BLANKS,BUF(28),20)
If (SIZE.GE.33) Call MVCL(BLANKS,BUF(33),20)
If (SIZE.GE.38) Call MVCL(BLANKS,BUF(38),20)
If (SIZE.GE.43) Call MVCL(BLANKS,BUF(43),8)
If (SIZE.GE.47) Call MVCL(BLANKS,BUF(47),8)
If (SIZE.GE.50) BUF(50)= IR999
If (SIZE.GE.58) Call MVCL(BLANKS,BUF(51),32)
If (SIZE.GE.66) Call MVCL(BLANKS,BUF(59),32)
If (SIZE.GE.74) Call MVCL(BLANKS,BUF(67),32)
If (SIZE.GE.82) Call MVCL(BLANKS,BUF(75),32)
NINSTANCE = 30
Call XLHINFO(UNIT,TASKS,INSTANCE,NINSTANCE,ISTAT,' ')
c
C ...Read label into ALABEL, a 7200-Byte String, where 7200 = 72 * 100
Call VIC1LAB(UNIT,istat,nlabs,alabel,20) ! IBM or NOT ?
Call CHKSTAT(ISTAT,' ***ABLE86 ERR,ISTAT=',1,ISTAT,1)
PHASEII = .FALSE.
If (NLABS.EQ.0) GoTo 200 ! NON-IBM, goto flight_label
c
c Ground Calibration, search for GLL ID
OFF = INDEX(ALABEL(1:),'GLL/S') ! GLL ?
If (OFF.EQ.0) Return ! If not GLL, return
c
C ....Here if GLL SSI ground calibration label (in IBM Format)
BUF(1) = 1
c
C ....Get frame number
If (SIZE.LT.2) Return ! Less than 8 Bytes
I = INDEX(ALABEL(OFF:),' FRAME') ! Search for FRAME starting from GLL/S
If (I.NE.0) Then
c
c Put Label (Char String) into INT*4 PAR, 20 Bytes
Read (ALABEL(OFF+I+5:),'(BN,I2)') BUF(2)
Else
Call Xvmessage(' ***ABLE86 -err in frame number',' ')
IND = -1
EndIF
c
C ....Get exposure time(msec)
If (SIZE.LT.3) Return
I = INDEX(ALABEL(OFF:),'EXP=')
If (I.NE.0) Then
c
Read (ALABEL(OFF+I+3:),'(BN,F7.1)') XYZ
Call MVLC(XYZ,CXYZ,4)
Call MVCL(CXYZ,BUF(3),4)
Else
Call Xvmessage(' ***ABLE86 -expo time error', ' ')
IND = -1
EndIF
c
C ....Get filter position
If (SIZE.LT.4) Return
I = INDEX(ALABEL(OFF:),' FILTER=')
If (I.NE.0) Then
c Convert ASCII integer to Decimal value BUF(4)
Read (ALABEL(OFF+I+8:),'(BN,I1)') BUF(4)
Else
Call Xvmessage(' ***ABLE86 -filter position error', ' ')
IND = -1
EndIF
c
C ....Get frame-rate
If (SIZE.LT.5) Return
I = INDEX(ALABEL(OFF:),' FR.RATE=')
If (I.NE.0) Then
Read (ALABEL(OFF+I+8:),'(BN,I1)') IFR
BUF(5) = 4 !66 2/3 sec
If (IFR.EQ.2) BUF(5)=1 ! 2 1/3 sec
If (IFR.EQ.8) BUF(5)=2 ! 8 2/3 sec
If (IFR.EQ.3) BUF(5)=3 !33 1/3 sec
Else
Call Xvmessage(' ***ABLE86 -Scan-rate error', ' ')
IND = -1
ENDIF
If (SIZE.LT.6) Return
IND = -1 !No FIBE in calibration label
If (SIZE.LT.7) Return
c
C ....Get gain state
If (SIZE.LT.8) Return
I = INDEX(ALABEL(OFF:),' GAIN=')
If (I.NE.0) Then
Read (ALABEL(OFF+I+5:),'(BN,I1)') BUF(8)
Else
Call Xvmessage(' ***ABLE86 gain error', ' ')
EndIF
IF (Size.LT.9) Return ! No MOD10 for ground calibration
C ....Get year
If (SIZE.LT.10) Return
I = INDEX(ALABEL(OFF:),' LEVEL=')
If (I.EQ.0) Return
Do K=1,30 ! Replace ":" with blanks
J = OFF + I + K
If (ALABEL(J:J).EQ.':') ALABEL(J:J)=' '
EndDO
Read (ALABEL(OFF+I+36:),'(BN,I4)') BUF(10) ! year = 38th after ' LEVEL'
c
C ....Get day-of-year (should it be day-of-month ?)
If (SIZE.LT.11) Return
Read (ALABEL(OFF+I+32:),'(BN,I2)') IJK ! IJK is day-of-month
Read (ALABEL(OFF+I+27:),'(BN,A3)') CMon ! Month in Character
Mon = 13 ! Default 'BAD' flag
If (Cmon(1:3) .Eq. 'JAN') Mon = 1
If (Cmon(1:3) .Eq. 'FEB') Mon = 2
If (Cmon(1:3) .Eq. 'MAR') Mon = 3
If (Cmon(1:3) .Eq. 'APR') Mon = 4
If (Cmon(1:3) .Eq. 'MAY') Mon = 5
If (Cmon(1:3) .Eq. 'JUN') Mon = 6
If (Cmon(1:3) .Eq. 'JUL') Mon = 7
If (Cmon(1:3) .Eq. 'AUG') Mon = 8
If (Cmon(1:3) .Eq. 'SEP') Mon = 9
If (Cmon(1:3) .Eq. 'OCT') Mon = 10
If (Cmon(1:3) .Eq. 'NOV') Mon = 11
If (Cmon(1:3) .Eq. 'DEC') Mon = 12
Nyear = Buf(10)
c
c Calculate Day-of-Year & put in BUF(11)
Call JDAY(Mon, IJK, Nyear, Nout)
BUF(11) = Nout
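C ....e.g. JDAY maps Mar 1 of a non-leap year to day-of-year 60
C ....(31 + 28 days precede it); in a leap year it is 61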
c
C ....Get time-of-day, Hour-Minute-Second
If (SIZE.LT.12) Return
Read (ALABEL(OFF+I+17:),'(BN,I2)') BUF(12) ! Hour
If (SIZE.LT.13) Return
Read (ALABEL(OFF+I+20:),'(BN,I2)') BUF(13) ! Minute
c
If (SIZE.LT.14) Return
Read (ALABEL(OFF+I+23:),'(BN,I2)') BUF(14) ! Second
GOTO 300
c
c !!! Now, NON-IBM Label !!!
200 Call XLGET(UNIT,'HISTORY','MISSION',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
If (SSS(1:7).NE.'GALILEO') Return ! Return if not Galileo
Call XLGET(UNIT,'HISTORY','SENSOR',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
If (SSS(1:3).NE.'SSI') Return ! Return if not SSI
Call XLGET(UNIT,'HISTORY','ENCODING_TYPE',ETYPE,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
If (ISTAT .EQ. 1)
* PHASEII = .TRUE.
C ....Set FLAG = 2, or 3 if Galileo SSI flight label
If (PHASEII) Then
BUF(1) = 3
Else
BUF(1) = 2
EndIF
C ....Get spacecraft clock (=100*RIM + MOD91)
If (SIZE.LT.2) Return
Call XLGET(UNIT,'HISTORY','RIM',BUF(2),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
Call XLGET(UNIT,'HISTORY','MOD91',MOD91,ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.EQ.1) Then
BUF(2)=100*BUF(2) + MOD91
Else
IND = -1
EndIF
C ....Get exposure time (msec)
If (SIZE.LT.3) Return
CaLL XLGET(UNIT,'HISTORY','EXP',BUF(3),ISTAT,
* 'FORMAT','REAL','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get filter position (0-7)
If (SIZE.LT.4) Return
Call XLGET(UNIT,'HISTORY','FILTER',BUF(4),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get frame rate (1-4)
If (SIZE.LT.5) Return
Call XLGET(UNIT,'HISTORY','RATE',BUF(5),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get flood,invert/non-invert,blem-protect,extended-exposure(PhaseI)
C ....Get on-chip mosaic, edited OpNav image, flood, invert/non-invert,
C ....blem-protect, extended-exposure(PhaseII)
If (SIZE.LT.6) Return
If (PHASEII) Then
Call XLGET(UNIT,'HISTORY','MOFIBE',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
Else
Call XLGET(UNIT,'HISTORY','FIBE',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
EndIF
If (ISTAT.EQ.1) Then
If (PHASEII) Then
Read (SSS(1:7),'(BN,I6)') BUF(6) ! MOFIBE
Else
Read (SSS(1:5),'(BN,I4)') BUF(6) ! FIBE
EndIF
Else
IND = -1
EndIF
C ....Get boom-obscuration flag
If (SIZE.LT.7) Return
Call XLGET(UNIT,'HISTORY','BOOM',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
If (ISTAT.EQ.1) Then
If (SSS(1:1).EQ.'P') BUF(7)=1
If (SSS(1:1).EQ.'N') BUF(7)=0
If (SSS(1:1).EQ.'V') BUF(7)=2
Else
IND = -1
EndIF
C ....Get camera gain-state
If (SIZE.LT.8) Return
Call XLGET(UNIT,'HISTORY','GAIN',BUF(8),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get 10*MOD10 + MOD8
If (SIZE.LT.9) Return
Call XLGET(UNIT,'HISTORY','MOD10',BUF(9),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
Call XLGET(UNIT,'HISTORY','MOD8',MOD8,ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.EQ.1) Then
BUF(9)=10*BUF(9) + MOD8
Else
IND = -1
EndIF
C ....Get spacecraft-event time
If (SIZE.LT.10) Return
Call XLGET(UNIT,'HISTORY','SCETYEAR',BUF(10),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
If (SIZE.LT.11) Return
Call XLGET(UNIT,'HISTORY','SCETDAY',BUF(11),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
If (SIZE.LT.12) Return
Call XLGET(UNIT,'HISTORY','SCETHOUR',BUF(12),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
If (SIZE.LT.13) Return
Call XLGET(UNIT,'HISTORY','SCETMIN',BUF(13),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
If (SIZE.LT.14) Return
Call XLGET(UNIT,'HISTORY','SCETSEC',BUF(14),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
Call XLGET(UNIT,'HISTORY','SCETMSEC',BUF(15),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get RIM count partition number
Call XLGET(UNIT,'HISTORY','PARTITION',BUF(16),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
If (ISTAT.NE.1) IND=-1
C ....Get picture number
If (SIZE.LT.16) Return
Call XLGET(UNIT,'HISTORY','PICNO',SSS,ISTAT,'HIST',TASKS(1),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(47),length)
Else
IND = -1
EndIF
C ....Target name
If (SIZE.LT.17) Return
Call XLGET(UNIT,'HISTORY','TARGET',SSS,ISTAT,'HIST',TASKS(1),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(17),length)
Else
IND = -1
EndIF
If (SIZE.LT.20) Return
C ....Solar range
If (SIZE.GE.22 .AND. PHASEII)
* Call XLGET(UNIT,'HISTORY','SOLRANGE',BUF(22),
* ISTAT,'FORMAT','REAL','HIST',TASKS(1),' ')
C Scan for GALSOS task
300 IG = 0 !Index of GALSOS task
JG = 1 !First assume info is in FIRST task
DO J=1,NINSTANCE
IF (TASKS(J).EQ.'GALSOS') IG=J
ENDDO
IF (IG.EQ.0) GOTO 400 ! Skip if no GALSOS info
C ....DN-to-reflectance conversion factor
Call XLGET(UNIT,'HISTORY','IOF',BUF(20),ISTAT,
* 'FORMAT','REAL','HIST',TASKS(1),' ')
C ....DN-to-radiance conversion factor
If (SIZE.LT.21) Return
Call XLGET(UNIT,'HISTORY','CNV',BUF(21),ISTAT,
* 'FORMAT','REAL','HIST',TASKS(1),' ')
If (SIZE.LT.22) Return
If (.NOT.PHASEII) Call XLGET(UNIT,'HISTORY','SOLRANGE',BUF(22),
* ISTAT,'FORMAT','REAL','HIST',TASKS(1),' ')
If (SIZE.LT.27) Return
C ....The GALSOS cal files are recorded in the first task or the
C ....GALSOS task (later versions of GALSOS). Determine where
C ....they are stored and use JG to point to them.
C ....Dark-current filename
Call XLGET(UNIT,'HISTORY','DC',SSS,ISTAT,'HIST',
* TASKS(1),'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.NE.1) THEN
Call XLGET(UNIT,'HISTORY','DC',SSS,ISTAT,'HIST',
* TASKS(IG),'FORMAT','STRING','LENGTH',length,' ')
IF (ISTAT.EQ.1) JG=IG !Info is in GALSOS task
ENDIF
If (ISTAT.EQ.1) Call MVCL(SSS,BUF(23),length)
C ....Get radiometric filename
If (SIZE.LT.32) Return
Call XLGET(UNIT,'HISTORY','CAL',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Call MVCL(SSS,BUF(28),length)
C ....Get blemish filename
If (SIZE.LT.37) Return
Call XLGET(UNIT,'HISTORY','BLM',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Call MVCL(SSS,BUF(33),length)
C ....Get shutter-offset filename
If (SIZE.LT.42) Return
Call XLGET(UNIT,'HISTORY','SO',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Call MVCL(SSS,BUF(38),length)
C ....Get EDR tape-ID
400 If (SIZE.LT.44) Return
Call XLGET(UNIT,'HISTORY','EDRTAPE',SSS,ISTAT,'HIST',
* TASKS(1),'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Call MVCL(SSS,BUF(43),length)
C ....Get EDR tape file number
If (SIZE.LT.45) Return
Call XLGET(UNIT,'HISTORY','EDRFILE',BUF(45),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
C ....Get uneven-bit-weighting correction flag
If (SIZE.LT.46) Return
Call XLGET(UNIT,'HISTORY','UBWC',SSS,ISTAT,
* 'FORMAT','STRING','HIST',TASKS(1),' ')
If (ISTAT.EQ.1) Then
If (SSS(1:3).EQ.'OFF') BUF(46)=0
If (SSS(1:2).EQ.'ON') BUF(46)=1
EndIF
C ....Store SEQNO for PHASEII
If (SIZE.LT.49) Return
If (PHASEII) Then
Call XLGET(UNIT,'HISTORY','SEQNO',buf(49),ISTAT,
* 'FORMAT','INT','HIST',TASKS(1),' ')
EndIF
C ....Get image entropy
If (SIZE.LT.50) Return
Call XLGET(UNIT,'HISTORY','ENTROPY',BUF(50),ISTAT,'HIST',
* TASKS(1),'FORMAT','REAL',' ')
If (ISTAT.NE.1) IND=-1
C ....Get Dark-Current file disk and directory name
If (SIZE.LT.58) Return
Call XLGET(UNIT,'HISTORY','DIRDC',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(51),32)
Else
IND = -1
EndIF
C ....Get Radiometric file disk and directory name
If (SIZE.LT.66) Return
Call XLGET(UNIT,'HISTORY','DIRCAL',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(59),32)
Else
IND = -1
EndIF
C ....Get Blemish file disk and directory name
If (SIZE.LT.74) Return
Call XLGET(UNIT,'HISTORY','DIRBLM',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(67),32)
Else
IND = -1
EndIF
C ....Get Shutter-Offset file disk and directory name
If (SIZE.LT.82) Return
Call XLGET(UNIT,'HISTORY','DIROFF',SSS,ISTAT,'HIST',TASKS(JG),
* 'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
Call MVCL(SSS,BUF(75),32)
Else
IND = -1
EndIF
C ....Get SSI readout mode
If (SIZE.LT.83) Return
BUF(83) = 0
IF (PHASEII) THEN
Call XLGET(UNIT,'HISTORY','TLMFMT',SSS,ISTAT,'HIST',
& TASKS(1),'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) Then
IF (SSS.EQ.'HCA'.OR.SSS.EQ.'HMA') THEN
Call XLGET(UNIT,'HISTORY','READOUTMODE',SSS,ISTAT,
& 'HIST',
& TASKS(1),'FORMAT','STRING','LENGTH',length,' ')
If (ISTAT.EQ.1) THEN
BUF(83) = 2
IF (SSS.EQ.'SAMPLE') BUF(83)=1
Else
IND = -1
EndIF
ENDIF
ENDIF
ENDIF
Return
End
|
{"hexsha": "55279839649101a43f279f0567e88d85c5b1f0ca", "size": 16706, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "vos/p2/sub/able86/able86.f", "max_stars_repo_name": "NASA-AMMOS/VICAR", "max_stars_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-10-21T05:56:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:01.000Z", "max_issues_repo_path": "vos/p2/sub/able86/able86.f", "max_issues_repo_name": "NASA-AMMOS/VICAR", "max_issues_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vos/p2/sub/able86/able86.f", "max_forks_repo_name": "NASA-AMMOS/VICAR", "max_forks_repo_head_hexsha": "4504c1f558855d9c6eaef89f4460217aa4909f8e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-09T01:51:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T00:23:24.000Z", "avg_line_length": 36.0043103448, "max_line_length": 79, "alphanum_fraction": 0.523584341, "num_tokens": 6035}
|
import numpy as np
from pcdet.utils.common_utils import create_logger
from pathlib import Path
from pcdet.datasets.nuscenes.nuscenes_dataset import NuScenesDataset
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.ops.roiaware_pool3d import points_in_boxes_cpu
from pcdet.utils import visualize_utils as V
from mayavi import mlab
class_names = ['car', 'truck', 'construction_vehicle', 'bus', 'trailer',
'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']
def test_nus_dataset():
data_path = Path('/mnt/nas/DATA/nuScenes/nuScenes')
cfg_file = '/home/jianyun/WorkSpace/pythonProjects/OpenPCDet/tools/cfgs/dataset_configs/nuscenes_dataset.yaml'
cfg_from_yaml_file(cfg_file, cfg)
logger = create_logger()
nus_dataset = NuScenesDataset(cfg, class_names=class_names, root_path=data_path, logger=logger, training=True)
np.random.seed(666)
for i, data_dict in enumerate(nus_dataset):
if i != 20:
continue
frame_id = data_dict['frame_id']
print(f'frame_id: {frame_id}')
points = data_dict['points']
gt_boxes = data_dict['gt_boxes']
point_masks = points_in_boxes_cpu(points[:, 0:3], gt_boxes[:, 0:7])
gt_points = points[point_masks.sum(axis=0) != 0]
# np.save(data_path / 'TMP' / f'gtPoints_{frame_id}.npy', gt_points)
gt_points.astype(np.float32).tofile(data_path / 'TMP' / f'gtPoints_{frame_id}.bin')
fig = V.draw_scenes(points)
V.draw_sphere_pts(gt_points, fig=fig)
mlab.show(stop=True)
# exit(-1)
if __name__ == '__main__':
test_nus_dataset()
|
{"hexsha": "e05ebdd23bd8a05761b37066a0e4812f31a932df", "size": 1638, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_datasets/nuscenes/test_nuscenes_dataset.py", "max_stars_repo_name": "StarsMyDestination/OpenPCDet", "max_stars_repo_head_hexsha": "a9bfdffb2c23f6fe7d4c19085b47ec35728d5884", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_datasets/nuscenes/test_nuscenes_dataset.py", "max_issues_repo_name": "StarsMyDestination/OpenPCDet", "max_issues_repo_head_hexsha": "a9bfdffb2c23f6fe7d4c19085b47ec35728d5884", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_datasets/nuscenes/test_nuscenes_dataset.py", "max_forks_repo_name": "StarsMyDestination/OpenPCDet", "max_forks_repo_head_hexsha": "a9bfdffb2c23f6fe7d4c19085b47ec35728d5884", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4285714286, "max_line_length": 114, "alphanum_fraction": 0.6965811966, "include": true, "reason": "import numpy", "num_tokens": 439}
|
module testMoments
using Base.Test
using DataFrames
using Datetime
using TimeData
include(string(Pkg.dir("AssetMgmt"), "/src/AssetMgmt.jl"))
println("\n Running moments tests\n")
#########################
## test portfolio mean ##
#########################
pf = AssetMgmt.Portfolio(ones(4, 8)/8)
mus = DataFrame(rand(1, 8))
## test getPMean for portfolios
AssetMgmt.getPMean(pf, mus)
## test getPMean for investments
vals = rand(8, 4)
valsDf = DataFrame(AssetMgmt.makeWeights(vals))
invs = AssetMgmt.Investments(valsDf, [1:8])
mus = DataFrame(rand(8, 4))
AssetMgmt.getPMean(invs, mus)
#############################
## test portfolio variance ##
#############################
pf = AssetMgmt.Portfolio(ones(1, 4)/4)
covMatr = cov(Timematr(rand(50, 4)))
AssetMgmt.getPVar(pf, covMatr)
valsDf = DataFrame(ones(10, 4)/4)
invs = AssetMgmt.Investments(valsDf, [1:10])
AssetMgmt.getPVar(invs, covMatr)
end
|
{"hexsha": "f7493440494b61fd019c9a9db0cb5d0f4eb7c736", "size": 911, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/ttmoments.jl", "max_stars_repo_name": "cgroll/AssetMgmt.jl", "max_stars_repo_head_hexsha": "bbb87c1aab5f3b114807d7d5edb4db113260aa42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/ttmoments.jl", "max_issues_repo_name": "cgroll/AssetMgmt.jl", "max_issues_repo_head_hexsha": "bbb87c1aab5f3b114807d7d5edb4db113260aa42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/ttmoments.jl", "max_forks_repo_name": "cgroll/AssetMgmt.jl", "max_forks_repo_head_hexsha": "bbb87c1aab5f3b114807d7d5edb4db113260aa42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-01T18:49:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T18:49:12.000Z", "avg_line_length": 20.7045454545, "max_line_length": 58, "alphanum_fraction": 0.6399560922, "num_tokens": 271}
|
from typing import Tuple, Union, List, Any
import torch
import numpy as np
class Augmentation(object):
"""
Super class for all augmentations.
"""
def __init__(self) -> None:
"""
Constructor method
"""
pass
def __call__(self, *args: Any, **kwargs: Any) -> None:
"""
Call method is used to apply the augmentation
:param args: Will be ignored
:param kwargs: Will be ignored
"""
raise NotImplementedError()
class RandomFlipping(Augmentation):
"""
Random flipping augmentation over given dimensions
"""
def __init__(self, dimensions: Union[Tuple[int, ...], int]) -> None:
"""
Constructor method
:param dimensions: (Union[Tuple[int, ...], int]) Dimensions to be utilized
"""
# Call super constructor
super(RandomFlipping, self).__init__()
# Save parameter
self.dimensions = dimensions if isinstance(dimensions, tuple) else (dimensions,)
def __call__(self, tensors: List[torch.Tensor], *args, **kwargs) -> List[torch.Tensor]:
"""
Call method is used to apply the augmentation
:param tensors: (List[torch.Tensor]) Tensors to be augmented by random flipping
:param args: Will be ignored
:param kwargs: Will be ignored
"""
# Choose dimension randomly
dimension = tuple(
np.random.choice(self.dimensions, size=np.random.randint(1, len(self.dimensions) + 1, 1), replace=False))
# Augment all tensors in given list
for index in range(len(tensors)):
# Perform flipping
tensors[index] = flip(tensor=tensors[index], dims=dimension)
return tensors
class GaussianNoise(Augmentation):
"""
This class implements gaussian noise injection augmentation.
"""
def __init__(self, standard_deviation: float = 0.1) -> None:
"""
Constructor method
:param standard_deviation: (float) Standard deviation of gaussian noise to be added
"""
# Call super constructor
super(GaussianNoise, self).__init__()
# Save parameter
self.standard_deviation = standard_deviation
def __call__(self, tensors: List[torch.Tensor], apply_augmentation: List[bool], *args: Any,
**kwargs: Any) -> List[torch.Tensor]:
"""
Call method is used to apply the augmentation
:param tensors: (List[torch.Tensor]) Tensors to be augmented with gaussian noise
:param apply_augmentation: (List[bool]) If true, the augmentation is applied to the corresponding tensor
:param args: Will be ignored
:param kwargs: Will be ignored
"""
# Augment all tensors in given list
for index, (tensor, apply) in enumerate(zip(tensors, apply_augmentation)):
if apply:
# Add gaussian noise
tensors[index] = tensor + torch.randn_like(tensor) * self.standard_deviation
return tensors
class AdjustBrightness(Augmentation):
"""
This class implements random brightness adjustment augmentation.
"""
def __init__(self, brightness_factor: float = 0.15) -> None:
"""
Constructor method
:param brightness_factor: (float) Max brightness change relative to the max value of the tensor
"""
# Call super constructor
super(AdjustBrightness, self).__init__()
# Save parameter
self.brightness_factor = brightness_factor
def __call__(self, tensors: List[torch.Tensor], apply_augmentation: List[bool], *args,
**kwargs) -> List[torch.Tensor]:
"""
Call method is used to apply the augmentation
:param tensors: (List[torch.Tensor]) Tensors to be augmented by brightness adjustment
:param apply_augmentation: (List[bool]) If true, the augmentation is applied to the corresponding tensor
:param args: Will be ignored
:param kwargs: Will be ignored
"""
# Choose brightness adjustment value
brightness_factor = (2. * torch.rand(len(tensors)) - 1.) * self.brightness_factor
# Augment all tensors in given list
for index, (tensor, apply) in enumerate(zip(tensors, apply_augmentation)):
if apply:
# Perform brightness adjustment
tensors[index] = (tensor + tensor.max() * brightness_factor[index])
return tensors
def flip(tensor: torch.Tensor, dims: Tuple[int]) -> torch.Tensor:
"""
Function to flip a tensor at given dimensions with advanced indexing.
Much faster than standard flip function
:param tensor: (torch.Tensor) Tensor to be flipped
:param dims: (Tuple[int]) Dimensions to be flipped
:return: Flipped tensor
"""
for dim in dims:
# Init index tensor
reverse_index = torch.arange(tensor.shape[dim] - 1, -1, -1)
# Perform flipping
if dim == -1:
tensor = tensor[..., reverse_index]
elif dim == -2:
tensor = tensor[..., reverse_index, :]
elif dim == -3:
tensor = tensor[..., reverse_index, :, :]
else:
raise ValueError("Illegal dim to be flip. Dim: {}".format(dim))
return tensor
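# Minimal usage sketch (assumed tensor shapes, for illustration only):
#   pipeline = [RandomFlipping(dimensions=(-1, -2)), GaussianNoise(0.05)]
#   tensors = [torch.rand(1, 64, 64), torch.rand(1, 64, 64)]
#   tensors = pipeline[0](tensors)
#   tensors = pipeline[1](tensors, apply_augmentation=[True, False])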
|
{"hexsha": "b9e999fb5f2032290816c4819f79ca1417862ac1", "size": 5275, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/augmentation.py", "max_stars_repo_name": "ChristophReich1996/OSS-Net", "max_stars_repo_head_hexsha": "38ffae60286b53e72f2d17f510dbbfffb7036caa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-11-03T18:18:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T12:35:56.000Z", "max_issues_repo_path": "data/augmentation.py", "max_issues_repo_name": "ChristophReich1996/OSS-Net", "max_issues_repo_head_hexsha": "38ffae60286b53e72f2d17f510dbbfffb7036caa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/augmentation.py", "max_forks_repo_name": "ChristophReich1996/OSS-Net", "max_forks_repo_head_hexsha": "38ffae60286b53e72f2d17f510dbbfffb7036caa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-12T04:34:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T04:34:39.000Z", "avg_line_length": 36.1301369863, "max_line_length": 117, "alphanum_fraction": 0.6191469194, "include": true, "reason": "import numpy", "num_tokens": 1138}
|
from normal_form.games.zero_sum import ZeroSumGame
import numpy as np
class UniqueEquilibrium(ZeroSumGame):
def __init__(self, N, M, config):
G = np.zeros((N, M))
row = np.random.choice(N)
column = np.random.choice(M)
G[row, column] = 0.5
for i in range(M):
if i != column:
G[row, i] = 0.0
for i in range(N):
if i != row:
for j in range(M):
G[i, j] = 1.0
super(UniqueEquilibrium, self).__init__(G)
def __repr__(self):
return f"unique_{self.G.shape[0]}_{self.G.shape[1]}"
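# Illustrative payoff matrix (one possible random draw, not from the source)
# for N=M=3 with the 0.5 cell drawn at (row=1, column=2):
#   [[1.0, 1.0, 1.0],
#    [0.0, 0.0, 0.5],
#    [1.0, 1.0, 1.0]]
# Every other entry in the chosen row is 0 and every other row is all 1s;
# per the class name, this construction pins a unique equilibrium to that cell.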
|
{"hexsha": "d0ba79acb93f1402041c8a8ede9052d80cbe5828", "size": 635, "ext": "py", "lang": "Python", "max_stars_repo_path": "finite_games/normal_form/games/unique_equilibrium.py", "max_stars_repo_name": "rtloftin/strategically_efficient_rl", "max_stars_repo_head_hexsha": "85a702b9361211d345a58cc60696e4e851d48ec4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-08-02T14:28:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T03:30:47.000Z", "max_issues_repo_path": "finite_games/normal_form/games/unique_equilibrium.py", "max_issues_repo_name": "rtloftin/strategically_efficient_rl", "max_issues_repo_head_hexsha": "85a702b9361211d345a58cc60696e4e851d48ec4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-02T17:35:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T04:42:17.000Z", "max_forks_repo_path": "finite_games/normal_form/games/unique_equilibrium.py", "max_forks_repo_name": "rtloftin/strategically_efficient_rl", "max_forks_repo_head_hexsha": "85a702b9361211d345a58cc60696e4e851d48ec4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-02T17:30:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T12:16:29.000Z", "avg_line_length": 23.5185185185, "max_line_length": 60, "alphanum_fraction": 0.5149606299, "include": true, "reason": "import numpy", "num_tokens": 172}
|
import numpy as np
import os
# plotting settings
import plot_settings
import matplotlib.pyplot as plt
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..",))
from frius import time2distance, das_beamform, image_bf_data
"""
User parameters
"""
min_depth = 0.01575
max_depth = 0.075
"""
Probe + raw data
"""
# probe/medium parameters
samp_freq = 50e6
center_freq = 5e6
dx = 0.9375*1e-3
speed_sound = 6300
bw_pulse = 1.0
pulse_dur = 500e-9
n_cycles = 0.5
# signed 16 bit integer [-128,127]
ndt_rawdata = np.genfromtxt(os.path.join(os.path.dirname(__file__), '..', 'data', 'ndt_rawdata.csv'), delimiter=',')
n_samples = len(ndt_rawdata)
time_vec = np.arange(n_samples)/samp_freq
depth = time2distance(time_vec[-1], speed_sound)
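# time2distance presumably applies the pulse-echo convention d = c*t/2,
# so `depth` is the one-way range covered by the full record length.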
n_elem_tx = ndt_rawdata.shape[1]
probe_geometry = np.arange(n_elem_tx)*dx
"""
DAS beamform
"""
res = das_beamform(ndt_rawdata.T, samp_freq,
dx, probe_geometry, center_freq, speed_sound, depth)[0]
scal_fact = 1e2
image_bf_data(res, probe_geometry, depth, dynamic_range=40, scal_fact=scal_fact)
plt.xlabel("Lateral [cm]")
plt.ylabel("Axial [cm]")
plt.ylim([depth*scal_fact, 0])
plt.tight_layout()
fp = os.path.join(os.path.dirname(__file__), "figures", "_fig4p4a.png")
plt.savefig(fp, dpi=300)
"""
Single RF signal
"""
chan_idx = 0
scal_fact = 1e6
plt.figure()
plt.plot(scal_fact*time_vec, ndt_rawdata[:,chan_idx])
plt.grid()
plt.xlabel("Time [microseconds]")
ax = plt.gca()
ax.axes.yaxis.set_ticklabels([])
plt.tight_layout()
fp = os.path.join(os.path.dirname(__file__), "figures", "_fig4p4b.pdf")
plt.savefig(fp, dpi=300)
plt.show()
|
{"hexsha": "7b5f444fccb66fa908fd254537685e256cfdfad2", "size": 1594, "ext": "py", "lang": "Python", "max_stars_repo_path": "report_results/fig4p4_visualize_nde.py", "max_stars_repo_name": "ebezzam/frius", "max_stars_repo_head_hexsha": "c3acc98288c949085b7dea08ef3708581f86ce25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "report_results/fig4p4_visualize_nde.py", "max_issues_repo_name": "ebezzam/frius", "max_issues_repo_head_hexsha": "c3acc98288c949085b7dea08ef3708581f86ce25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report_results/fig4p4_visualize_nde.py", "max_forks_repo_name": "ebezzam/frius", "max_forks_repo_head_hexsha": "c3acc98288c949085b7dea08ef3708581f86ce25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-26T10:10:33.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-26T10:10:33.000Z", "avg_line_length": 22.1388888889, "max_line_length": 116, "alphanum_fraction": 0.7308657465, "include": true, "reason": "import numpy", "num_tokens": 481}
|
import os
from pathlib import Path
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
# TODO(arl): allow depth for volumetric data
DIMENSIONS = ["height", "width", "channels"]
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def write_dataset(
filename: str, images: np.ndarray, labels: Optional[np.ndarray] = None
):
"""Write out a TF record of image data for training models.
Parameters
----------
filename : str
The filename of the TFRecordFile.
images : np.ndarray
An array of images of the format N(D)HWC.
labels : np.ndarray, optional
An array of labels.
"""
if not filename.endswith(".tfrecord"):
filename = f"{filename}.tfrecord"
assert images.dtype in (
np.uint8,
np.uint16,
)
assert images.ndim > 2 and images.ndim < 6
if labels is not None:
assert images.shape[0] == labels.shape[0]
with tf.io.TFRecordWriter(filename) as writer:
for idx, data in enumerate(images):
feature = {
"train/image": _bytes_feature(data.tostring()),
"train/width": _int64_feature(data.shape[1]),
"train/height": _int64_feature(data.shape[0]),
"train/channels": _int64_feature(data.shape[-1]),
}
if labels is not None:
label = labels[idx]
feature.update({"train/label": _int64_feature(label)})
features = tf.train.Features(feature=feature)
example = tf.train.Example(features=features)
# write out the serialized features
writer.write(example.SerializeToString())
def parse_tfrecord(
serialized_example,
output_shape: Optional[tuple] = None,
read_label: bool = False,
read_weights: bool = False,
):
"""Parse input images and return the one_hot label encoding.
Parameters
----------
serialized_example : tf.Tensor
The serialized example to be parsed.
output_shape : tuple, None
Optional parameter to non-dynamically define output shape. If none, the
shape is determined from the dimensions stored in the TFRecord.
read_label : bool
Read a label encoded in the file.
read_weights : bool
Read weights encoded in the example (accepted but currently unused).
Returns
-------
image : tf.Tensor
The image as a tf.float32 tensor.
"""
feature = {
f"train/{dim}": tf.io.FixedLenFeature([], tf.int64) for dim in DIMENSIONS
}
feature.update({"train/image": tf.io.FixedLenFeature([], tf.string)})
if read_label:
feature.update({"train/label": tf.io.FixedLenFeature([], tf.int64)})
features = tf.io.parse_single_example(
serialized=serialized_example, features=feature
)
# convert the image data from string back to the numbers
image_raw = tf.io.decode_raw(features["train/image"], tf.uint8)  # assumes uint8 payloads; uint16 images would need tf.uint16
# get the image dimensions
if output_shape is None:
output_shape = [features[f"train/{dim}"] for dim in DIMENSIONS]
image = tf.cast(tf.reshape(image_raw, output_shape), tf.float32)
if read_label:
label = features["train/label"]
return image, label
else:
return image
def per_channel_normalize(x: tf.Tensor) -> tf.Tensor:
"""Independently normalize each channel of an image to zero mean, unit
variance."""
stack = []
for dim in range(x.shape[-1]):
channel = tf.expand_dims(x[..., dim], -1)
normalized = tf.squeeze(tf.image.per_image_standardization(channel))
stack.append(normalized)
x = tf.stack(stack, axis=-1)
x = tf.clip_by_value(x, -4.0, 4.0)
return x
def build_dataset(files: Union[List[os.PathLike], os.PathLike], **kwargs):
"""Build a TF Dataset from a list of TFRecordFiles. Map the parser to it.
Parameters
----------
files : str, list[str]
The list of TFRecord files to use for the dataset.
Returns
-------
dataset : tf.data.Dataset
The TF dataset.
"""
# parse the input
if not isinstance(files, list):
fn, ext = os.path.splitext(files)
if ext != ".tfrecord":  # splitext keeps the leading dot
pth = Path(files)
files = [pth / f for f in os.listdir(files) if f.endswith(".tfrecord")]
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(lambda x: parse_tfrecord(x, **kwargs), num_parallel_calls=8)
return dataset
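# Minimal end-to-end sketch (hypothetical file name and shapes):
#   imgs = np.random.randint(0, 255, size=(8, 64, 64, 2), dtype=np.uint8)
#   write_dataset("demo", imgs, labels=np.arange(8))
#   ds = build_dataset(["demo.tfrecord"], read_label=True)
#   ds = ds.map(lambda x, y: (per_channel_normalize(x), y)).batch(4)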
|
{"hexsha": "d904265a92d743b7f1db25a11a6d9edda143febc", "size": 4731, "ext": "py", "lang": "Python", "max_stars_repo_path": "cellx/tools/dataset.py", "max_stars_repo_name": "nthndy/cellx", "max_stars_repo_head_hexsha": "56a22099beeba59401d6882b6d6b0010718c0376", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cellx/tools/dataset.py", "max_issues_repo_name": "nthndy/cellx", "max_issues_repo_head_hexsha": "56a22099beeba59401d6882b6d6b0010718c0376", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cellx/tools/dataset.py", "max_forks_repo_name": "nthndy/cellx", "max_forks_repo_head_hexsha": "56a22099beeba59401d6882b6d6b0010718c0376", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0245398773, "max_line_length": 86, "alphanum_fraction": 0.6334812936, "include": true, "reason": "import numpy", "num_tokens": 1086}
|
using HTTP
using JSON3
using SQLite
using ZulipSnippetBot
include("configuration.jl")
setupbot!(token = TOKEN, host = HOST, port = PORT)
const db = SQLite.DB(DB)
ZulipSnippetBot.run(db)
|
{"hexsha": "99e978898f8507492fb95c19ba0599942bd778f5", "size": 188, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "snippetserver.jl", "max_stars_repo_name": "Arkoniak/ZulipSnippetBot", "max_stars_repo_head_hexsha": "c1789a29bb8c010859784ddc19c009e9e6eecdcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-19T16:11:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-19T16:11:32.000Z", "max_issues_repo_path": "snippetserver.jl", "max_issues_repo_name": "Arkoniak/ZulipSnippetBot", "max_issues_repo_head_hexsha": "c1789a29bb8c010859784ddc19c009e9e6eecdcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "snippetserver.jl", "max_forks_repo_name": "Arkoniak/ZulipSnippetBot", "max_forks_repo_head_hexsha": "c1789a29bb8c010859784ddc19c009e9e6eecdcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0909090909, "max_line_length": 50, "alphanum_fraction": 0.7606382979, "num_tokens": 53}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 10 10:16:49 2018
@author: paul
"""
import numpy as np
from scipy.stats import expon
import matplotlib.pyplot as plt
experiments = 100
winningprice = expon(-3, 5)
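# scipy's expon(-3, 5) is a shifted exponential with loc=-3 and scale=5,
# so the mean winning price is loc + scale = 2.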
def buy(price):
win = winningprice.rvs()
return price < win
def purchases(price):
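# numpy makes this implicitly vectorized: when `price` is an array,
# `buy(price)` returns a boolean array and `tot` accumulates per-price counts.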
tot = 0
for i in range(experiments):
tot += buy(price)
return tot
def revenue(price):
return purchases(price) * price
def sampledata(samples):
price = np.linspace(0, 30, samples)
conversions = purchases(price)
revenue = price * conversions
return price, conversions, revenue
if __name__ == "__main__":
fig = plt.figure()
# generate some synthetic data
# price is the price point tested
# conversions is the amount of people that bought the item
# rev is the revenue that we got from those purchases
price, conversions, rev = sampledata(30)
# show the conversions
ax = plt.subplot(3, 1, 1)
plt.plot(price, conversions, "bo", lw=2, label="conversions per 100 impressions")
plt.legend()
# show the revenue
ax = plt.subplot(3, 1, 2)
plt.plot(price, rev, "bo", lw=2, label="revenue per 100 impressions")
plt.legend()
# winning price distribution
# a person will buy an item if his winning price is <= price
ax = plt.subplot(3, 1, 3)
x = np.linspace(0, 30, 30)
plt.plot(x, winningprice.pdf(x), label="Winning price distribution")
plt.legend()
plt.show()
|
{"hexsha": "a005636b62961bb6e3e036dc2a6ec0120df43d7a", "size": 1538, "ext": "py", "lang": "Python", "max_stars_repo_path": "data.py", "max_stars_repo_name": "paulpach/pricingengine", "max_stars_repo_head_hexsha": "0feaa3819142370af9b85965f3da32dbff9f59ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data.py", "max_issues_repo_name": "paulpach/pricingengine", "max_issues_repo_head_hexsha": "0feaa3819142370af9b85965f3da32dbff9f59ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data.py", "max_forks_repo_name": "paulpach/pricingengine", "max_forks_repo_head_hexsha": "0feaa3819142370af9b85965f3da32dbff9f59ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.661971831, "max_line_length": 85, "alphanum_fraction": 0.6436931079, "include": true, "reason": "import numpy,from scipy", "num_tokens": 414}
|
import os
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from models import basenet
from models import dataloader
from models.cifar_core import CifarModel
import utils
class CifarGradProjAdv(CifarModel):
def __init__(self, opt):
super(CifarGradProjAdv, self).__init__(opt)
self.training_ratio = opt['training_ratio']
self.alpha = opt['alpha']
def set_network(self, opt):
"""Define the network"""
self.class_network = basenet.ResNet18(opt['output_dim']).to(self.device)
self.domain_network = nn.Linear(opt['output_dim'], 2).to(self.device)
def set_data(self, opt):
"""Set up the dataloaders"""
data_setting = opt['data_setting']
with open(data_setting['train_data_path'], 'rb') as f:
train_array = pickle.load(f)
mean = tuple(np.mean(train_array / 255., axis=(0, 1, 2)))
std = tuple(np.std(train_array / 255., axis=(0, 1, 2)))
normalize = transforms.Normalize(mean=mean, std=std)
if data_setting['augment']:
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_data = dataloader.CifarDatasetWithDomain(data_setting['train_data_path'],
data_setting['train_label_path'],
data_setting['domain_label_path'],
transform_train)
test_color_data = dataloader.CifarDatasetWithDomain(data_setting['test_color_path'],
data_setting['test_label_path'],
data_setting['domain_label_path'],
transform_test)
test_gray_data = dataloader.CifarDatasetWithDomain(data_setting['test_gray_path'],
data_setting['test_label_path'],
data_setting['domain_label_path'],
transform_test)
self.train_loader = torch.utils.data.DataLoader(
train_data, batch_size=opt['batch_size'],
shuffle=True, num_workers=1)
self.test_color_loader = torch.utils.data.DataLoader(
test_color_data, batch_size=opt['batch_size'],
shuffle=False, num_workers=1)
self.test_gray_loader = torch.utils.data.DataLoader(
test_gray_data, batch_size=opt['batch_size'],
shuffle=False, num_workers=1)
def set_optimizer(self, opt):
optimizer_setting = opt['optimizer_setting']
self.class_optimizer = optimizer_setting['optimizer'](
params=self.class_network.parameters(),
lr=optimizer_setting['lr'],
weight_decay=optimizer_setting['weight_decay']
)
self.domain_optimizer = optimizer_setting['optimizer'](
params=self.domain_network.parameters(),
lr=optimizer_setting['lr'],
weight_decay=optimizer_setting['weight_decay']
)
def state_dict(self):
state_dict = {
'class_network': self.class_network.state_dict(),
'domain_network': self.domain_network.state_dict(),
'class_optimizer': self.class_optimizer.state_dict(),
'domain_optimizer': self.domain_optimizer.state_dict(),
'epoch': self.epoch
}
return state_dict
def _train(self, loader):
"""Train the model for one epoch"""
self.class_network.train()
self.domain_network.train()
train_class_loss = 0
train_domain_loss = 0
total = 0
class_correct = 0
domain_correct = 0
for i, (images, class_labels, domain_labels) in enumerate(loader):
images, class_labels, domain_labels = images.to(self.device), \
class_labels.to(self.device), domain_labels.to(self.device)
class_outputs, features = self.class_network(images)
domain_outputs = self.domain_network(class_outputs)
class_loss = self._criterion(class_outputs, class_labels)
domain_loss = self._criterion(domain_outputs, domain_labels)
# Update the domain classifier
domain_grad = torch.autograd.grad(domain_loss, self.domain_network.parameters(),
retain_graph=True)
for param, grad in zip(self.domain_network.parameters(), domain_grad):
param.grad = grad
self.domain_optimizer.step()
# Update the main network
if self.epoch % self.training_ratio == 0:
grad_from_class = torch.autograd.grad(class_loss, self.class_network.parameters(),
retain_graph=True)
grad_from_domain = torch.autograd.grad(domain_loss, self.class_network.parameters(),
retain_graph=True)
for param, class_grad, domain_grad in zip(self.class_network.parameters(), grad_from_class,
grad_from_domain):
# Gradient projection
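# The projected update removes the component of the task gradient that
# points along the domain gradient, on top of the scaled adversarial term:
#   g = g_class - alpha * g_domain - (<g_class, g_domain> / ||g_domain||^2) * g_domain
# (the two norm divisions below implement the 1/||g_domain||^2 factor).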
if domain_grad.norm() > 1e-5:
param.grad = class_grad - self.alpha*domain_grad - \
((class_grad*domain_grad).sum()/domain_grad.norm()) \
* (domain_grad/domain_grad.norm())
else:
param.grad = class_grad - self.alpha*domain_grad
self.class_optimizer.step()
total += class_labels.size(0)
train_class_loss += class_loss.item()
_, class_predicted = class_outputs.max(1)
class_correct += class_predicted.eq(class_labels).sum().item()
train_domain_loss += domain_loss.item()
_, domain_predicted = domain_outputs.max(1)
domain_correct += domain_predicted.eq(domain_labels).sum().item()
train_result = {
'class_loss': class_loss.item(),
'domain_loss': domain_loss.item(),
'class_accuracy': 100.*class_correct/total,
'domain_accuracy': 100.*domain_correct/total
}
self.log_result('Train_iteraion', train_result,
len(loader)*self.epoch + i)
if self.print_freq and (i % self.print_freq == 0):
print('Training epoch: {} [{}|{}], class loss:{}, class accuracy:{}, '\
'domain loss: {}, domain accuracy: {}'.format(
self.epoch, i+1, len(loader), class_loss, 100.*class_correct/total,
domain_loss, 100.*domain_correct/total))
self.epoch += 1
def _test(self, loader):
"""Test the model performance"""
self.class_network.eval()
self.domain_network.eval()
total = 0
class_correct = 0
domain_correct = 0
test_class_loss = 0
test_domain_loss = 0
feature_list = []
class_output_list = []
domain_output_list = []
class_predict_list = []
domain_predict_list = []
with torch.no_grad():
for i, (images, class_labels, domain_labels) in enumerate(loader):
images, class_labels, domain_labels = images.to(self.device), \
class_labels.to(self.device), domain_labels.to(self.device)
class_outputs, features = self.class_network(images)
domain_outputs = self.domain_network(class_outputs)
class_loss = self._criterion(class_outputs, class_labels)
domain_loss = self._criterion(domain_outputs, domain_labels)
test_class_loss += class_loss.item()
test_domain_loss += domain_loss.item()
total += class_labels.size(0)
_, class_predicted = class_outputs.max(1)
class_correct += class_predicted.eq(class_labels).sum().item()
_, domain_predicted = domain_outputs.max(1)
domain_correct += domain_predicted.eq(domain_labels).sum().item()
class_predict_list.extend(class_predicted.tolist())
domain_predict_list.extend(domain_predicted.tolist())
class_output_list.append(class_outputs.cpu().numpy())
domain_output_list.append(domain_outputs.cpu().numpy())
feature_list.append(features.cpu().numpy())
test_result = {
'class_loss': test_class_loss/len(loader),
'domain_loss': test_domain_loss/len(loader),
'class_accuracy': 100.*class_correct/total,
'domain_accuracy': 100.*domain_correct/total,
'class_predict_labels': class_predict_list,
'domain_predict_labels': domain_predict_list,
'class_outputs': np.vstack(class_output_list),
'domain_outputs': np.vstack(domain_output_list),
'features': np.vstack(feature_list)
}
return test_result
def train(self):
self._train(self.train_loader)
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'ckpt.pth'))
def test(self):
# Test and save the result
test_color_result = self._test(self.test_color_loader)
test_gray_result = self._test(self.test_gray_loader)
utils.save_pkl(test_color_result, os.path.join(self.save_path, 'test_color_result.pkl'))
utils.save_pkl(test_gray_result, os.path.join(self.save_path, 'test_gray_result.pkl'))
# Output the classification accuracy on test set
info = ('Test on color images accuracy: {}, domain accuracy; {}\n'
'Test on gray images accuracy: {}, domain accuracy: {}'
.format(test_color_result['class_accuracy'], test_color_result['domain_accuracy'],
test_gray_result['class_accuracy'], test_gray_result['domain_accuracy']))
utils.write_info(os.path.join(self.save_path, 'test_result.txt'), info)
|
{"hexsha": "2078085c4d7572b4955ddaa6be4082f7ac592a9b", "size": 11407, "ext": "py", "lang": "Python", "max_stars_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/cifar_gradproj_adv.py", "max_stars_repo_name": "lin-tan/fairness-variance", "max_stars_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/cifar_gradproj_adv.py", "max_issues_repo_name": "lin-tan/fairness-variance", "max_issues_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/cifar_gradproj_adv.py", "max_forks_repo_name": "lin-tan/fairness-variance", "max_forks_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.9959677419, "max_line_length": 108, "alphanum_fraction": 0.5484351714, "include": true, "reason": "import numpy", "num_tokens": 2028}
|
import copy
from gym.wrappers import TransformReward
import numpy as np
from ray.rllib.env.atari_wrappers import FrameStack
from ray.tune import registry
from envs.frame_diff import FrameDiff
from envs.frame_stack_phase_correlation import FrameStackPhaseCorrelation
from envs.grayscale import Grayscale
from envs.mixed_grayscale_color_frame_stack import MixedGrayscaleColorFrameStack
from envs.procgen_env_wrapper import ProcgenEnvWrapper
from envs.reward_normalization_wrapper import RewardNormalizationWrapper
from envs.state_occupancy_counter import StateOccupancyCounter
def wrap_procgen(env,
frame_diff=False,
frame_diff_options={},
frame_stack=False,
frame_stack_options={},
frame_stack_phase_correlation=False,
frame_stack_phase_correlation_options={},
normalize_reward=False,
normalize_reward_options={},
grayscale=False,
mixed_grayscale_color=False,
mixed_grayscale_color_options={},
count_state_occupancy=False):
env_name = env.env_name
if count_state_occupancy:
env = StateOccupancyCounter(env)
if frame_diff:
env = FrameDiff(env, **frame_diff_options)
if grayscale:
assert not frame_diff
assert not frame_stack_phase_correlation
env = Grayscale(env)
if frame_stack:
env = FrameStack(env, **frame_stack_options)
if frame_stack_phase_correlation:
env = FrameStackPhaseCorrelation(env, **frame_stack_phase_correlation_options)
if mixed_grayscale_color:
assert not frame_diff
assert not grayscale
assert not frame_stack_phase_correlation
assert not frame_stack
env = MixedGrayscaleColorFrameStack(env, **mixed_grayscale_color_options)
if normalize_reward:
env = RewardNormalizationWrapper(env, **normalize_reward_options)
return env
def create_env(config):
config = copy.deepcopy(config)
if "env_wrapper_options" in config:
env_wrapper_options = config["env_wrapper_options"]
del config["env_wrapper_options"]
else:
env_wrapper_options = {}
env = ProcgenEnvWrapper(config)
env = wrap_procgen(env, **env_wrapper_options)
return env
registry.register_env("custom_procgen_env_wrapper", create_env)
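# Minimal usage sketch (config keys are assumptions, not from this repo):
#   env = create_env({
#       "env_name": "coinrun",
#       "env_wrapper_options": {"frame_stack": True,
#                               "frame_stack_options": {"k": 4}},
#   })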
|
{"hexsha": "cf93e1e5e9d1fd074e2210bf85124cd40da0000f", "size": 2403, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/custom_procgen_env_wrapper.py", "max_stars_repo_name": "wulfebw/neurips2020-procgen", "max_stars_repo_head_hexsha": "e131684cfa15188473873144933fc73bd54a2e60", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "envs/custom_procgen_env_wrapper.py", "max_issues_repo_name": "wulfebw/neurips2020-procgen", "max_issues_repo_head_hexsha": "e131684cfa15188473873144933fc73bd54a2e60", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "envs/custom_procgen_env_wrapper.py", "max_forks_repo_name": "wulfebw/neurips2020-procgen", "max_forks_repo_head_hexsha": "e131684cfa15188473873144933fc73bd54a2e60", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8656716418, "max_line_length": 86, "alphanum_fraction": 0.7153558052, "include": true, "reason": "import numpy", "num_tokens": 482}
|