text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
from os import path, makedirs
from shutil import rmtree
from datetime import datetime, timedelta
import platform
import pandas as pd
import numpy as np
import requests
from tinydb import TinyDB, Query
import pytest
import publicAPI.forecast_utils as forecast_utils
import publicAPI.exceptions as exceptions
import helpers
HERE = path.abspath(path.dirname(__file__))  # directory containing this test file
ROOT = path.dirname(HERE)  # parent directory (project root)
CONFIG_FILENAME = path.join(HERE, 'test_config.cfg')  # test settings live beside the tests
CONFIG = helpers.get_config(CONFIG_FILENAME)  # config object; read via .get(section, option)
def test_fetch_emd_history(config=CONFIG):
    """happypath test for `fetch_market_history_emd`

    Fetches market history from the EMD endpoint and validates the
    payload shape, then cross-checks returned dates against local db
    values.  The date-count comparison depends on external data
    freshness, so failures there xfail rather than hard-fail.
    """
    data = forecast_utils.fetch_market_history_emd(
        region_id=config.get('TEST', 'region_id'),
        type_id=config.get('TEST', 'type_id'),
        data_range=config.get('TEST', 'history_count'),
        config=config
    )

    ## validate contents ##
    assert data['version'] == 2
    data_keys = data['columns'].split(',')
    for entry in data['result']:
        # every row must carry exactly the advertised columns
        assert set(data_keys) == set(entry['row'].keys())

    ## validate count ##
    try:
        db_data = helpers.check_db_values(
            region_id=config.get('TEST', 'region_id'),
            type_id=config.get('TEST', 'type_id'),
            data_range=config.get('TEST', 'history_count'),
            config=config
        )
        mismatch_dates = helpers.compare_dates(
            data['result'],
            db_data
        )
        assert len(mismatch_dates) <= int(config.get('TEST', 'miss_budget'))
    except Exception as err:
        # BUGFIX: message previously concatenated the exception text into
        # the `history_count` format argument; format each piece explicitly
        pytest.xfail(
            'Unable to validate date counts: {0}/{1}\n\texception= {2}'.format(
                len(data['result']),
                config.get('TEST', 'history_count'),
                repr(err)
            ))
def test_fetch_emd_history_fail(config=CONFIG):
    """failure-path tests for `fetch_market_history_emd`

    - an unreachable endpoint address should surface as an HTTPError
    - a bad type_id should raise NoDataReturned
    """
    # NOTE: return values intentionally discarded -- only the raise matters
    with pytest.raises(requests.exceptions.HTTPError):
        forecast_utils.fetch_market_history_emd(
            region_id=config.get('TEST', 'region_id'),
            type_id=config.get('TEST', 'type_id'),
            data_range=config.get('TEST', 'history_count'),
            config=config,
            endpoint_addr='http://www.eveprosper.com/noendpoint'
        )

    with pytest.raises(exceptions.NoDataReturned):
        forecast_utils.fetch_market_history_emd(
            region_id=config.get('TEST', 'region_id'),
            type_id=config.get('TEST', 'bad_typeid'),
            data_range=config.get('TEST', 'history_count'),
            config=config
        )
# Canned EMD "history" payload used as a fixture by the parse/cache tests
# below.  Mirrors the shape of a real response: `columns` names the keys
# of each entry's `row` dict, and all row values arrive as strings.
# Covers three consecutive dates for typeID 38 / regionID 10000002.
DEMO_DATA = {
    'version': 2,
    'currentTime': datetime.now().isoformat(),
    'name':'history',
    'key': 'typeID,regionID,date',
    'columns': 'typeID,regionID,date,lowPrice,highPrice,avgPrice,volume,orders',
    'result':[
        {'row': {
            'typeID': '38',
            'regionID': '10000002',
            'date': '2015-03-28',
            'lowPrice': '674.02',
            'highPrice': '682.65',
            'avgPrice': '681.99',
            'volume': '43401081',
            'orders': '1808'
        }},
        {'row': {
            'typeID': '38',
            'regionID': '10000002',
            'date': '2015-03-29',
            'lowPrice': '677.29',
            'highPrice': '681.95',
            'avgPrice': '681.89',
            'volume': '46045538',
            'orders': '1770'
        }},
        {'row': {
            'typeID': '38',
            'regionID': '10000002',
            'date': '2015-03-30',
            'lowPrice': '678.93',
            'highPrice': '684',
            'avgPrice': '679.14',
            'volume': '56083217',
            'orders': '1472'
        }}
    ]
}
def test_parse_emd_data():
    """happypath test for refactoring EMD data"""
    parsed = forecast_utils.parse_emd_data(DEMO_DATA['result'])

    # parser must hand back a pandas DataFrame
    assert isinstance(parsed, pd.DataFrame)

    # frame columns must match the advertised EMD column list
    expected_cols = set(DEMO_DATA['columns'].split(','))
    assert set(parsed.columns.values) == expected_cols

    # one frame row per EMD result entry
    assert len(parsed.index) == len(DEMO_DATA['result'])
def test_parse_emd_data_fail():
    """make sure behavior is expected for failure

    Passing the full EMD payload (a dict) instead of its `result` list
    should raise TypeError.
    """
    with pytest.raises(TypeError):
        forecast_utils.parse_emd_data(DEMO_DATA)  # result discarded; only the raise matters
TEST_DATA_PATH = path.join(HERE, 'sample_emd_data.csv')  # input history fixture
TEST_PREDICT_PATH = path.join(HERE, 'sample_emd_predict.csv')  # expected forecast fixture
@pytest.mark.prophet
def test_build_forecast(config=CONFIG):
    """try to build a forecast

    Runs `build_forecast` over the sample CSV fixture and checks:
    - the expected output columns are present
    - the forecast extends exactly `forecast_range` days past the input
    - non-float columns match the pre-generated prediction fixture
      (float columns are currently skipped -- see TODO below)
    """
    test_data = pd.read_csv(TEST_DATA_PATH)
    test_data['date'] = pd.to_datetime(test_data['date'])
    max_date = test_data['date'].max()  # last date of real history
    expected_rows = [
        'date',
        'avgPrice',
        'yhat',
        'yhat_low',
        'yhat_high',
        'prediction'
    ]
    predict_data = forecast_utils.build_forecast(
        test_data,
        int(config.get('TEST', 'forecast_range'))
    )
    headers = list(predict_data.columns.values)
    assert set(expected_rows) == set(headers)
    # forecast should reach exactly forecast_range days past the history
    assert predict_data['date'].max() == \
        max_date + timedelta(days=int(config.get('TEST', 'forecast_range')))
    expected_prediction = pd.read_csv(TEST_PREDICT_PATH)
    expected_prediction['date'] = pd.to_datetime(expected_prediction['date'])
    float_limit = float(config.get('TEST', 'float_limit'))
    for key in expected_rows:
        print(key)  # aid debugging on mismatch
        print(predict_data[key].dtype)
        if predict_data[key].dtype == np.float64:
            # float columns deliberately unchecked for now
            pass
            #TODO: ubuntu systems have >0.1 spread on values
            #unique_vals = predict_data[key] - expected_prediction[key]
            #for val in unique_vals.values:
            #    assert (abs(val) < float_limit) or (np.isnan(val)) #float noise
        else:
            assert predict_data[key].equals(expected_prediction[key])
@pytest.mark.prophet
def test_forecast_truncate(config=CONFIG):
    """make sure truncate functionality works"""
    sample_frame = pd.read_csv(TEST_DATA_PATH)
    sample_frame['date'] = pd.to_datetime(sample_frame['date'])
    last_history_date = sample_frame['date'].max()

    keep_days = int(config.get('TEST', 'truncate_range'))
    forecast_frame = forecast_utils.build_forecast(
        sample_frame,
        int(config.get('TEST', 'forecast_range')),
        keep_days
    )

    # truncation should keep exactly `keep_days` worth of trailing history
    earliest_expected = last_history_date - timedelta(days=keep_days - 1)
    assert forecast_frame['date'].min() == earliest_expected
@pytest.mark.incremental
class TestPredictCache:
    """test cache tools in Prediction toolset

    Ordered (incremental) tests: clear cache, probe empty cache, then
    exercise first write.  Cache is a TinyDB json file under ./cache.
    """
    cache_path = path.join(HERE, 'cache')
    cache_file = 'prophet.json'
    cache_filepath = path.join(cache_path, cache_file)
    type_id = int(CONFIG.get('TEST', 'type_id'))
    region_id = int(CONFIG.get('TEST', 'region_id'))

    def test_clear_existing_cache(self):
        """clean up cache path before testing"""
        helpers.clear_caches()

    def test_empty_cache(self):
        """test un-cached behavior"""
        data = forecast_utils.check_prediction_cache(
            self.region_id,
            self.type_id,
            cache_path=self.cache_path
        )
        # no cache hit expected...
        assert data is None
        # ...but the lookup should have created an empty cache file
        assert path.isfile(self.cache_filepath)
        tdb = TinyDB(self.cache_filepath)
        assert tdb.all() == []
        tdb.close()

    def test_write_first_cache(self):
        """test write behavior on first pass (cache-buster mode)"""
        self.test_clear_existing_cache() #blowup existing cache again
        dummy_data = forecast_utils.parse_emd_data(DEMO_DATA['result'])
        forecast_utils.write_prediction_cache(
            self.region_id,
            self.type_id,
            dummy_data,
            cache_path=self.cache_path
        )
        assert path.isfile(self.cache_filepath)

        tdb = TinyDB(self.cache_filepath)
        data = tdb.all()[0]
        keys_list = [
            'cache_date',
            'region_id',
            'type_id',
            'lastWrite',
            'prediction'
        ]
        assert set(keys_list) == set(data.keys())
        dummy_str_data = dummy_data.to_json(
            date_format='iso',
            orient='records'
        )
        # BUGFIX: removed unused `cached_data = pd.read_json(...)` local;
        # cached payload is compared directly against the serialized frame
        assert data['prediction'] == dummy_str_data
        tdb.close()
def test_check_requested_range():
    """validate `check_requested_range()` func"""
    # in-range values pass through unchanged
    assert forecast_utils.check_requested_range(10) == 10
    # out-of-range values are clamped to max_range by default
    assert forecast_utils.check_requested_range(1000, max_range=180) == 180

    # with raise_for_status, out-of-range raises instead of clamping
    with pytest.raises(exceptions.InvalidRangeRequested):
        forecast_utils.check_requested_range(1000, max_range=180, raise_for_status=True)

    # narrowed from bare `except Exception`: only the documented error type
    # should carry the status/message payload asserted here
    try:
        forecast_utils.check_requested_range(1000, max_range=180, raise_for_status=True)
    except exceptions.InvalidRangeRequested as err_msg:
        assert err_msg.status == 413
        assert isinstance(err_msg.message, str)
|
{"hexsha": "e3cb3ebec22097cceb6548c63c20131d8ff6f362", "size": 8906, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_forecast_utils.py", "max_stars_repo_name": "EVEprosper/ProsperAPI", "max_stars_repo_head_hexsha": "2d25b9210d32ca777204b1dddb56848d7075dd85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-03-27T13:10:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-30T09:33:11.000Z", "max_issues_repo_path": "tests/test_forecast_utils.py", "max_issues_repo_name": "EVEprosper/ProsperAPI", "max_issues_repo_head_hexsha": "2d25b9210d32ca777204b1dddb56848d7075dd85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2016-11-14T00:58:54.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-11T16:54:25.000Z", "max_forks_repo_path": "tests/test_forecast_utils.py", "max_forks_repo_name": "EVEprosper/ProsperAPI", "max_forks_repo_head_hexsha": "2d25b9210d32ca777204b1dddb56848d7075dd85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-04-19T01:12:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T02:23:45.000Z", "avg_line_length": 32.035971223, "max_line_length": 95, "alphanum_fraction": 0.618122614, "include": true, "reason": "import numpy", "num_tokens": 2045}
|
\documentclass[a4paper,11pt]{jctvcdoc}
\usepackage{geometry}[2010/02/12]
\usepackage{hyperref}
\hypersetup{colorlinks=true}
\usepackage{color,soul}
\usepackage[position=bottom]{subfig}
\captionsetup[subfloat]{position=top}
\usepackage{multirow}
\usepackage{dcolumn}
\newcolumntype{.}{D{.}{.}{-1}}
\usepackage{colortbl}
\usepackage{makecell}
\usepackage{longtable}
\usepackage{array}
\usepackage{algorithm2e}
\usepackage[strings]{underscore}
\usepackage{csquotes}
\MakeOuterQuote{"}
\EnableQuotes
\newcommand\None{}
\newcommand\NotSet{}
\makeatletter
\newcommand{\Option}[1]{\ifx\optOption\@empty\gdef\optOption{#1}\else\g@addto@macro\optOption{ \\ #1}\fi}
\newcommand{\ShortOption}[1]{\ifx\optShortOption\@empty\gdef\optShortOption{#1}\else\g@addto@macro\optShortOption{ \\ #1}\fi}
\newcommand{\Default}[1]{\ifx\optDefault\@empty\gdef\optDefault{#1}\else\g@addto@macro\optDefault{ \\ #1}\fi}
\newcommand{\clearOptions}{\gdef\optOption{}\gdef\optShortOption{}\gdef\optDefault{}}
\makeatother
\newenvironment{OptionTable}[1]{%
\footnotesize
\def\arraystretch{1.8}
\clearOptions
\begin{longtable}{l<{\makecell[tl]{\optOption}}%
>{\texttt\bgroup}l<{\makecell[tl]{\optShortOption}\egroup}%
c<{\makecell[tc]{\optDefault}}%
>{\def\arraystretch{1.0}}p{0.5\textwidth}<{\clearOptions}}
\caption{#1} \\
\hspace*{12em}&&\hspace*{8em}&\kill
\hline
\thead{Option} &
\egroup\thead{Shorthand}\bgroup &
\thead{Default} &
\thead{Description} \\
\hline
\endfirsthead
\caption[]{#1 (Continued)} \\
\hspace*{12em}&&\hspace*{8em}&\kill
\hline
\thead{Option} &
\egroup\thead{Shorthand}\bgroup &
\thead{Default} &
\thead{Description} \\
\hline
\endhead
\multicolumn{4}{r}{Continued...}\\
\hline
\endfoot
\hline
\endlastfoot
}{%
\hline
\end{longtable}
}
\newenvironment{MacroTable}[1]{%
\footnotesize
\def\arraystretch{1.3}
\clearOptions
\begin{longtable}{lcp{0.5\textwidth}}
\caption{#1} \\
%\hspace*{12em}&&\hspace*{8em}&\kill
\hline
\thead{Option} &
\thead{Default} &
\thead{Description} \\
\hline
\endfirsthead
\caption[]{#1 (Continued)} \\
\hline
\thead{Option} &
\thead{Default} &
\thead{Description} \\
\hline
\endhead
\multicolumn{3}{r}{Continued...}\\
\hline
\endfoot
\hline
\endlastfoot
}{%
\end{longtable}
}
\title{HM Software Manual}
\author{%
Frank Bossen
\email{bossen@docomoinnovations.com}
\and
David Flynn
\email{davidf@rd.bbc.co.uk}
\and
Karsten S\"uhring
\email{Karsten.Suehring@hhi.fraunhofer.de}
}
\jctvcmeeting{}
\jctvcdocnum{Software Manual}
\jctvcdocstatus{Software AHG working document}
\jctvcdocpurpose{Information}
\jctvcdocsource{AHG chairs}
\begin{document}
\maketitle
\begin{abstract}
This document is a user manual describing usage of reference software
for the HEVC project. It applies to version 11.0
of the software.
\end{abstract}
\tableofcontents
\listoftables
\section{General Information}
Reference software is being made available to provide a reference
implementation of the draft HEVC standard being developed by the Joint
Collaborative Team on Video Coding (JCT-VC) regrouping experts from
ITU-T SG 16 and ISO/IEC SC29 WG11. One of the main goals of the
reference software is to provide a basis upon which to conduct
experiments in order to determine which coding tools provide desired
coding performance. It is not meant to be a particularly efficient
implementation of anything, and one may notice its apparent
unsuitability for a particular use. It should not be construed to be a
reflection of how complex a production-quality implementation of a
future HEVC standard would be.
This document aims to provide guidance on the usage of the reference
software. It is widely suspected to be incomplete and suggestions for
improvements are welcome. Such suggestions and general inquiries may be
sent to the general JCT-VC email reflector on
\url{jct-vc@lists.rwth-aachen.de} (registration required).
\subsection*{Bug reporting}
Bugs should be reported on the issue tracker set up at
\url{http://hevc.kw.bbc.co.uk/trac/}
\section{Installation and compilation}
The software may be retrieved from one of the following SVN servers
(mirrored):
\begin{itemize}
\item \url{https://hevc.hhi.fraunhofer.de/svn/svn_HEVCSoftware/}
\item \url{svn://hevc.kw.bbc.co.uk/svn/jctvc-hm/}
\end{itemize}
Table~\ref{tab:project-files} enumerates various project files that are
provided for development environments.
\begin{table}[ht]
\footnotesize
\caption{Available project files}
\label{tab:project-files}
\centering
\begin{tabular}{ll}
\hline
\thead{Environment} &
\thead{Location of project file} \\
% Environment & Location of project file \\
\hline
MS Visual Studio 8 & build/HM_vc8.sln \\
MS Visual Studio 9 & build/HM_vc9.sln \\
Xcode & HM.xcodeproj \\
Linux & build/linux/makefile \\
\hline
\end{tabular}
\end{table}
%%%%
%%%%
%%%%
\section{Using the encoder}
\begin{verbatim}
TAppEncoder [-h] [-c config.cfg] [--parameter=value]
\end{verbatim}
\begin{table}[ht]
\footnotesize
\centering
\begin{tabular}{lp{0.5\textwidth}}
\hline
\thead{Option} &
\thead{Description} \\
\hline
\texttt{-h} & Prints parameter usage. \\
\texttt{-c} & Defines configuration file to use. Multiple configuration files
may be used with repeated --c options. \\
\texttt{--}\emph{parameter}\texttt{=}\emph{value}
& Assigns value to a given parameter as further described below.
Some parameters are also supported by shorthand
"--\em{opt}~\emph{value}".\\
\hline
\end{tabular}
\end{table}
Sample configuration files are provided in the cfg/ folder.
\subsection{GOP structure table}
\label{sec:gop-structure}
Defines the cyclic GOP structure that will be used repeatedly
throughout the sequence. The table should contain GOPSize lines,
named Frame1, Frame2, etc. The frames are listed in decoding
order, so Frame1 is the first frame in decoding order, Frame2 is
the second and so on. Among other things, the table specifies all
reference pictures kept by the decoder for each frame. This
includes pictures that are used for reference for the current
picture as well as pictures that will be used for reference in
the future. The encoder will not automatically calculate which
pictures have to be kept for future reference; they have to
be specified. Note that some specified reference frames for
pictures encoded in the very first GOP after an IDR frame might
not be available. This is handled automatically by the encoder,
so the reference pictures can be given in the GOP structure table
as if there were infinitely many identical GOPs before the
current one. Each line in the table contains the parameters used
for the corresponding frame, separated by whitespace:
\begin{itemize}
\item[]\textbf{Type}: Slice type, can be either I, P or B.
\item[]\textbf{POC}: Display order of the frame within a GOP, ranging
from 1 to GOPSize.
\item[]\textbf{QPOffset}: QP offset is added to the QP parameter to set
the final QP value to use for this frame.
\item[]\textbf{QPFactor}: Weight used during rate distortion
optimization. Higher values mean lower quality and less bits. Typical
range is between
0.3 and 1.
\item[]\textbf{tcOffsetDiv2}: In-loop deblocking filter parameter tcOffsetDiv2
is added to the base parameter LoopFilterTcOffset_div2 to set the final tc_offset_div2
parameter for this picture signalled in the slice segment header. The final
value of tc_offset_div2 shall be an integer number in the range $-6..6$.
\item[]\textbf{betaOffsetDiv2}: In-loop deblocking filter parameter betaOffsetDiv2
is added to the base parameter LoopFilterBetaOffset_div2 to set the final beta_offset_div2
parameter for this picture signalled in the slice segment header. The final
value of beta_offset_div2 shall be an integer number in the range $-6..6$.
\item[]\textbf{temporal_id}: Temporal layer of the frame. A frame cannot
predict from a frame with a higher temporal id. If a frame with higher
temporal IDs is listed among a frame's reference pictures, it is
not used, but is kept for possible use in future frames.
\item[]\textbf{num_ref_pics_active}: Size of reference picture lists L0
and L1, indicating how many reference pictures in each direction that
are used during coding.
\item[]\textbf{num_ref_pics}: The number of reference pictures kept for
this frame. This includes pictures that are used for reference for the
current picture as well as pictures that will be used for reference in
the future.
\item[]\textbf{reference_pictures}: A space-separated list of
num_ref_pics integers, specifying the POC of the reference pictures
kept, relative the POC of the current frame. The picture list shall be
ordered, first with negative numbers from largest to smallest, followed
by positive numbers from smallest to largest (e.g. \verb|-1 -3 -5 1 3|).
Note that any pictures not supplied in this list will be discarded and
therefore not available as reference pictures later.
\item[]\textbf{predict}: Defines the value of the syntax element
inter_ref_pic_set_prediction_flag. A value of 0 indicates that the
reference picture set is encoded without inter RPS prediction and the
subsequent parameters deltaRIdx$-1$, deltaRPS, num_ref_idcs and
Reference_idcs are ignored and do not need to be present. A value of 1
indicates that the reference picture set is encoded with inter
prediction RPS using the subsequent parameters deltaRIdx$-1$, deltaRPS,
num_ref_idcs and Reference_idcs in the line. A value of 2 indicates that
the reference picture set is encoded with inter RPS but only the
deltaRIdx$-1$ parameter is needed. The deltaRPS, num_ref_idcs and
Reference_idcs values are automatically derived by the encoder based on
the POC and refPic values of the current line and the RPS pointed to by
the deltaRIdx$-1$ parameter.
\item[]\textbf{deltaRIdx$-1$}: The difference between the index of the
current RPS and the predictor RPS minus 1.
\item[]\textbf{deltaRPS}: The difference between the POC of the
predictor RPS and the POC of the current RPS.
\item[]\textbf{num_ref_idcs}: The number of ref_idcs to encode for the
current RPS. The value is equal to the value of num_ref_pics of the
predictor RPS plus 1.
\item[]\textbf{reference_idcs}: A space-separated list of num_ref_idcs
integers, specifying the ref idcs of the inter RPS prediction. The value
of ref_idcs may be 0, 1 or 2 indicating that the reference picture is a
reference picture used by the current picture, a reference picture used
for future picture or not a reference picture anymore, respectively. The
first num_ref_pics of ref_idcs correspond to the Reference pictures in
the predictor RPS. The last ref_idcs corresponds to the predictor
picture.
\end{itemize}
For example, consider the coding structure of Figure~\ref{fig:gop-example}.
This coding structure is of size 4. The pictures are listed in decoding
order. Frame1 shall therefore describe picture with $\textrm{POC}=4$. It
references picture 0, and therefore has $-4$ as a reference picture.
Similarly, Frame2 has a POC of 2, and since it references pictures 0 and
4, its reference pictures are listed as \verb|-2 2|. Frame3 is a special
case: even though it only references pictures with POC 0 and 2, it also
needs to include the picture with POC 4, which must be kept in order to
be used as a reference picture in the future. The reference picture list
for Frame3 therefore becomes \verb|-1 1 3|. Frame4 has a POC of 3 and
its list of reference pictures is \verb|-1 1|.
\begin{figure}[h]
\caption{A GOP structure}
\label{fig:gop-example}
\centering
\includegraphics[width=0.7\textwidth]{gop-structure-example}
\end{figure}
Inter RPS prediction may be used for Frame2, Frame3 and Frame4, hence
the predict parameter is set to 1 for these frames. Frame2 uses Frame1
as the predictor hence the deltaRIdx$-1$ is 0. Similarly for Frame3 and
Frame4 which use Frame2 and Frame3 as predictors, respectively. The
deltaRPS is equal to the POC of the predictor minus the POC of the
current picture, therefore the deltaRPS for Frame2 is $4 -2 = 2$, for
Frame3 is $2 - 1 = 1$ and for Frame4 is $1 - 3 = -2$.
In Frame2, reference pictures with POC 0 and 2 are used, so the
reference idcs for Frame2 are \verb|1 1| indicating that the reference
picture, $-4$, in Frame1 is still a reference picture in Frame2 and
Frame1 is also a reference picture in Frame2. The reference idcs for
Frame3 are \verb|1 1 1|. The first and second “1”s indicating that
the reference pictures "$-2$ $2$" in Frame2 are still reference pictures in
Frame3 and the last “1” indicating that Frame2 is also a reference
picture in Frame3. In Frame 4, the reference idcs are \verb|0 1 1 0|.
The first “0” indicates that the reference pictures “-1” in Frame 3 is
no longer a reference picture in Frame4. The next two “1”s indicate that
the reference pictures “$1$ $3$” are now reference pictures of Frame4.
The final “0” indicates that Frame3 is not a reference picture.
In order to specify this to the encoder, the parameters in
Table~\ref{tab:gop-example} could be used.
\begin{table}[ht]
\footnotesize
\caption{GOP structure example}
\label{tab:gop-example}
\centering
\begin{tabular}{lrrrr}
\hline
\thead{} &
\thead{Frame1} &
\thead{Frame2} &
\thead{Frame3} &
\thead{Frame4} \\
\hline
Type & P & B & B & B \\
POC & 4 & 2 & 1 & 3 \\
QPoffset & 1 & 2 & 3 & 3 \\
QPfactor & 0.5 & 0.5 & 0.5 & 0.5 \\
tcOffsetDiv2 & 0 & 1 & 2 & 2 \\
betaOffsetDiv2 & 0 & 0 & 0 & 0 \\
temporal_id & 0 & 1 & 2 & 2 \\
num_ref_pics_active & 1 & 1 & 1 & 1 \\
num_ref_pics & 1 & 2 & 3 & 2 \\
reference_pictures & $-$4 & $-$2 2 & $-$1 1 3 & $-$1 1 \\
predict & 0 & 1 & 1 & 1 \\
deltaRIdx$-$1 & & 0 & 0 & 0 \\
deltaRPS & & 2 & 1 & $-$2 \\
num_ref_idcs & & 2 & 3 & 4 \\
reference_idcs & & 1 1 & 1 1 1 & 0 1 1 0 \\
\hline
\end{tabular}
\end{table}
Here, the frames used for prediction have been given higher
quality by assigning a lower QP offset. Also, the non-reference
frames have been marked as belonging to a higher temporal layer,
to make it possible to decode only every other frame. Note: each
line should contain information for one frame, so this
configuration would be specified as:
\begin{verbatim}
Frame1: P 4 1 0.5 0 0 0 1 1 -4 0
Frame2: B 2 2 0.5 1 0 1 1 2 -2 2 1 0 2 2 1 1
Frame3: B 1 3 0.5 2 0 2 1 3 -1 1 3 1 0 1 3 1 1 1
Frame4: B 3 3 0.5 2 0 2 1 2 -1 1 1 0 -2 4 0 1 1 0
\end{verbatim}
The values of deltaRIdx$-1$, deltaRPS, num_ref_idcs and reference
idcs of Frame$_K$ can be derived from the POC value of Frame$_K$ and
the POC, num_ref_pics and reference_pictures values of Frame$_M$, where
$K$ is the index of the RPS to be inter coded and $M$ is the
index of the reference RPS, as follows.
\setlength{\algomargin}{2em}
\begin{algorithm}[h]
\SetKwData{deltaRIdx}{deltaRIdx}
\SetKwData{deltaRPS}{deltaRPS}
\SetKwData{numrefidcs}{num_ref_idcs}
\SetKwData{numrefpics}{num_ref_pics}
\SetKwData{referencepictures}{reference_pictures}
\SetKwData{referenceidcs}{reference_idcs}
\SetKwData{POC}{POC}
$\deltaRIdx_K - 1 \leftarrow K - M - 1$ \;
$\deltaRPS_K \leftarrow \POC_M - \POC_K$ \;
$\numrefidcs_K \leftarrow \numrefpics_M + 1$ \;
\For{$j \leftarrow 0$ \KwTo $\numrefpics_M$}{
\For{$i \leftarrow 0$ \KwTo $\numrefidcs_K$}{
\eIf{$\referencepictures_{M,j} + \deltaRPS_K == \referencepictures_{K,i}$}{
\lIf{$\referencepictures_{K,i}$ is used by the current frame}{
$\referenceidcs_{K,j} = 1$} \;
\lElse{$\referenceidcs_{K,j} = 2$} \;
}{
$\referenceidcs_K[j] = 0$ \;
}
}
}
\tcc{$\referencepictures_{M,\numrefpics_M}$ does not exist and is assumed to be 0}
\end{algorithm}
Note: The above (automatic) generation of the inter RPS parameter
values has been integrated into the encoder, and is activated by
the value of predict $= 2$ followed by the value of deltaRIdx$-1$,
only, as described above.
%%%%
%%%%
%%%%
\newgeometry{tmargin=1.6cm,lmargin=1cm,rmargin=1cm,bmargin=1in,nohead}
\subsection{Encoder parameters}
%%
%% File, I/O and source parameters
%%
\begin{OptionTable}{File, I/O and source parameters}
\Option{InputFile} &
\ShortOption{-i} &
\Default{\NotSet} &
Specifies the input video file.
Video data must be in a raw 4:2:0 planar format (Y$'$CbCr).
Note: When the bit depth of samples is larger than 8, each sample is encoded in
2 bytes (little endian, LSB-justified).
\\
\Option{BitstreamFile} &
\ShortOption{-b} &
\Default{\NotSet} &
Specifies the output coded bit stream file.
\\
\Option{ReconFile} &
\ShortOption{-o} &
\Default{\NotSet} &
Specifies the output locally reconstructed video file.
\\
\Option{SourceWidth}%
\Option{SourceHeight} &
\ShortOption{-wdt}%
\ShortOption{-hgt} &
\Default{0}%
\Default{0} &
Specifies the width and height of the input video in luma samples.
\\
\Option{InputBitDepth} &
\ShortOption{\None} &
\Default{8} &
Specifies the bit depth of the input video.
\\
\Option{InternalBitDepth} &
\ShortOption{\None} &
\Default{0 \\ (InputBitDepth)} &
Specifies the bit depth used for coding.
If the input video is a different bit depth to InternalBitDepth, it is
automatically converted by:
\begin{displaymath}
\left\lfloor
\frac{\mathrm{Pel} * 2^{\mathrm{InternalBitDepth}}}{
2^{\mathrm{InputBitDepth}}}
\right\rfloor
\end{displaymath}
Note: The effect of this option is as if the input video is externally
converted to the InternalBitDepth and then coded with this value as
InputBitDepth. The codec has no notion of two different bit depths.
\\
\Option{OutputBitDepth} &
\ShortOption{\None} &
\Default{0 \\ (InternalBitDepth)} &
Specifies the bit depth of the output locally reconstructed video file.
Note: This option has no effect on the decoding process.
\\
\Option{InputBitDepthC}%
\Option{InternalBitDepthC}%
\Option{OutputBitDepthC} &
\ShortOption{\None} &
\Default{(InputBitDepth)}%
\Default{(InternalBitDepth)}%
\Default{(InternalBitDepthC)} &
Specifies the various bit-depths for chroma components. These only need
to be specified if non-equal luma and chroma bit-depth processing is
required.
\\
\Option{ConformanceMode} &
\ShortOption{\None} &
\Default{0} &
Specifies the conformance mode (cropping/padding parameters) to be applied to the input
video. The following modes are available:
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & No cropping / padding \\
1 & Automatic padding to the next minimum CU size \\
2 & Padding according to parameters HorizontalPadding and VerticalPadding \\
3 & Cropping according to parameters ConfLeft, ConfRight, ConfTop and ConfBottom \\
\end{tabular}
\\
\Option{HorizontalPadding}%
\Option{VerticalPadding} &
\ShortOption{-pdx}%
\ShortOption{-pdy} &
\Default{0} &
Specifies the horizontal and vertical padding to be applied to the input
video in luma samples. Must be a multiple of the chroma resolution
(e.g. a multiple of two for 4:2:0).
\\
\Option{ConfLeft}%
\Option{ConfRight}%
\Option{ConfTop}%
\Option{ConfBottom} &
\ShortOption{\None} &
\Default{0} &
Specifies the horizontal and vertical cropping to be applied to the
input video in luma samples. Must be a multiple of the chroma
resolution (e.g. a multiple of two for 4:2:0).
\\
\Option{FrameRate} &
\ShortOption{-fr} &
\Default{0} &
Specifies the frame rate of the input video.
Note: This option only affects the reported bit rates.
\\
\Option{FrameSkip} &
\ShortOption{-fs} &
\Default{0} &
Specifies a number of frames to skip at beginning of input video file.
\\
\Option{FramesToBeEncoded} &
\ShortOption{-f} &
\Default{0 \\ (all)} &
Specifies the number of frames to be encoded.
\\
\end{OptionTable}
%%
%% profile, level and conformance options
%%
\begin{OptionTable}{Profile and level parameters}
\Option{Profile} &
\ShortOption{\None} &
\Default{none} &
Specifies the profile to which the encoded bitstream complies.
Valid values are: none, main, main10, main-still-picture.
Compatibility flags are automatically determined according to the profile.
If --Profile=main, then main10 will always be signalled as compatible.
If --Profile=main10, then main will be signalled as compatible if the bit-depth is 8-bit.
NB: There is currently no validation that the encoder configuration complies with the profile and level constraints.
\\
\Option{Level} &
\ShortOption{\None} &
\Default{none} &
Specifies the level to which the encoded bitstream complies.
Valid values are: none, 1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2.
NB: There is currently no validation that the encoder configuration complies with the profile and level constraints.
\\
\Option{Tier} &
\ShortOption{\None} &
\Default{main} &
Specifies the level tier to which the encoded bitstream complies.
Valid values are: main, high.
NB: There is currently no validation that the encoder configuration complies with the profile and level constraints.
\\
\Option{ProgressiveSource} &
\ShortOption{\None} &
\Default{false} &
Specifies the value of general_progressive_source_flag
\\
\Option{InterlacedSource} &
\ShortOption{\None} &
\Default{false} &
Specifies the value of general_interlaced_source_flag
\\
\Option{NonPackedSource} &
\ShortOption{\None} &
\Default{false} &
Specifies the value of general_non_packed_constraint_flag
\\
\Option{FrameOnly} &
\ShortOption{\None} &
\Default{false} &
Specifies the value of general_frame_only_constraint_flag
\\
\end{OptionTable}
%%
%% Unit definition parameters
%%
\begin{OptionTable}{Unit definition parameters}
\Option{MaxCUWidth} &
\ShortOption{\None} &
\Default{64} &
Defines the maximum CU width.
\\
\Option{MaxCUHeight} &
\ShortOption{\None} &
\Default{64} &
Defines the maximum CU height.
\\
\Option{MaxCUSize} &
\ShortOption{\None} &
\Default{64} &
Defines the maximum CU size.
\\
\Option{MaxPartitionDepth} &
\ShortOption{-h} &
\Default{4} &
Defines the depth of the CU tree.
\\
\Option{QuadtreeTULog2MaxSize} &
\ShortOption{\None} &
\Default{6 \\ ($= \mathrm{log}_2(64)$)} &
Defines the Maximum TU size in logarithm base 2.
\\
\Option{QuadtreeTULog2MinSize} &
\ShortOption{\None} &
\Default{2 \\ ($= \mathrm{log}_2(4)$)} &
Defines the Minimum TU size in logarithm base 2.
\\
\Option{QuadtreeTUMaxDepthIntra} &
\ShortOption{\None} &
\Default{1} &
Defines the depth of the TU tree for intra CUs.
\\
\Option{QuadtreeTUMaxDepthInter} &
\ShortOption{\None} &
\Default{2} &
Defines the depth of the TU tree for inter CUs.
\\
\end{OptionTable}
%%
%% Coding structure parameters
%%
\begin{OptionTable}{Coding structure parameters}
\Option{IntraPeriod} &
\ShortOption{-ip} &
\Default{$-1$} &
Specifies the intra frame period.
A value of $-1$ implies an infinite period.
\\
\Option{DecodingRefreshType} &
\ShortOption{-dr} &
\Default{0} &
Specifies the type of decoding refresh to apply at the intra frame period
picture.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Applies an I picture (not a clean random access point). \\
1 & Applies a non-IDR clean random access point (open GOP). \\
2 & Applies an IDR random access point (closed GOP). \\
\end{tabular}
\\
\Option{GOPSize} &
\ShortOption{-g} &
\Default{1} &
Specifies the size of the cyclic GOP structure.
\\
\Option{Frame\emph{N}} &
\ShortOption{\None} &
\Default{\NotSet} &
Multiple options that define the cyclic GOP structure that will be used
repeatedly throughout the sequence. The table should contain GOPSize
elements.
\par
See section~\ref{sec:gop-structure} for further details.
\\
\Option{ListCombination} &
\ShortOption{-lc} &
\Default{true} &
Enables or disables the use of the combined reference list for uni-prediction
in B-slices.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Reference list~0 and reference list~1 are identical and reference
list~0 is used as the combined reference list. \\
1 & The combined reference list is derived from reference list~0 and
reference list~1. \\
\end{tabular}
NB: LComb can only be 0 in low delay coding (more precisely, when list 0
and list 1 are the same)
\\
\end{OptionTable}
%%
%% Motion estimation parameters
%%
\begin{OptionTable}{Motion estimation parameters}
\Option{FastSearch} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the use of a fast motion search.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Full search method \\
1 & Fast search method \\
\end{tabular}
\\
\Option{SearchRange} &
\ShortOption{-sr} &
\Default{96} &
Specifies the search range used for motion estimation.
Note: the search range is defined around a predictor. Motion vectors
derived by the motion estimation may thus have values larger than the
search range.
\\
\Option{BipredSearchRange} &
\ShortOption{\None} &
\Default{4} &
Specifies the search range used for bi-prediction refinement in motion
estimation.
\\
\Option{HadamardME} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the use of the Hadamard transform in fractional-pel motion
estimation.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & SAD for cost estimation \\
1 & Hadamard for cost estimation \\
\end{tabular}
\\
\Option{ASR} &
\ShortOption{\None} &
\Default{false} &
Enables or disables the use of adaptive search ranges, where the motion
search range is dynamically adjusted according to the POC difference
between the current and the reference pictures.
\begin{displaymath}
\resizebox{\hsize}{!}{$
    \mathrm{SearchRange}' = \mathrm{Round}\left(
\mathrm{SearchRange}
* \mathrm{ADAPT\_SR\_SCALE}
* \frac{\mathrm{abs}(
\mathrm{POCcur} - \mathrm{POCref} )}{
\mathrm{RateGOPSize}}\right)
$}
\end{displaymath}
\\
\end{OptionTable}
%%
%% Mode decision parameters
%%
\begin{OptionTable}{Mode decision parameters}
\Option{LambdaModifier$N$} &
\ShortOption{-LM$N$} &
\Default{1.0} &
Specifies a value that is multiplied with the Lagrange multiplier
$\lambda$, for use in the rate-distortion optimised cost calculation
when encoding temporal layer~$N$.
\par
$N$ may be in the range 0--7.
\\
\Option{FEN} &
\ShortOption{\None} &
\Default{false} &
Enables or disables the use of fast encoder mode. When enabled,
the following occurs:
\begin{itemize}
\item In the SAD computation for blocks having size larger than 8, only
the lines of even rows in the block are considered.
\item The number of iterations used in the bi-directional motion vector
refinement in the motion estimation process is reduced from 4 to 1.
\end{itemize}
\\
\Option{FDM} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the use of fast encoder decisions for 2Nx2N merge
mode. When enabled, the RD cost for the merge mode of the current
candidate is not evaluated if the merge skip mode was the best merge
mode for one of the previous candidates.
\\
\Option{RDpenalty} &
\ShortOption{\None} &
\Default{0} &
RD-penalty for 32x32 TU for intra in non-intra slices.
Enabling this parameter can reduce the visibility of CU boundaries in the coded picture.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & No RD-penalty \\
1 & RD-penalty \\
2 & Maximum RD-penalty (no 32x32 TU)\\
\end{tabular}
\\
\end{OptionTable}
%%
%% Quantization parameters
%%
\begin{OptionTable}{Quantization parameters}
\Option{QP} &
\ShortOption{-q} &
\Default{30.0} &
Specifies the base value of the quantization parameter.
\\
\Option{CbQpOffset}%
\Option{CrQpOffset} &
\ShortOption{-cbqpofs}%
\ShortOption{-crqpofs} &
\Default{0}%
\Default{0} &
Global offset to apply to the luma QP to derive the QP of Cb and Cr
respectively. These options correspond to the values of cb_qp_offset
and cr_qp_offset, that are transmitted in the PPS. Valid values are in
the range $[-12, 12]$.
\\
\Option{MaxCuDQPDepth} &
\ShortOption{\None} &
\Default{0} &
Defines maximum depth of a minimum CuDQP for sub-LCU-level delta QP.
MaxCuDQPDepth shall be greater than or equal to SliceGranularity.
\\
\Option{RDOQ} &
\ShortOption{\None} &
\Default{true} &
Enables or disables rate-distortion-optimized quantization.
\\
\Option{RDOQTS} &
\ShortOption{\None} &
\Default{true} &
Enables or disables rate-distortion-optimized quantization for transform-skipped TUs.
\\
\Option{DeltaQpRD} &
\ShortOption{-dqr} &
\Default{0} &
Specifies the maximum QP offset at slice level for multi-pass slice
encoding. When encoding, each slice is tested multiple times by using
slice QP values in the range $[-\mathrm{DeltaQpRD}, \mathrm{DeltaQpRD}]$,
and the best QP value is chosen as the slice QP.
\\
\Option{MaxDeltaQP} &
\ShortOption{-d} &
\Default{0} &
Specifies the maximum QP offset at the largest coding unit level for
the block-level adaptive QP assignment scheme. In the encoder, each
largest coding unit is tested multiple times by using the QP values in
the range $[-\mathrm{MaxDeltaQP}, \mathrm{MaxDeltaQP}]$, and the best QP
value is chosen as the QP value of the largest coding unit.
\\
\Option{dQPFile} &
\ShortOption{-m} &
\Default{\NotSet} &
Specifies a file containing a list of QP deltas. The $n$-th line
(where $n$ is 0 for the first line) of this file corresponds to the QP
value delta for the picture with POC value $n$.
\\
\Option{AdaptiveQpSelection} &
\ShortOption{-aqps} &
\Default{false} &
Specifies whether QP values for non-I frames will be calculated on the
fly based on statistics of previously coded frames.
\\
\Option{RecalculateQPAccordingToLambda} &
\ShortOption{\None} &
\Default{false} &
Recalculate QP values according to lambda values. Enabling this option in
the all-intra case is not recommended.
\\
\end{OptionTable}
%%
%% Entropy coding parameters
%%
\begin{OptionTable}{Entropy coding parameters}
\Option{SBACRD} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the use of bit counts from arithmetic coder in
rate-distortion decisions.
\\
\end{OptionTable}
%%
%% Slice coding parameters
%%
\begin{OptionTable}{Slice coding parameters}
%\Option{SliceGranularity} &
%\ShortOption{\None} &
%\Default{0} &
%Determines the depth in an LCU at which slices may begin and end.
%\par
%\begin{tabular}{cp{0.45\textwidth}}
% 0 & Slice addresses are LCU aligned \\
% $1 \leq n \leq 3$
% & Slice start addresses are aligned to CUs at depth $n$ \\
%\end{tabular}
%
%Note: The smallest permissible alignment is 16x16 CUs.
%Values of $n$ must satisfy this constraint, for example, with a 64x64
%LCU, $n$ must be less than or equal to 2.
%\\
\Option{SliceMode} &
\ShortOption{\None} &
\Default{0} &
Controls the slice partitioning method in conjunction with
SliceArgument.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Single slice \\
1 & Maximum number of CTUs per slice \\
2 & Maximum number of bytes per slice \\
3 & Maximum number of tiles per slice \\
\end{tabular}
\\
\Option{SliceArgument} &
\ShortOption{\None} &
\Default{\NotSet} &
Specifies the maximum number of CTUs, bytes or tiles in a slice depending on the
SliceMode setting.
\\
\Option{SliceSegmentMode} &
\ShortOption{\None} &
\Default{0} &
Enables (dependent) slice segment coding in conjunction with
SliceSegmentArgument.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Single slice \\
1 & Maximum number of CTUs per slice segment\\
2 & Maximum number of bytes per slice segment\\
3 & Maximum number of tiles per slice segment\\
\end{tabular}
\\
\Option{SliceSegmentArgument} &
\ShortOption{\None} &
\Default{\NotSet} &
Defines the maximum number of CTUs, bytes or tiles in a slice segment
depending on the SliceSegmentMode setting.
\\
\Option{WaveFrontSynchro} &
\ShortOption{\None} &
\Default{false} &
Enables the use of specific CABAC probabilities synchronization at the
beginning of each line of CTBs in order to produce a bitstream that can
be encoded or decoded using one or more cores.
\\
\Option{NumTileColumnsMinus1}%
\Option{NumTileRowsMinus1} &
\ShortOption{\None} &
\Default{0} &
Specifies the tile based picture partitioning geometry as
$(\mathrm{NumTileColumnsMinus1} + 1) \times (\mathrm{NumTileRowsMinus1} + 1)$
columns and rows.
\\
\Option{UniformSpacingIdc} &
\ShortOption{\None} &
\Default{0} &
Controls the mode used to determine per row and column tile sizes.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Each tile column width and tile row height is explicitly set
by ColumnWidthArray and RowHeightArray respectively \\
1 & Tile columns and tile rows are uniformly spaced. \\
\end{tabular}
\\
\Option{ColumnWidthArray}%
\Option{RowHeightArray} &
\ShortOption{\None} &
\Default{\NotSet} &
Specifies a space or comma separated list of widths and heights,
respectively, of each tile column or tile row. The first value in the
list corresponds to the leftmost tile column or topmost tile row.
\\
\end{OptionTable}
%%
%% Deblocking filter parameters
%%
\begin{OptionTable}{Deblocking filter parameters}
\Option{LoopFilterDisable} &
\ShortOption{\None} &
\Default{false} &
Enables or disables the in-loop deblocking filter.
\\
\Option{LFCrossSliceBoundaryFlag} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the use of in-loop filtering across slice
boundaries.
\\
\Option{DeblockingFilterControlPresent}&
\ShortOption{\None}&
\Default{false}&
Enables or disables the presence of the deblocking filter control
parameters in the picture parameter set and in the slice segment header.
When disabled, the default deblocking filter parameters are used.
\\
\Option{LoopFilterOffsetInPPS}&
\ShortOption{\None}&
\Default{false}&
If enabled, the in-loop deblocking filter control parameters are sent in PPS.
Otherwise, the in-loop deblocking filter control parameters are sent in the slice segment header.
If deblocking filter parameters are sent in PPS, the same values of deblocking filter parameters
are used for all pictures in the sequence (i.e. deblocking parameter = base parameter value).
If deblocking filter parameters are sent in the slice segment header, varying deblocking filter
parameters can be specified by setting parameters tcOffsetDiv2 and betaOffsetDiv2 in the GOP structure table.
In this case, the final value of the deblocking filter parameter sent for a certain GOP picture is equal to
(base parameter + GOP parameter for this picture). Intra-pictures use the base parameters values.
\\
\Option{LoopFilterTcOffset_div2}&
\ShortOption{\None}&
\Default{0}&
Specifies the base value for the in-loop deblocking filter parameter tc_offset_div2. The final value of tc_offset_div2
shall be an integer number in the range $-6..6$.
\\
\Option{LoopFilterBetaOffset_div2}&
\ShortOption{\None}&
\Default{0}&
Specifies the base value for the in-loop deblocking filter parameter beta_offset_div2. The final value of beta_offset_div2
shall be an integer number in the range $-6..6$.
\\
\end{OptionTable}
%%
%% Coding tools parameters
%%
\begin{OptionTable}{Coding tools parameters}
%\Option{ALF} &
%\ShortOption{\None} &
%\Default{true} &
%Enables or disables the adaptive loop filter.
%\\
%\Option{ALFLowLatencyEncode} &
%\ShortOption{\None} &
%\Default{false} &
%Specifies the operating mode (low latency or high efficiency) of the
%adaptive loop filter.
%\\
\Option{SAO} &
\ShortOption{\None} &
\Default{true} &
Enables or disables the sample adaptive offset (SAO) filter.
\\
\Option{SAOLcuBoundary} &
\ShortOption{\None} &
\Default{false} &
Enables or disables SAO parameter estimation using non-deblocked pixels
for LCU bottom and right boundary areas.
\\
%\Option{LMChroma} &
%\ShortOption{\None} &
%\Default{true} &
%Enables or disables the intra chroma-from-luma prediction mode.
%\\
%\Option{NSQT} &
%\ShortOption{\None} &
%\Default{true} &
%Enables or disables the non-square quadtree transform.
%\\
\Option{ConstrainedIntraPred} &
\ShortOption{\None} &
\Default{false} &
Enables or disables constrained intra prediction. Constrained intra
prediction only permits samples from intra blocks in the same slice as the
current block to be used for intra prediction.
\\
\Option{TransquantBypassEnableFlag} &
\ShortOption{\None} &
\Default{false} &
Enables or disables the ability to bypass the transform,
quantization and filtering stages at CU level.
This option corresponds to the value of
transquant_bypass_enable_flag that is transmitted in the PPS.
See CUTransquantBypassFlagValue for further details.
\\
\Option{CUTransquantBypassFlagValue} &
\ShortOption{\None} &
\Default{0} &
Controls the per CU transformation, quantization and filtering
mode decision.
This option corresponds to the value of the per CU cu_transquant_bypass_flag.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Bypass is not performed on any CU \\
1 & Bypass is performed on all CUs \\
\end{tabular}
This option has no effect if TransquantBypassEnableFlag is disabled.
\\
\Option{PCMEnabledFlag} &
\ShortOption{\None} &
\Default{false} &
Enables or disables the use of PCM.
\\
\Option{PCMLog2MaxSize} &
\ShortOption{\None} &
\Default{5 \\ ($= \mathrm{log}_2(32)$)} &
Specifies log2 of the maximum PCM block size. When PCM is enabled, the
PCM mode is available for 2Nx2N intra PUs smaller than or equal to the
specified maximum PCM block size
\\
\Option{PCMLog2MinSize} &
\ShortOption{\None} &
\Default{3} &
Specifies log2 of the minimum PCM block size. When PCM is enabled, the
PCM mode is available for 2Nx2N intra PUs larger than or equal to the
specified minimum PCM block size.
\par
When larger than PCMLog2MaxSize, PCM mode is not used.
\\
\Option{PCMInputBitDepthFlag} &
\ShortOption{\None} &
\Default{1} &
If enabled specifies that PCM sample bit-depth is set equal to
InputBitDepth. Otherwise, it specifies that PCM sample bit-depth is set
equal to InternalBitDepth.
\\
\Option{PCMFilterDisableFlag} &
\ShortOption{\None} &
\Default{false} &
If enabled specifies that loop-filtering on reconstructed samples of PCM
blocks is skipped. Otherwise, it specifies that loop-filtering on
reconstructed samples of PCM blocks is not skipped.
% 0 = (loop-filtering is not skipped for PCM samples).
\\
\Option{WeightedPredP} &
\ShortOption{-wpP} &
\Default{false} &
Enables the use of weighted prediction in P slices.
\\
\Option{WeightedPredB} &
\ShortOption{-wpB} &
\Default{false} &
Enables the use of weighted prediction in B slices.
\\
\Option{SignHideFlag} &
\ShortOption{-SBH} &
\Default{true} &
If enabled specifies that for each 4x4 coefficient group for which the
number of coefficients between the first nonzero coefficient and the
last nonzero coefficient along the scanning line exceeds 4, the sign bit
of the first nonzero coefficient will not be directly transmitted in the
bitstream, but may be inferred from the parity of the sum of all nonzero
coefficients in the current coefficient group.
\\
\Option{StrongIntraSmoothing} &
\ShortOption{-sis} &
\Default{true} &
If enabled specifies that for 32x32 intra prediction block, the intra smoothing
when applied is either the 1:2:1 smoothing filter or a stronger bi-linear
interpolation filter. Key reference sample values are tested and if the criterion
is satisfied, the stronger intra smoothing filter is applied.
If disabled, the intra smoothing filter when applied is the 1:2:1 smoothing filter.
\\
\Option{TMVPMode} &
\ShortOption{\None} &
\Default{1} &
Controls the temporal motion vector prediction mode.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Disabled for all slices. \\
1 & Enabled for all slices. \\
2 & Disabled only for the first picture of each GOPSize. \\
\end{tabular}
\\
\Option{TransformSkip} &
\ShortOption{\None} &
\Default{false} &
Enables or disables transform-skipping mode decision for 4x4 TUs
\footnote{Enables transform_skip_enabled and per 4x4 TU tests}.
\\
\Option{TransformSkipFast} &
\ShortOption{\None} &
\Default{false} &
Enables or disables reduced testing of the transform-skipping mode
decision for chroma TUs. When enabled, no RDO search is performed for
chroma TUs, instead they are transform-skipped if the four corresponding
luma TUs are also skipped.
\par
This option has no effect if TransformSkip is disabled.
\\
\end{OptionTable}
%%
%% Rate control parameters
%%
\begin{OptionTable}{Rate control parameters}
\Option{RateControl} &
\ShortOption{\None} &
\Default{false} &
Rate control: enables rate control or not.
\\
\Option{TargetBitrate} &
\ShortOption{\None} &
\Default{0} &
Rate control: target bitrate, in bps.
\\
\Option{KeepHierarchicalBit} &
\ShortOption{\None} &
\Default{0} &
Rate control: 0: equal bit allocation among pictures;
1: fixed-ratio hierarchical bit allocation; 2: adaptive hierarchical ratio bit allocation.
It is suggested to enable hierarchical bit allocation for hierarchical-B coding structure.
\\
\Option{LCULevelRateControl} &
\ShortOption{\None} &
\Default{true} &
Rate control: true: LCU level RC; false: picture level RC.
\\
\Option{RCLCUSeparateModel} &
\ShortOption{\None} &
\Default{true} &
Rate control: use LCU level separate R-lambda model or not.
When LCULevelRateControl is equal to false, this parameter is meaningless.
\\
\Option{InitialQP} &
\ShortOption{\None} &
\Default{0} &
Rate control: initial QP value for the first picture.
0 to auto determine the initial QP value.
\\
\Option{RCForceIntraQP} &
\ShortOption{\None} &
\Default{false} &
Rate control: force intra QP to be equal to initial QP or not.
\\
\end{OptionTable}
%%
%% VUI parameters
%%
\begin{OptionTable}{VUI parameters}
\Option{VuiParametersPresent} &
\ShortOption{-vui} &
\Default{false} &
Enable generation of vui_parameters().
\\
\Option{AspectRatioInfoPresent} &
\ShortOption{} &
\Default{false} &
Signals whether aspect_ratio_idc is present.
\\
\Option{AspectRatioIdc} &
\ShortOption{} &
\Default{0} &
aspect_ratio_idc
\\
\Option{SarWidth} &
\ShortOption{} &
\Default{0} &
Specifies the horizontal size of the sample aspect ratio.
\\
\Option{SarHeight} &
\ShortOption{} &
\Default{0} &
Specifies the vertical size of the sample aspect ratio.
\\
\Option{OverscanInfoPresent} &
\ShortOption{} &
\Default{false} &
Signals whether overscan_info_present_flag is present.
\\
\Option{OverscanAppropriate} &
\ShortOption{} &
\Default{false} &
Indicates whether cropped decoded pictures are suitable for display using overscan.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Indicates that the decoded pictures should not be displayed using overscan. \\
1 & Indicates that the decoded pictures may be displayed using overscan. \\
\end{tabular}
\\
\Option{VideoSignalTypePresent} &
\ShortOption{} &
\Default{false} &
Signals whether video_format, video_full_range_flag, and colour_description_present_flag are present.
\\
\Option{VideoFormat} &
\ShortOption{} &
\Default{5} &
Indicates representation of pictures.
\\
\Option{VideoFullRange} &
\ShortOption{} &
\Default{false} &
Indicates the black level and range of luma and chroma signals.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Indicates that the luma and chroma signals are to be scaled prior to display. \\
1 & Indicates that the luma and chroma signals are not to be scaled prior to display. \\
\end{tabular}
\\
\Option{ColourDescriptionPresent} &
\ShortOption{} &
\Default{false} &
Signals whether colour_primaries, transfer_characteristics and matrix_coefficients are present.
\\
\Option{ColourPrimaries} &
\ShortOption{} &
\Default{2} &
Indicates chromaticity coordinates of the source primaries.
\\
\Option{TransferCharateristics} &
\ShortOption{} &
\Default{2} &
Indicates the opto-electronic transfer characteristics of the source.
\\
\Option{MatrixCoefficients} &
\ShortOption{} &
\Default{2} &
Describes the matrix coefficients used in deriving luma and chroma from RGB primaries.
\\
\Option{ChromaLocInfoPresent} &
\ShortOption{} &
\Default{false} &
Signals whether chroma_sample_loc_type_top_field and chroma_sample_loc_type_bottom_field are present.
\\
\Option{ChromaSampleLocTypeTopField} &
\ShortOption{} &
\Default{0} &
Specifies the location of chroma samples for top field.
\\
\Option{ChromaSampleLocTypeBottomField} &
\ShortOption{} &
\Default{0} &
Specifies the location of chroma samples for bottom field.
\\
\Option{NeutralChromaIndication} &
\ShortOption{} &
\Default{false} &
Indicates that the value of all decoded chroma samples is equal to 1<<(BitDepthCr-1).
\\
\Option{DefaultDisplayWindowFlag} &
\ShortOption{\None} &
\Default{0} &
Indicates the presence of the Default Window parameters.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Disabled \\
1 & Enabled \\
\end{tabular}
\\
\Option{DefDispWinLeftOffset}%
\Option{DefDispWinRightOffset}%
\Option{DefDispWinTopOffset}%
\Option{DefDispWinBottomOffset} &
\ShortOption{\None} &
\Default{0} &
Specifies the horizontal and vertical offset to be applied to the
input video from the conformance window in luma samples.
Must be a multiple of the chroma resolution (e.g. a multiple of two for 4:2:0).
\\
\Option{BitstreamRestriction} &
\ShortOption{} &
\Default{false} &
Signals whether bitstream restriction parameters are present.
\\
\Option{TilesFixedStructure} &
\ShortOption{} &
\Default{false} &
Indicates that each active picture parameter set has the same values of the syntax elements related to tiles.
\\
\Option{MotionVectorsOverPicBoundaries} &
\ShortOption{} &
\Default{false} &
Indicates that no samples outside the picture boundaries are used for inter prediction.
\\
\Option{MaxBytesPerPicDenom} &
\ShortOption{} &
\Default{2} &
Indicates a number of bytes not exceeded by the sum of the sizes of the VCL NAL units associated with any coded picture.
\\
\Option{MaxBitsPerMinCuDenom} &
\ShortOption{} &
\Default{1} &
Indicates an upper bound for the number of bits of coding_unit() data.
\\
\Option{Log2MaxMvLengthHorizontal} &
\ShortOption{} &
\Default{15} &
Indicate the maximum absolute value of a decoded horizontal MV component in quarter-pel luma units.
\\
\Option{Log2MaxMvLengthVertical} &
\ShortOption{} &
\Default{15} &
Indicate the maximum absolute value of a decoded vertical MV component in quarter-pel luma units.
\\
\end{OptionTable}
%%
%% SEI messages
%%
\begin{OptionTable}{SEI messages}
\Option{SEIDecodedPictureHash} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the calculation and insertion of the Decoded picture hash
SEI messages.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Disabled \\
1 & Transmits MD5 in SEI message and writes the value to the encoder
log \\
2 & Transmits CRC in SEI message and writes the value to the encoder
log \\
3 & Transmits checksum in SEI message and writes the value to the encoder
log \\
\end{tabular}
\\
\Option{SEIpictureDigest} &
\ShortOption{\None} &
\Default{0} &
Deprecated alias for SEIDecodedPictureHash.
Do not use anymore.
\\
\Option{SEIRecoveryPoint} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Recovery point
SEI messages.
\\
\Option{SEIActiveParameterSets} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Active parameter sets
SEI messages.
\\
\Option{SEIBufferingPeriod} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Buffering period
SEI messages. This option has no effect if VuiParametersPresent is disabled.
SEIBufferingPeriod requires SEIActiveParameterSets to be enabled.
\\
\Option{SEIPictureTiming} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Picture timing
SEI messages. This option has no effect if VuiParametersPresent is disabled.
\\
\Option{SEIDecodingUnitInfo} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Decoding unit information
SEI messages. This option has no effect if VuiParametersPresent is disabled.
\\
\Option{SEIGradualDecodingRefreshInfo} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Gradual decoding refresh information
SEI messages.
\\
\Option{SEITemporalLevel0Index} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Temporal level zero index
SEI messages.
\\
\Option{SEIDisplayOrientation} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Display orientation
SEI messages.
\par
\begin{tabular}{cp{0.30\textwidth}}
0 & Disabled \\
N: $0 < N < (2^{16} - 1)$ & Enable display orientation SEI message with
\mbox{anticlockwise_rotation = N}
and \mbox{display_orientation_repetition_period = 1} \\
\end{tabular}
\\
\Option{SEIFramePacking} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Frame packing arrangement SEI messages.
\\
\Option{SEIFramePackingType} &
\ShortOption{\None} &
\Default{0} &
Indicates the arrangement type in the Frame packing arrangement SEI message.
This option has no effect if SEIFramePacking is disabled.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Checkerboard \\
1 & Line Alternate \\
2 & Column Alternate \\
3 & Side by Side \\
4 & Top Bottom \\
5 & Frame Alternate \\
6 & 2D Image \\
7 & Tile Format \\
\end{tabular}
\\
\Option{SEIFramePackingInterpretation} &
\ShortOption{\None} &
\Default{0} &
Indicates the constituent frames relationship in the Frame packing arrangement SEI message.
This option has no effect if SEIFramePacking is disabled.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Unspecified \\
1 & Frame 0 is associated with the left view of a stereo pair \\
2 & Frame 0 is associated with the right view of a stereo pair \\
\end{tabular}
\\
\Option{SEIFramePackingQuincunx} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the quincunx_sampling signalling in the
Frame packing arrangement SEI messages. This option has no
effect if SEIFramePacking is disabled.
\\
\Option{SEIFramePackingId} &
\ShortOption{\None} &
\Default{0} &
Indicates the session number in the Frame packing arrangement
SEI messages. This option has no effect if SEIFramePacking is
disabled.
\\
\Option{SEIToneMappingInfo} &
\ShortOption{\None} &
\Default{0} &
Enables or disables the insertion of the Tone Mapping SEI message.
\\
\Option{SEIToneMapId} &
\ShortOption{\None} &
\Default{0} &
Specifies Id of Tone Mapping SEI message for a given session.
\\
\Option{SEIToneMapCancelFlag} &
\ShortOption{\None} &
\Default{0} &
Indicates that the Tone Mapping SEI message cancels the persistence or follows.
\\
\Option{SEIToneMapPersistenceFlag} &
\ShortOption{\None} &
\Default{1} &
Specifies the persistence of the Tone Mapping SEI message.
\\
\Option{SEIToneMapCodedDataBitDepth} &
\ShortOption{\None} &
\Default{10} &
Specifies Coded Data BitDepth of Tone Mapping SEI messages.
\\
\Option{SEIToneMapTargetBitDepth} &
\ShortOption{\None} &
\Default{8} &
Specifies Output BitDepth of Tone mapping function.
\\
\Option{SEIToneMapModelId} &
\ShortOption{\None} &
\Default{0} &
Specifies Model utilized for mapping coded data into
target_bit_depth range.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & linear mapping with clipping \\
1 & sigmoidal mapping \\
2 & user-defined table mapping \\
3 & piece-wise linear mapping \\
4 & luminance dynamic range mapping \\
\end{tabular}
\\
\Option{SEIToneMapMinValue} &
\ShortOption{\None} &
\Default{0} &
Specifies the minimum value in mode 0.
\\
\Option{SEIToneMapMaxValue} &
\ShortOption{\None} &
\Default{1023} &
Specifies the maximum value in mode 0.
\\
\Option{SEIToneMapSigmoidMidpoint} &
\ShortOption{\None} &
\Default{512} &
Specifies the centre point in mode 1.
\\
\Option{SEIToneMapSigmoidWidth} &
\ShortOption{\None} &
\Default{960} &
Specifies the distance between 5\% and 95\% values of
the target_bit_depth in mode 1.
\\
\Option{SEIToneMapStartOfCodedInterval} &
\ShortOption{\None} &
\Default{\None} &
Array of user-defined mapping table.
Default table can be set to the following:
\par
\begin{tabular}{cp{0.45\textwidth}}
0 12 24 36 48 60 72 84 96 108 120 132 144 156 168 180
192 192 196 204 208 216 220 228 232 240 248 252 260 264
272 276 284 292 292 296 300 304 308 312 320 324 328 332
336 344 348 352 356 360 368 372 376 380 384 388 396 400
404 408 412 420 424 428 432 436 444 444 444 448 452 456
460 464 468 472 476 476 480 484 488 492 496 500 504 508
508 512 516 520 524 528 532 536 540 540 544 548 552 556
560 564 568 572 572 576 580 584 588 592 596 600 604 604
608 612 616 620 624 628 632 636 636 640 644 648 652 656
660 664 668 672 672 672 676 680 680 684 688 692 692 696
700 704 704 708 712 716 716 720 724 724 728 732 736 736
740 744 748 748 752 756 760 760 764 768 768 772 776 780
780 784 788 792 792 796 800 804 804 808 812 812 816 820
824 824 828 832 836 836 840 844 848 848 852 856 860 860
860 864 864 868 872 872 876 880 880 884 884 888 892 892
896 900 900 904 908 908 912 912 916 920 920 924 928 928
932 936 936 940 940 944 948 948 952 956 956 960 964 964
968 968 972 976 976 980 984 984 988 992 992 996 996 1000
1004 1004 1008 1012 1012 1016 1020 1024
\end{tabular}
\\
\Option{SEIToneMapNumPivots} &
\ShortOption{\None} &
\Default{5} &
Specifies the number of pivot points in mode 3.
\\
\Option{SEIToneMapCodedPivotValue} &
\ShortOption{\None} &
\Default{\None} &
Array of coded pivot point in mode 3.
Default table can be set to the following:
\par
\begin{tabular}{cp{0.45\textwidth}}
64 128 256 512 768
\end{tabular}
\\
\Option{SEIToneMapTargetPivotValue} &
\ShortOption{\None} &
\Default{\None} &
Array of target pivot point in mode 3.
Default table can be set to the following:
\par
\begin{tabular}{cp{0.45\textwidth}}
48 73 111 168 215
\end{tabular}
\\
\Option{SEIToneMapCameraIsoSpeedIdc} &
\ShortOption{\None} &
\Default{0} &
Indicates the camera ISO speed for daylight illumination.
\\
\Option{SEIToneMapCameraIsoSpeedValue} &
\ShortOption{\None} &
\Default{420} &
Specifies the camera ISO speed for daylight illumination of Extended_ISO.
\\
\Option{SEIToneMapExposureCompensationValueSignFlag} &
\ShortOption{\None} &
\Default{0} &
Specifies the sign of ExposureCompensationValue.
\\
\Option{SEIToneMapExposureCompensationValueNumerator} &
\ShortOption{\None} &
\Default{0} &
Specifies the numerator of ExposureCompensationValue.
\\
\Option{SEIToneMapExposureCompensationValueDenomIdc} &
\ShortOption{\None} &
\Default{2} &
Specifies the denominator of ExposureCompensationValue.
\\
\Option{SEIToneMapRefScreenLuminanceWhite} &
\ShortOption{\None} &
\Default{350} &
Specifies reference screen brightness setting in units of candela per square metre.
\\
\Option{SEIToneMapExtendedRangeWhiteLevel} &
\ShortOption{\None} &
\Default{800} &
Indicates the luminance dynamic range.
\\
\Option{SEIToneMapNominalBlackLevelLumaCodeValue} &
\ShortOption{\None} &
\Default{16} &
Specifies luma sample value of the nominal black level assigned decoded pictures.
\\
\Option{SEIToneMapNominalWhiteLevelLumaCodeValue} &
\ShortOption{\None} &
\Default{235} &
Specifies luma sample value of the nominal white level assigned decoded pictures.
\\
\Option{SEIToneMapExtendedWhiteLevelLumaCodeValue} &
\ShortOption{\None} &
\Default{300} &
Specifies luma sample value of the extended dynamic range assigned decoded pictures.
\\
\end{OptionTable}
%%
%%
%%
\subsection{Hardcoded encoder parameters}
\begin{MacroTable}{CommonDef.h constants}
ADAPT_SR_SCALE &
1 &
Defines a scaling factor used to derive the motion search range when the
adaptive search range is enabled (see the ASR configuration parameter).
Default value is 1.
\\
MAX_GOP &
64 &
maximum size of value of hierarchical GOP.
\\
MAX_NUM_REF &
4 &
maximum number of multiple reference frames
\\
MAX_NUM_REF_LC &
8 &
maximum number of combined reference frames
\\
AMVP_MAX_NUM_CANDS &
2 &
maximum number of final candidates
\\
AMVP_MAX_NUM_CANDS_MEM &
3 &
\\
MRG_MAX_NUM_CANDS &
5 &
\\
DYN_REF_FREE &
off &
dynamic free of reference memories
\\
MAX_TLAYER &
8 &
maximum number of temporal layers
\\
HB_LAMBDA_FOR_LDC &
on &
use of B-style lambda for non-key pictures in low-delay mode
\\
GPB_SIMPLE &
on &
Fast estimation of generalized B in low-delay mode
\\
GPB_SIMPLE_UNI &
on &
Fast estimation of generalized B in low-delay mode for uni-direction
\\
FASTME_SMOOTHER_MV &
on &
Fast ME using smoother MV assumption
\\
ADAPT_SR_SCALE &
on &
division factor for adaptive search range
\\
CLIP_TO_709_RANGE &
off &
\\
EARLY_SKIP_THRES &
1.5 &
early skip if RD < EARLY_SKIP_THRES*avg[BestSkipRD]
\\
MAX_NUM_REF_PICS &
16 &
\\
MAX_CHROMA_FORMAT_IDC &
3 &
\\
\end{MacroTable}
\subsubsection*{TypeDef.h}
Numerous constants that guard individual adoptions are defined within
\url{source/Lib/TLibCommon/TypeDef.h}.
%%
%%
%%
\clearpage
\section{Using the decoder}
\begin{verbatim}
TappDecoder -b str.bin -o dec.yuv [options]
\end{verbatim}
\begin{OptionTable}{Decoder options}
\Option{} &
\ShortOption{-h} &
\Default{\None} &
Prints usage information.
\\
\Option{} &
\ShortOption{-o} &
\Default{\NotSet} &
Defines reconstructed YUV file name.
\\
\Option{} &
\ShortOption{-s} &
\Default{0} &
Defines the number of pictures in decoding order to skip.
\\
\Option{OutputBitDepth} &
\ShortOption{-d} &
\Default{0 \\ (Native)} &
Specifies the luma bit-depth of the reconstructed YUV file (the value 0 indicates
that the native bit-depth is used)
\\
\Option{OutputBitDepthC} &
\ShortOption{\None} &
\Default{0 \\ (Native)} &
Defines the chroma bit-depth of the reconstructed YUV file (the value 0 indicates
that the native bit-depth is used)
\\
\Option{SEIPictureDigest} &
\ShortOption{\None} &
\Default{1} &
Enable or disable verification of any Picture hash SEI messages. When
this parameter is set to 0, the feature is disabled and all messages are
ignored. When set to 1 (default), the feature is enabled and the decoder
has the following behaviour:
\begin{itemize}
\item
If Picture hash SEI messages are included in the bitstream, the same type
of hash is calculated for each decoded picture and written to the
log together with an indication whether the calculated value matches
the value in the SEI message.
Decoding will continue even if there is a mismatch.
\item
After decoding is complete, if any MD5sum comparison failed, a warning
is printed and the decoder exits with the status EXIT_FAILURE
\item
The per-picture MD5 log message has the following formats:
[MD5:d41d8cd98f00b204e9800998ecf8427e,(OK)],
[MD5:d41d8cd98f00b204e9800998ecf8427e,(unk)],
[MD5:d41d8cd98f00b204e9800998ecf8427e,(***ERROR***)] [rxMD5:b9e1...]
where, "(unk)" implies that no MD5 was signalled for this picture,
"(OK)" implies that the decoder agrees with the signalled MD5,
"(***ERROR***)" implies that the decoder disagrees with the signalled
MD5. "[rxMD5:...]" is the signalled MD5 if different.
\end{itemize}
\\
\Option{RespectDefDispWindow} &
\ShortOption{-w} &
\Default{0} &
Video region to be output by the decoder.
\par
\begin{tabular}{cp{0.45\textwidth}}
0 & Output content inside the conformance window. \\
1 & Output content inside the default window. \\
\end{tabular}
\\
\end{OptionTable}
\end{document}
|
{"hexsha": "7c6b8917b113786bfb9016620ed4353bc771d7ce", "size": 58874, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/software-manual.tex", "max_stars_repo_name": "pargles/HM-12.0", "max_stars_repo_head_hexsha": "787b9c0c559d11e939f0d9fcd8f50387ca4ae5fa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 394, "max_stars_repo_stars_event_min_datetime": "2015-01-08T01:26:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T03:07:30.000Z", "max_issues_repo_path": "doc/software-manual.tex", "max_issues_repo_name": "pargles/HM-12.0", "max_issues_repo_head_hexsha": "787b9c0c559d11e939f0d9fcd8f50387ca4ae5fa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2016-11-30T08:24:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-06T14:12:58.000Z", "max_forks_repo_path": "doc/software-manual.tex", "max_forks_repo_name": "pargles/HM-12.0", "max_forks_repo_head_hexsha": "787b9c0c559d11e939f0d9fcd8f50387ca4ae5fa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 151, "max_forks_repo_forks_event_min_datetime": "2015-01-17T01:07:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T08:11:07.000Z", "avg_line_length": 29.3197211155, "max_line_length": 125, "alphanum_fraction": 0.7426368176, "num_tokens": 16943}
|
import xarray as xr
import numpy as np
import dask.bag as db
import dask.array as da
from time import time
from scipy.interpolate import LinearNDInterpolator
from ..core import Instrument, Model
from .attenuation import calc_radar_atm_attenuation
from .psd import calc_mu_lambda
from ..core.instrument import ureg, quantity
def calc_total_reflectivity(model, detect_mask=False):
    """
    This method calculates the total (convective + stratiform) reflectivity (Ze).

    Parameters
    ----------
    model: :func:`emc2.core.Model` class
        The model to calculate the parameters for.
    detect_mask: bool
        True - generating a mask determining signal below noise floor.
        NOTE(review): this flag is not referenced anywhere in the body below;
        the "detect_mask" field is generated unconditionally - confirm intent.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The xarray Dataset containing the calculated radar moments.
    """
    # Convert stratiform Ze from dBZ to linear units, mapping non-finite
    # (hydrometeor-free) entries to 0 so they can be summed safely.
    Ze_tot = np.where(np.isfinite(model.ds["sub_col_Ze_tot_strat"].values),
                      10 ** (model.ds["sub_col_Ze_tot_strat"].values / 10.), 0)
    # Add the convective contribution (also converted to linear units) where finite.
    if model.process_conv:
        Ze_tot = np.where(np.isfinite(model.ds["sub_col_Ze_tot_conv"].values), Ze_tot +
                          10 ** (model.ds["sub_col_Ze_tot_conv"].values / 10.), Ze_tot)
    # Back to dBZ; log10(0) gives -inf, which is replaced with NaN just below.
    model.ds['sub_col_Ze_tot'] = xr.DataArray(10 * np.log10(Ze_tot), dims=model.ds["sub_col_Ze_tot_strat"].dims)
    model.ds['sub_col_Ze_tot'].values = np.where(np.isinf(model.ds['sub_col_Ze_tot'].values), np.nan,
                                                 model.ds['sub_col_Ze_tot'].values)
    model.ds['sub_col_Ze_tot'].attrs["long_name"] = \
        "Total (convective + stratiform) equivalent radar reflectivity factor"
    model.ds['sub_col_Ze_tot'].attrs["units"] = "dBZ"
    # Attenuated Ze: multiply the linear reflectivity by the two-way
    # hydrometeor and atmospheric transmittances (NaN treated as 1, i.e.,
    # no attenuation) before converting back to dBZ.
    if model.process_conv:
        model.ds['sub_col_Ze_att_tot'] = 10 * np.log10(Ze_tot *
                                                       model.ds['hyd_ext_conv'].fillna(1) * model.ds[
                                                           'hyd_ext_strat'].fillna(1) *
                                                       model.ds['atm_ext'].fillna(1))
    else:
        model.ds['sub_col_Ze_att_tot'] = 10 * np.log10(Ze_tot *
                                                       model.ds['hyd_ext_strat'].fillna(1) *
                                                       model.ds['atm_ext'].fillna(1))
    model.ds['sub_col_Ze_att_tot'].values = np.where(np.isinf(model.ds['sub_col_Ze_att_tot'].values), np.nan,
                                                     model.ds['sub_col_Ze_att_tot'].values)
    model.ds['sub_col_Ze_att_tot'].attrs["long_name"] = \
        "Total (convective + stratiform) attenuated (hydrometeor + gaseous) equivalent radar reflectivity factor"
    model.ds['sub_col_Ze_att_tot'].attrs["units"] = "dBZ"
    # Mask out any remaining non-finite entries (NaN-preserving .where()).
    model.ds["sub_col_Ze_tot"] = model.ds["sub_col_Ze_tot"].where(np.isfinite(model.ds["sub_col_Ze_tot"]))
    model.ds["sub_col_Ze_att_tot"] = model.ds["sub_col_Ze_att_tot"].where(
        np.isfinite(model.ds["sub_col_Ze_att_tot"]))
    # Flag sub-columns whose attenuated signal falls below the radar's
    # minimum detectable reflectivity.
    model.ds["detect_mask"] = model.ds["Ze_min"] >= model.ds["sub_col_Ze_att_tot"]
    model.ds["detect_mask"].attrs["long_name"] = "Radar detectability mask"
    model.ds["detect_mask"].attrs["units"] = ("1 = radar signal below noise floor, 0 = signal detected")
    return model
def accumulate_attenuation(model, is_conv, z_values, hyd_ext, atm_ext, OD_from_sfc=True,
                           use_empiric_calc=False, **kwargs):
    """
    Accumulates atmospheric and condensate radar attenuation (linear units) from TOA or the surface.
    Output fields are condensate and atmospheric transmittance.

    Parameters
    ----------
    model: Model
        The model to generate the parameters for.
    is_conv: bool
        True if the cell is convective
    z_values: ndarray
        model output height array in m.
    hyd_ext: ndarray
        fwd calculated extinction due to condensate per layer (empirical - dB km^-1, m^-1 otherwise).
    atm_ext: ndarray
        atmospheric attenuation per layer (dB/km).
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    use_empiric_calc: bool
        When True using empirical relations from literature for the fwd calculations
        (the cloud fraction still follows the scheme logic set by use_rad_logic).

    Returns
    -------
    model: :func:`emc2.core.Model`
        The model with the added two-way transmittance fields
        ('hyd_ext_<conv/strat>' and 'atm_ext').
    """
    if is_conv:
        cloud_str = "conv"
    else:
        cloud_str = "strat"
    if not use_empiric_calc:
        # LUT-based extinction is in m^-1; convert to km^-1 to match dz units.
        hyd_ext = hyd_ext * 1e3
    if OD_from_sfc:
        OD_str = "model layer base"
    else:
        OD_str = "model layer top"
    n_subcolumns = model.num_subcolumns
    Dims = model.ds["%s_q_subcolumns_cl" % cloud_str].shape
    if OD_from_sfc:
        # Integrate upward: prepend a zero layer and cumsum so each level
        # holds the optical path accumulated below it (at the layer base).
        dz = np.diff(z_values / 1e3, axis=1, prepend=0.)
        hyd_ext = np.cumsum(
            np.tile(dz, (n_subcolumns, 1, 1)) *
            np.concatenate((np.zeros(Dims[:2] + (1,)), hyd_ext[:, :, :-1]), axis=2), axis=2)
        atm_ext = np.cumsum(dz * np.concatenate((np.zeros((Dims[1],) + (1,)),
                                                 atm_ext[:, :-1]), axis=1), axis=1)
    else:
        # Integrate downward from TOA: flip, cumsum, then flip back so each
        # level holds the optical path accumulated above it (at the layer top).
        dz = np.diff(z_values / 1e3, axis=1, append=0.)
        hyd_ext = np.flip(
            np.cumsum(np.flip(np.tile(dz, (n_subcolumns, 1, 1)) *
                              np.concatenate((hyd_ext[:, :, 1:],
                                              np.zeros(Dims[:2] + (1,))), axis=2),
                              axis=2), axis=2), axis=2)
        atm_ext = np.flip(
            np.cumsum(np.flip(dz * np.concatenate((atm_ext[:, 1:],
                                                   np.zeros((Dims[1],) + (1,))), axis=1), axis=1), axis=1), axis=1)
    if use_empiric_calc:
        # Empirical path is in dB: two-way transmittance = 10^(-2*OD_dB/10).
        model.ds['hyd_ext_%s' % cloud_str] = xr.DataArray(10 ** (-2 * hyd_ext / 10.),
                                                          dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
    else:
        # LUT path is a (dimensionless) optical depth: two-way transmittance = exp(-2*OD).
        model.ds['hyd_ext_%s' % cloud_str] = \
            xr.DataArray(np.exp(-2 * hyd_ext), dims=model.ds["sub_col_Ze_tot_%s" % cloud_str].dims)
    model.ds['atm_ext'] = xr.DataArray(10 ** (-2 * atm_ext / 10), dims=model.ds[model.T_field].dims)
    model.ds['hyd_ext_%s' % cloud_str].attrs["long_name"] = \
        "Two-way %s hydrometeor transmittance at %s" % (cloud_str, OD_str)
    model.ds['hyd_ext_%s' % cloud_str].attrs["units"] = "1"
    model.ds['atm_ext'].attrs["long_name"] = \
        "Two-way atmospheric transmittance due to H2O and O2 at %s" % OD_str
    model.ds['atm_ext'].attrs["units"] = "1"
    return model
def calc_radar_empirical(instrument, model, is_conv, p_values, t_values, z_values, atm_ext,
                         OD_from_sfc=True, use_empiric_calc=False, hyd_types=None, **kwargs):
    """
    Calculates the radar stratiform or convective reflectivity and attenuation
    in a sub-columns using empirical formulation from literature.

    Parameters
    ----------
    instrument: :func:`emc2.core.Instrument` class
        The instrument to calculate the reflectivity parameters for.
        Must be a radar (a ValueError is raised otherwise).
    model: :func:`emc2.core.Model` class
        The model to calculate the parameters for.
    is_conv: bool
        True if the cell is convective
    p_values: ndarray
        model output pressure array in Pa.
    t_values: ndarray
        model output temperature array in C.
    z_values: ndarray
        model output height array in m.
    atm_ext: ndarray
        atmospheric attenuation per layer (dB/km).
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    use_empiric_calc: bool
        Kept for interface compatibility; this routine always performs the
        empirical calculation (it forwards use_empiric_calc=True downstream).
    hyd_types: list or None
        list of hydrometeor names to include in calculation. using default Model subclass types if None.
    Additional keyword arguments are passed into
    :py:func:`emc2.simulator.lidar_moments.accumulate_attenuation`.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The model with the added simulated radar parameters.
    """
    hyd_types = model.set_hyd_types(hyd_types)
    if is_conv:
        cloud_str = "conv"
    else:
        cloud_str = "strat"
    if not instrument.instrument_class.lower() == "radar":
        raise ValueError("Reflectivity can only be derived from a radar!")
    Dims = model.ds["%s_q_subcolumns_cl" % cloud_str].shape
    model.ds["sub_col_Ze_tot_%s" % cloud_str] = xr.DataArray(
        np.zeros(Dims), dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
    # Total liquid water content feeding the attenuation estimate below.
    # BUG FIX: initialized once, before the hydrometeor loop. Previously it
    # was re-zeroed on every iteration, so contributions from earlier liquid
    # classes were discarded (and attenuation was zero whenever an ice class
    # happened to be processed last).
    WC_tot = np.zeros(Dims)
    for hyd_type in hyd_types:
        q_field = "%s_q_subcolumns_%s" % (cloud_str, hyd_type)
        # Water content in g m^-3: mixing ratio times ideal-gas air density.
        WC = model.ds["%s_q_subcolumns_%s" % (cloud_str, hyd_type)] * p_values / \
            (instrument.R_d * (t_values + 273.15)) * 1e3
        # Fox and Illingworth (1997)
        if hyd_type.lower() == "cl":
            Ze_emp = 0.031 * WC ** 1.56
            WC_tot += WC
        # Hagen and Yuter (2003)
        elif hyd_type.lower() == "pl":
            Ze_emp = ((WC * 1e3) / 3.4) ** 1.75
            WC_tot += WC
        else:
            # Hogan et al. (2006): frequency-band-dependent ice relations.
            if 2e9 <= instrument.freq < 4e9:
                Ze_emp = 10 ** (((np.log10(WC) + 0.0197 * t_values + 1.7) / 0.060) / 10.)
            elif 27e9 <= instrument.freq < 40e9:
                Ze_emp = 10 ** (((np.log10(WC) + 0.0186 * t_values + 1.63) /
                                 (0.000242 * t_values + 0.0699)) / 10.)
            elif 75e9 <= instrument.freq < 110e9:
                Ze_emp = 10 ** (((np.log10(WC) + 0.00706 * t_values + 0.992) /
                                 (0.000580 * t_values + 0.0923)) / 10.)
            else:
                # Fall back to the Ka-band relation for frequencies outside
                # the tabulated bands.
                Ze_emp = 10 ** (((np.log10(WC) + 0.0186 * t_values + 1.63) /
                                 (0.000242 * t_values + 0.0699)) / 10.)
        var_name = "sub_col_Ze_%s_%s" % (hyd_type, cloud_str)
        model.ds[var_name] = xr.DataArray(
            Ze_emp.values, dims=model.ds[q_field].dims)
        # Accumulate total reflectivity in linear units, ignoring NaNs.
        model.ds["sub_col_Ze_tot_%s" % cloud_str] += Ze_emp.fillna(0)
    # One-way liquid attenuation coefficient from the dielectric factor.
    kappa_f = 6 * np.pi / (instrument.wavelength * model.Rho_hyd["cl"].magnitude) * \
        ((instrument.eps_liq - 1) / (instrument.eps_liq + 2)).imag * 4.34e6  # dB m^3 g^-1 km^-1
    model = accumulate_attenuation(model, is_conv, z_values, WC_tot * kappa_f, atm_ext,
                                   OD_from_sfc=OD_from_sfc, use_empiric_calc=True, **kwargs)
    return model
def calc_radar_bulk(instrument, model, is_conv, p_values, z_values, atm_ext, OD_from_sfc=True,
                    hyd_types=None, mie_for_ice=False, **kwargs):
    """
    Calculates the radar stratiform or convective reflectivity and attenuation
    in a sub-columns using bulk scattering LUTs assuming geometric scatterers
    (radiation scheme logic).
    Effective radii for each hydrometeor class must be provided (in model.ds).

    Parameters
    ----------
    instrument: Instrument
        The instrument to simulate. The instrument must be a radar.
    model: Model
        The model to generate the parameters for.
    is_conv: bool
        True if the cell is convective
    p_values: ndarray
        model output pressure array in Pa.
    z_values: ndarray
        model output height array in m.
    atm_ext: ndarray
        atmospheric attenuation per layer (dB/km).
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    hyd_types: list or None
        list of hydrometeor names to include in calculation. using default Model subclass types if None.
    mie_for_ice: bool
        If True, using bulk Mie calculation LUTs. Otherwise, currently using the bulk C6
        scattering LUTs for 8-column severely roughened aggregate.
    Additional keyword arguments are passed into
    :py:func:`emc2.simulator.lidar_moments.accumulate_attenuation`.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The model with the added simulated radar parameters.
    """
    hyd_types = model.set_hyd_types(hyd_types)
    n_subcolumns = model.num_subcolumns
    if is_conv:
        cloud_str = "conv"
        re_fields = model.conv_re_fields
    else:
        cloud_str = "strat"
        re_fields = model.strat_re_fields
    # Pick the bulk LUT family matching the host model's PSD assumptions.
    if model.model_name in ["E3SM", "CESM2"]:
        bulk_ice_lut = "CESM_ice"
        bulk_mie_ice_lut = "mie_ice_CESM_PSD"
        bulk_liq_lut = "CESM_liq"
    else:
        bulk_ice_lut = "E3_ice"
        bulk_mie_ice_lut = "mie_ice_E3_PSD"
        bulk_liq_lut = "E3_liq"
    Dims = model.ds["%s_q_subcolumns_cl" % cloud_str].shape
    model.ds["sub_col_Ze_tot_%s" % cloud_str] = xr.DataArray(
        np.zeros(Dims), dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
    hyd_ext = np.zeros(Dims)
    # Per-layer air mass (hydrostatic, dp/g) and thickness, tiled per subcolumn.
    rhoa_dz = np.tile(
        np.abs(np.diff(p_values, axis=1, append=0.)) / instrument.g,
        (n_subcolumns, 1, 1))
    dz = np.tile(
        np.diff(z_values, axis=1, append=0.), (n_subcolumns, 1, 1))
    for hyd_type in hyd_types:
        if hyd_type[-1] == 'l':
            rho_b = model.Rho_hyd[hyd_type]  # bulk water
            re_array = np.tile(model.ds[re_fields[hyd_type]].values, (n_subcolumns, 1, 1))
            if model.lambda_field is not None:  # assuming mu and lambda can be provided only for liq hydrometeors
                if not model.lambda_field[hyd_type] is None:
                    lambda_array = model.ds[model.lambda_field[hyd_type]].values
                    mu_array = model.ds[model.mu_field[hyd_type]].values
        else:
            rho_b = instrument.rho_i  # bulk ice
            # "Fluffiness" blends mass-equivalent and volume-equivalent radius
            # scalings for low-density ice particles.
            fi_factor = model.fluffy[hyd_type].magnitude * model.Rho_hyd[hyd_type] / rho_b + \
                (1 - model.fluffy[hyd_type].magnitude) * (model.Rho_hyd[hyd_type] / rho_b) ** (1 / 3)
            re_array = np.tile(model.ds[re_fields[hyd_type]].values * fi_factor,
                               (n_subcolumns, 1, 1))
        # Optical depth per layer from water path and effective radius
        # (re in um, hence the 1e-6); A_hyd is the projected-area cross
        # section per unit volume assuming geometric scatterers.
        tau_hyd = np.where(model.ds["%s_q_subcolumns_%s" % (cloud_str, hyd_type)] > 0,
                           3 * model.ds["%s_q_subcolumns_%s" % (cloud_str, hyd_type)] * rhoa_dz /
                           (2 * rho_b * re_array * 1e-6), 0)
        A_hyd = tau_hyd / (2 * dz)  # model assumes geometric scatterers
        if np.isin(hyd_type, ["ci", "pi"]):
            if mie_for_ice:
                r_eff_bulk = instrument.bulk_table[bulk_mie_ice_lut]["r_e"].values.copy()
                Qback_bulk = instrument.bulk_table[bulk_mie_ice_lut]["Q_back"].values
                Qext_bulk = instrument.bulk_table[bulk_mie_ice_lut]["Q_ext"].values
            else:
                r_eff_bulk = instrument.bulk_table[bulk_ice_lut]["r_e"].values.copy()
                Qback_bulk = instrument.bulk_table[bulk_ice_lut]["Q_back"].values
                Qext_bulk = instrument.bulk_table[bulk_ice_lut]["Q_ext"].values
        else:
            if model.model_name in ["E3SM", "CESM2"]:
                # CESM/E3SM liquid LUTs are indexed by (mu, lambda) rather than r_e.
                mu_b = np.tile(instrument.bulk_table[bulk_liq_lut]["mu"].values,
                               (instrument.bulk_table[bulk_liq_lut]["lambdas"].size)).flatten()
                lambda_b = instrument.bulk_table[bulk_liq_lut]["lambda"].values.flatten()
            else:
                r_eff_bulk = instrument.bulk_table[bulk_liq_lut]["r_e"].values
                Qback_bulk = instrument.bulk_table[bulk_liq_lut]["Q_back"].values
                Qext_bulk = instrument.bulk_table[bulk_liq_lut]["Q_ext"].values
        if np.logical_and(np.isin(hyd_type, ["cl", "pl"]), model.model_name in ["E3SM", "CESM2"]):
            # NOTE(review): this branch reads Qback_bulk/Qext_bulk, which for
            # CESM/E3SM liquid were not reassigned above (only mu_b/lambda_b
            # were) - presumably the LUT exposes matching flattened Q arrays;
            # confirm against the LUT schema.
            print("2-D interpolation of bulk liq radar backscattering using mu-lambda values")
            rel_locs = model.ds[model.q_names_stratiform[hyd_type]].values > 0.
            interpolator = LinearNDInterpolator(np.stack((mu_b, lambda_b), axis=1), Qback_bulk.flatten())
            interp_vals = interpolator(mu_array[rel_locs], lambda_array[rel_locs])
            back_tmp = np.ones_like(model.ds[model.q_names_stratiform[hyd_type]].values, dtype=float) * np.nan
            ext_tmp = np.copy(back_tmp)
            # Convert backscattering efficiency to equivalent reflectivity
            # (Rayleigh normalization via wavelength^4 / (K_w * pi^5)).
            np.place(back_tmp, rel_locs,
                     (interp_vals * instrument.wavelength ** 4) /
                     (instrument.K_w * np.pi ** 5) * 1e-6)
            model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                np.tile(back_tmp, (n_subcolumns, 1, 1)) * A_hyd,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
            print("2-D interpolation of bulk liq radar extinction using mu-lambda values")
            interpolator = LinearNDInterpolator(np.stack((mu_b, lambda_b), axis=1), Qext_bulk.flatten())
            interp_vals = interpolator(mu_array[rel_locs], lambda_array[rel_locs])
            np.place(ext_tmp, rel_locs, interp_vals)
            hyd_ext += np.tile(ext_tmp, (n_subcolumns, 1, 1)) * A_hyd
        else:
            # 1-D interpolation over effective radius for r_e-indexed LUTs.
            model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)] = xr.DataArray(
                (np.interp(re_array, r_eff_bulk, Qback_bulk) * A_hyd * instrument.wavelength ** 4) /
                (instrument.K_w * np.pi ** 5) * 1e-6,
                dims=model.ds["%s_q_subcolumns_cl" % cloud_str].dims)
            hyd_ext += np.interp(re_array, r_eff_bulk, Qext_bulk) * A_hyd
        model.ds["sub_col_Ze_tot_%s" % cloud_str] += model.ds["sub_col_Ze_%s_%s" % (
            hyd_type, cloud_str)].fillna(0)
    model = accumulate_attenuation(model, is_conv, z_values, hyd_ext, atm_ext,
                                   OD_from_sfc=OD_from_sfc, use_empiric_calc=False, **kwargs)
    return model
def calc_radar_micro(instrument, model, z_values, atm_ext, OD_from_sfc=True,
                     hyd_types=None, mie_for_ice=True, parallel=True, chunk=None, **kwargs):
    """
    Calculates the first 3 radar moments (reflectivity, mean Doppler velocity and spectral
    width) in a given column for the given radar using the microphysics (MG2) logic.

    Parameters
    ----------
    instrument: Instrument
        The instrument to simulate. The instrument must be a radar.
    model: Model
        The model to generate the parameters for.
    z_values: ndarray
        model output height array in m.
    atm_ext: ndarray
        atmospheric attenuation per layer (dB/km).
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    hyd_types: list or None
        list of hydrometeor names to include in calculation. using default Model subclass types if None.
        NOTE(review): the accumulation pattern below assumes "cl" is processed
        first ("cl" assigns the totals, other classes add to them) - confirm
        that custom hyd_types lists keep "cl" first.
    mie_for_ice: bool
        If True, using full Mie calculation LUTs. Otherwise, currently using the C6
        scattering LUTs for 8-column severely roughened aggregate.
    parallel: bool
        If True, use parallelism in calculating radar parameters.
    chunk: int or None
        The number of entries to process in one parallel loop. None will send all of
        the entries to the Dask worker queue at once. Sometimes, Dask will freeze if
        too many tasks are sent at once due to memory issues, so adjusting this number
        might be needed if that happens.
    Additional keyword arguments are passed into
    :py:func:`emc2.simulator.psd.calc_mu_lambda`.
    :py:func:`emc2.simulator.lidar_moments.accumulate_attenuation`.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The model with the added simulated radar parameters.
    """
    hyd_types = model.set_hyd_types(hyd_types)
    method_str = "LUTs (microphysics logic)"
    Dims = model.ds["strat_q_subcolumns_cl"].values.shape
    # Select the single-particle scattering source for ice; ice_lut /
    # ice_diam_var are only defined (and only used) when not using Mie.
    if mie_for_ice:
        scat_str = "Mie"
    else:
        if model.model_name in ["E3SM", "CESM2"]:
            scat_str = "m-D_A-D (D. Mitchell)"
            ice_lut = "CESM_ice"
            ice_diam_var = "p_diam"
        else:
            scat_str = "C6"
            ice_lut = "E3_ice"
            ice_diam_var = "p_diam_eq_V"
    # Moment accumulators over all hydrometeor classes (numerators and the
    # shared denominator of the reflectivity-weighted velocity moments).
    moment_denom_tot = np.zeros(Dims)
    V_d_numer_tot = np.zeros(Dims)
    sigma_d_numer_tot = np.zeros(Dims)
    # --- Stage 1: per-class Ze, Vd, sigma_d, and extinction ---
    for hyd_type in hyd_types:
        frac_names = model.strat_frac_names[hyd_type]
        n_names = model.N_field[hyd_type]
        # Allocate the output fields once (first iteration only).
        if not np.isin("sub_col_Ze_tot_strat", [x for x in model.ds.keys()]):
            model.ds["sub_col_Ze_tot_strat"] = xr.DataArray(
                np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
            model.ds["sub_col_Vd_tot_strat"] = xr.DataArray(
                np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
            model.ds["sub_col_sigma_d_tot_strat"] = xr.DataArray(
                np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
        model.ds["sub_col_Ze_%s_strat" % hyd_type] = xr.DataArray(
            np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
        model.ds["sub_col_Vd_%s_strat" % hyd_type] = xr.DataArray(
            np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
        model.ds["sub_col_sigma_d_%s_strat" % hyd_type] = xr.DataArray(
            np.zeros(Dims), dims=model.ds.strat_q_subcolumns_cl.dims)
        # PSD gamma-fit parameters for this class (adds N_0/lambda/mu to model.ds).
        fits_ds = calc_mu_lambda(model, hyd_type, subcolumns=True, **kwargs).ds
        N_0 = fits_ds["N_0"].values
        lambdas = fits_ds["lambda"].values
        mu = fits_ds["mu"].values
        total_hydrometeor = model.ds[frac_names].values * model.ds[n_names].values
        if np.logical_and(np.isin(hyd_type, ["ci", "pi"]), not mie_for_ice):
            p_diam = instrument.scat_table[ice_lut][ice_diam_var].values
            beta_p = instrument.scat_table[ice_lut]["beta_p"].values
            alpha_p = instrument.scat_table[ice_lut]["alpha_p"].values
        else:
            p_diam = instrument.mie_table[hyd_type]["p_diam"].values
            beta_p = instrument.mie_table[hyd_type]["beta_p"].values
            alpha_p = instrument.mie_table[hyd_type]["alpha_p"].values
        # NOTE(review): num_subcolumns is (re)bound here and re-used by the
        # second (spectral-width) loop below via scope leakage.
        num_subcolumns = model.num_subcolumns
        # Terminal fall speed per particle size; negative = downward.
        v_tmp = model.vel_param_a[hyd_type] * p_diam ** model.vel_param_b[hyd_type]
        v_tmp = -v_tmp.magnitude
        if hyd_type == "cl":
            _calc_liquid = lambda x: _calculate_observables_liquid(
                x, total_hydrometeor, N_0, lambdas, mu,
                alpha_p, beta_p, v_tmp, num_subcolumns, instrument, p_diam)
            if parallel:
                print("Doing parallel radar calculations for %s" % hyd_type)
                if chunk is None:
                    tt_bag = db.from_sequence(np.arange(0, Dims[1], 1))
                    my_tuple = tt_bag.map(_calc_liquid).compute()
                else:
                    # Feed the Dask queue in chunks to limit memory pressure.
                    my_tuple = []
                    j = 0
                    while j < Dims[1]:
                        if j + chunk >= Dims[1]:
                            ind_max = Dims[1]
                        else:
                            ind_max = j + chunk
                        print("Stage 1 of 2: processing columns %d-%d out of %d" % (j, ind_max, Dims[1]))
                        tt_bag = db.from_sequence(np.arange(j, ind_max, 1))
                        my_tuple += tt_bag.map(_calc_liquid).compute()
                        j += chunk
            else:
                my_tuple = [x for x in map(_calc_liquid, np.arange(0, Dims[1], 1))]
            # "cl" ASSIGNS the accumulators (assumed to run first); other
            # classes below use "+=".
            V_d_numer_tot = np.nan_to_num(np.stack([x[0] for x in my_tuple], axis=1))
            moment_denom_tot = np.nan_to_num(np.stack([x[1] for x in my_tuple], axis=1))
            hyd_ext = np.nan_to_num(np.stack([x[2] for x in my_tuple], axis=1))
            model.ds["sub_col_Ze_cl_strat"][:, :, :] = np.stack(
                [x[3] for x in my_tuple], axis=1)
            model.ds["sub_col_Vd_cl_strat"][:, :, :] = np.stack(
                [x[4] for x in my_tuple], axis=1)
            model.ds["sub_col_sigma_d_cl_strat"][:, :, :] = np.stack(
                [x[5] for x in my_tuple], axis=1)
            del my_tuple
        else:
            sub_q_array = model.ds["strat_q_subcolumns_%s" % hyd_type].values
            _calc_other = lambda x: _calculate_other_observables(
                x, total_hydrometeor, N_0, lambdas, model.num_subcolumns,
                beta_p, alpha_p, v_tmp,
                instrument.wavelength, instrument.K_w,
                sub_q_array, hyd_type, p_diam)
            if parallel:
                print("Doing parallel radar calculation for %s" % hyd_type)
                if chunk is None:
                    tt_bag = db.from_sequence(np.arange(0, Dims[1], 1))
                    my_tuple = tt_bag.map(_calc_other).compute()
                else:
                    my_tuple = []
                    j = 0
                    while j < Dims[1]:
                        if j + chunk >= Dims[1]:
                            ind_max = Dims[1]
                        else:
                            ind_max = j + chunk
                        print("Stage 1 of 2: Processing columns %d-%d out of %d" % (j, ind_max, Dims[1]))
                        tt_bag = db.from_sequence(np.arange(j, ind_max, 1))
                        my_tuple += tt_bag.map(_calc_other).compute()
                        j += chunk
            else:
                my_tuple = [x for x in map(_calc_other, np.arange(0, Dims[1], 1))]
            V_d_numer_tot += np.nan_to_num(np.stack([x[0] for x in my_tuple], axis=1))
            moment_denom_tot += np.nan_to_num(np.stack([x[1] for x in my_tuple], axis=1))
            # NOTE(review): hyd_ext is overwritten (not "+=") on every class,
            # so only the last hydrometeor's extinction reaches
            # accumulate_attenuation below - confirm this is intended.
            hyd_ext = np.nan_to_num(np.stack([x[2] for x in my_tuple], axis=1))
            model.ds["sub_col_Ze_%s_strat" % hyd_type][:, :, :] = np.stack([x[3] for x in my_tuple], axis=1)
            model.ds["sub_col_Vd_%s_strat" % hyd_type][:, :, :] = np.stack([x[4] for x in my_tuple], axis=1)
            model.ds["sub_col_sigma_d_%s_strat" % hyd_type][:, :, :] = np.stack([x[5] for x in my_tuple], axis=1)
        if "sub_col_Ze_tot_strat" in model.ds.variables.keys():
            model.ds["sub_col_Ze_tot_strat"] += model.ds["sub_col_Ze_%s_strat" % hyd_type].fillna(0)
        else:
            model.ds["sub_col_Ze_tot_strat"] = model.ds["sub_col_Ze_%s_strat" % hyd_type].fillna(0)
        model.ds["sub_col_Vd_%s_strat" % hyd_type].attrs["long_name"] = \
            "Mean Doppler velocity from stratiform %s hydrometeors" % hyd_type
        model.ds["sub_col_Vd_%s_strat" % hyd_type].attrs["units"] = "m s-1"
        model.ds["sub_col_Vd_%s_strat" % hyd_type].attrs["Processing method"] = method_str
        model.ds["sub_col_sigma_d_%s_strat" % hyd_type].attrs["long_name"] = \
            "Spectral width from stratiform %s hydrometeors" % hyd_type
        model.ds["sub_col_sigma_d_%s_strat" % hyd_type].attrs["units"] = "m s-1"
        model.ds["sub_col_sigma_d_%s_strat" % hyd_type].attrs["Processing method"] = method_str
    # Reflectivity-weighted mean Doppler velocity over all classes.
    model.ds["sub_col_Vd_tot_strat"] = xr.DataArray(V_d_numer_tot / moment_denom_tot,
                                                    dims=model.ds["sub_col_Ze_tot_strat"].dims)
    print("Now calculating total spectral width (this may take some time)")
    # --- Stage 2: total spectral width about the total mean velocity ---
    for hyd_type in hyd_types:
        fits_ds = calc_mu_lambda(model, hyd_type, subcolumns=True, **kwargs).ds
        N_0 = fits_ds["N_0"].values
        lambdas = fits_ds["lambda"].values
        mu = fits_ds["mu"].values
        if np.logical_and(np.isin(hyd_type, ["ci", "pi"]), not mie_for_ice):
            p_diam = instrument.scat_table[ice_lut][ice_diam_var].values
            beta_p = instrument.scat_table[ice_lut]["beta_p"].values
            alpha_p = instrument.scat_table[ice_lut]["alpha_p"].values
        else:
            p_diam = instrument.mie_table[hyd_type]["p_diam"].values
            beta_p = instrument.mie_table[hyd_type]["beta_p"].values
            alpha_p = instrument.mie_table[hyd_type]["alpha_p"].values
        v_tmp = model.vel_param_a[hyd_type] * p_diam ** model.vel_param_b[hyd_type]
        v_tmp = -v_tmp.magnitude
        vel_param_a = model.vel_param_a
        vel_param_b = model.vel_param_b
        frac_names = model.strat_frac_names[hyd_type]
        n_names = model.N_field[hyd_type]
        total_hydrometeor = model.ds[frac_names] * model.ds[model.N_field[hyd_type]]
        Vd_tot = model.ds["sub_col_Vd_tot_strat"].values
        if hyd_type == "cl":
            # num_subcolumns here is the binding left over from stage 1.
            _calc_sigma_d_liq = lambda x: _calc_sigma_d_tot_cl(
                x, N_0, lambdas, mu, instrument,
                vel_param_a, vel_param_b, total_hydrometeor,
                p_diam, Vd_tot, num_subcolumns)
            if parallel:
                if chunk is None:
                    tt_bag = db.from_sequence(np.arange(0, Dims[1], 1))
                    sigma_d_numer = tt_bag.map(_calc_sigma_d_liq).compute()
                else:
                    sigma_d_numer = []
                    j = 0
                    while j < Dims[1]:
                        if j + chunk >= Dims[1]:
                            ind_max = Dims[1]
                        else:
                            ind_max = j + chunk
                        print("Stage 2 of 2: Processing columns %d-%d out of %d" % (j, ind_max, Dims[1]))
                        tt_bag = db.from_sequence(np.arange(j, ind_max, 1))
                        sigma_d_numer += tt_bag.map(_calc_sigma_d_liq).compute()
                        j += chunk
            else:
                sigma_d_numer = [x for x in map(_calc_sigma_d_liq, np.arange(0, Dims[1], 1))]
            # "cl" ASSIGNS the numerator (assumed first); others use "+=".
            sigma_d_numer_tot = np.nan_to_num(np.stack([x[0] for x in sigma_d_numer], axis=1))
        else:
            sub_q_array = model.ds["strat_q_subcolumns_%s" % hyd_type].values
            _calc_sigma = lambda x: _calc_sigma_d_tot(
                x, num_subcolumns, v_tmp, N_0, lambdas, mu,
                total_hydrometeor, Vd_tot, sub_q_array, p_diam, beta_p)
            if parallel:
                if chunk is None:
                    tt_bag = db.from_sequence(np.arange(0, Dims[1], 1))
                    sigma_d_numer = tt_bag.map(_calc_sigma).compute()
                else:
                    sigma_d_numer = []
                    j = 0
                    while j < Dims[1]:
                        if j + chunk >= Dims[1]:
                            ind_max = Dims[1]
                        else:
                            ind_max = j + chunk
                        print("Stage 2 of 2: processing columns %d-%d out of %d" % (j, ind_max, Dims[1]))
                        tt_bag = db.from_sequence(np.arange(j, ind_max, 1))
                        sigma_d_numer += tt_bag.map(_calc_sigma).compute()
                        j += chunk
            else:
                sigma_d_numer = [x for x in map(_calc_sigma, np.arange(0, Dims[1], 1))]
            sigma_d_numer_tot += np.nan_to_num(np.stack([x[0] for x in sigma_d_numer], axis=1))
    # Remove the PSD fit fields added to model.ds by calc_mu_lambda.
    model.ds = model.ds.drop_vars(("N_0", "lambda", "mu"))
    model.ds["sub_col_sigma_d_tot_strat"] = xr.DataArray(np.sqrt(sigma_d_numer_tot / moment_denom_tot),
                                                         dims=model.ds["sub_col_Vd_tot_strat"].dims)
    model = accumulate_attenuation(model, False, z_values, hyd_ext, atm_ext,
                                   OD_from_sfc=OD_from_sfc, use_empiric_calc=False, **kwargs)
    model.ds['sub_col_Vd_tot_strat'].attrs["long_name"] = \
        "Mean Doppler velocity from all stratiform hydrometeors"
    model.ds['sub_col_Vd_tot_strat'].attrs["units"] = "m s-1"
    model.ds['sub_col_Vd_tot_strat'].attrs["Processing method"] = method_str
    model.ds['sub_col_Vd_tot_strat'].attrs["Ice scattering database"] = scat_str
    model.ds['sub_col_sigma_d_tot_strat'].attrs["long_name"] = \
        "Spectral width from all stratiform hydrometeors"
    model.ds['sub_col_sigma_d_tot_strat'].attrs["units"] = "m s-1"
    model.ds["sub_col_sigma_d_tot_strat"].attrs["Processing method"] = method_str
    model.ds["sub_col_sigma_d_tot_strat"].attrs["Ice scattering database"] = scat_str
    return model
def calc_radar_moments(instrument, model, is_conv,
                       OD_from_sfc=True, hyd_types=None, parallel=True, chunk=None, mie_for_ice=False,
                       use_rad_logic=True, use_empiric_calc=False, **kwargs):
    """
    Calculates the reflectivity, doppler velocity, and spectral width
    in a given column for the given radar.

    NOTE:
    When starting a parallel task (in microphysics approach), it is recommended
    to wrap the top-level python script calling the EMC^2 processing ('lines_of_code')
    with the following command (just below the 'import' statements):

    if __name__ == "__main__":
        lines_of_code

    Parameters
    ----------
    instrument: Instrument
        The instrument to simulate. The instrument must be a radar.
    model: Model
        The model to generate the parameters for.
    is_conv: bool
        True if the cell is convective.
    OD_from_sfc: bool
        If True, then calculate optical depth from the surface.
    hyd_types: list or None
        list of hydrometeor names to include in calculation. using default Model subclass types if None.
    parallel: bool
        If True, then use parallelism to calculate each column quantity.
    chunk: None or int
        If using parallel processing, only send this number of time periods to the
        parallel loop at one time. Sometimes Dask will crash if there are too many
        tasks in the queue, so setting this value will help avoid that.
    mie_for_ice: bool
        If True, using full Mie calculation LUTs. Otherwise, currently using the C6
        scattering LUTs for 8-column aggregate at 270 K.
    use_rad_logic: bool
        When True using radiation scheme logic in calculations, which includes using
        the cloud fraction fields utilized in a model radiative scheme, as well as bulk
        scattering LUTs (effective radii dependent scattering variables). Otherwise, and
        only in the stratiform case, using the microphysics scheme logic, which includes
        the cloud fraction fields utilized by the model microphysics scheme and single
        particle scattering LUTs.
        NOTE: because of its single-particle calculation method, the microphysics
        approach is significantly slower than the radiation approach. Also, the cloud
        fraction logic in these schemes does not necessarily fully overlap.
    use_empiric_calc: bool
        When True using empirical relations from literature for the fwd calculations
        (the cloud fraction still follows the scheme logic set by use_rad_logic).

    Additional keyword arguments are passed into
    :py:func:`emc2.simulator.psd.calc_mu_lambda`.
    :py:func:`emc2.simulator.lidar_moments.accumulate_attenuation`.
    :py:func:`emc2.simulator.lidar_moments.calc_radar_empirical`.
    :py:func:`emc2.simulator.lidar_moments.calc_radar_bulk`.
    :py:func:`emc2.simulator.lidar_moments.calc_radar_micro`.

    Returns
    -------
    model: :func:`emc2.core.Model`
        The xarray Dataset containing the calculated radar moments.
    """
    hyd_types = model.set_hyd_types(hyd_types)
    if is_conv:
        cloud_str = "conv"
        cloud_str_full = "convective"
        if np.logical_and(not use_empiric_calc, not use_rad_logic):
            use_rad_logic = True  # Force rad scheme logic if in conv scheme
    else:
        cloud_str = "strat"
        cloud_str_full = "stratiform"
    # Label stored in output attrs describing which ice scattering source was used.
    if use_empiric_calc:
        scat_str = "Empirical (no utilized scattering database)"
    elif mie_for_ice:
        scat_str = "Mie"
    else:
        scat_str = "C6"
    if not instrument.instrument_class.lower() == "radar":
        raise ValueError("Instrument must be a radar!")
    if "%s_q_subcolumns_cl" % cloud_str not in model.ds.variables.keys():
        raise KeyError("Water mixing ratio in %s subcolumns must be generated first!" % cloud_str_full)
    p_field = model.p_field
    t_field = model.T_field
    z_field = model.z_field
    # Do unit conversions using pint - pressure in Pa, T in K, z in m
    # NOTE(review): `ureg` and `quantity` come from imports outside this view
    # (presumably pint). Temperature is actually converted to Celsius below,
    # not K as the comment above says -- confirm downstream expectations.
    p_temp = model.ds[p_field].values * getattr(ureg, model.ds[p_field].attrs["units"])
    p_values = p_temp.to('pascal').magnitude
    t_temp = quantity(model.ds[t_field].values, model.ds[t_field].attrs["units"])
    t_values = t_temp.to('celsius').magnitude
    z_temp = model.ds[z_field].values * getattr(ureg, model.ds[z_field].attrs["units"])
    z_values = z_temp.to('meter').magnitude
    del p_temp, t_temp, z_temp
    # Gaseous (atmospheric) attenuation coefficient for this radar.
    kappa_ds = calc_radar_atm_attenuation(instrument, model)
    atm_ext = kappa_ds.ds["kappa_att"].values
    t0 = time()
    # Dispatch to one of the three forward-calculation approaches.
    if use_empiric_calc:
        print("Generating %s radar variables using empirical formulation" % cloud_str_full)
        method_str = "Empirical"
        model = calc_radar_empirical(instrument, model, is_conv, p_values, t_values, z_values,
                                     atm_ext, OD_from_sfc=OD_from_sfc, hyd_types=hyd_types, **kwargs)
    elif use_rad_logic:
        print("Generating %s radar variables using radiation logic" % cloud_str_full)
        method_str = "Bulk (radiation logic)"
        model = calc_radar_bulk(instrument, model, is_conv, p_values, z_values,
                                atm_ext, OD_from_sfc=OD_from_sfc, mie_for_ice=mie_for_ice, hyd_types=hyd_types,
                                **kwargs)
    else:
        print("Generating %s radar variables using microphysics logic (slowest processing)" % cloud_str_full)
        method_str = "LUTs (microphysics logic)"
        # NOTE(review): unlike the two branches above, the return value is not
        # reassigned to `model`; presumably calc_radar_micro mutates in place.
        calc_radar_micro(instrument, model, z_values,
                         atm_ext, OD_from_sfc=OD_from_sfc,
                         hyd_types=hyd_types, mie_for_ice=mie_for_ice,
                         parallel=parallel, chunk=chunk, **kwargs)
    # Convert each per-hydrometeor linear reflectivity to dBZ and mask
    # non-finite values (log10 of 0 yields -inf).
    for hyd_type in hyd_types:
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)] = 10 * np.log10(
            model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)])
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].values = \
            np.where(np.isinf(model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].values), np.nan,
                     model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].values)
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)] = model.ds[
            "sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].where(
            np.isfinite(model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)]))
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].attrs["long_name"] = \
            "Equivalent radar reflectivity factor from %s %s hydrometeors" % (cloud_str_full, hyd_type)
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].attrs["units"] = "dBZ"
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].attrs["Processing method"] = method_str
        model.ds["sub_col_Ze_%s_%s" % (hyd_type, cloud_str)].attrs["Ice scattering database"] = scat_str
    # Attenuated total reflectivity: apply hydrometeor and atmospheric
    # two-way attenuation factors (NaN gaps treated as no attenuation).
    model.ds['sub_col_Ze_att_tot_%s' % cloud_str] = model.ds["sub_col_Ze_tot_%s" % cloud_str] * \
        model.ds['hyd_ext_%s' % cloud_str].fillna(1) * model.ds['atm_ext'].fillna(1)
    model.ds["sub_col_Ze_tot_%s" % cloud_str] = model.ds["sub_col_Ze_tot_%s" % cloud_str].where(
        np.isfinite(model.ds["sub_col_Ze_tot_%s" % cloud_str]))
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str] = model.ds["sub_col_Ze_att_tot_%s" % cloud_str].where(
        np.isfinite(model.ds["sub_col_Ze_att_tot_%s" % cloud_str]))
    # Totals to dBZ as well, again mapping -inf (zero reflectivity) to NaN.
    model.ds["sub_col_Ze_tot_%s" % cloud_str] = 10 * np.log10(model.ds["sub_col_Ze_tot_%s" % cloud_str])
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str] = 10 * np.log10(model.ds["sub_col_Ze_att_tot_%s" % cloud_str])
    model.ds["sub_col_Ze_tot_%s" % cloud_str].values = \
        np.where(np.isinf(model.ds["sub_col_Ze_tot_%s" % cloud_str].values), np.nan,
                 model.ds["sub_col_Ze_tot_%s" % cloud_str].values)
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str].values = \
        np.where(np.isinf(model.ds["sub_col_Ze_att_tot_%s" % cloud_str].values), np.nan,
                 model.ds["sub_col_Ze_att_tot_%s" % cloud_str].values)
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str].attrs["long_name"] = \
        "Attenuated equivalent radar reflectivity factor from all %s hydrometeors" % cloud_str_full
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str].attrs["units"] = "dBZ"
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str].attrs["Processing method"] = method_str
    model.ds["sub_col_Ze_att_tot_%s" % cloud_str].attrs["Ice scattering database"] = scat_str
    model.ds["sub_col_Ze_tot_%s" % cloud_str].attrs["long_name"] = \
        "Equivalent radar reflectivity factor from all %s hydrometeors" % cloud_str_full
    model.ds["sub_col_Ze_tot_%s" % cloud_str].attrs["units"] = "dBZ"
    model.ds["sub_col_Ze_tot_%s" % cloud_str].attrs["Processing method"] = method_str
    model.ds["sub_col_Ze_tot_%s" % cloud_str].attrs["Ice scattering database"] = scat_str
    model.ds['hyd_ext_%s' % cloud_str].attrs["Processing method"] = method_str
    model.ds['hyd_ext_%s' % cloud_str].attrs["Ice scattering database"] = scat_str
    print("Done! total processing time = %.2fs" % (time() - t0))
    return model
def _calc_sigma_d_tot_cl(tt, N_0, lambdas, mu, instrument,
vel_param_a, vel_param_b, total_hydrometeor,
p_diam, Vd_tot, num_subcolumns):
hyd_type = "cl"
Dims = Vd_tot.shape
sigma_d_numer = np.zeros((Dims[0], Dims[2]), dtype='float64')
moment_denom = np.zeros((Dims[0], Dims[2]), dtype='float64')
if tt % 50 == 0:
print('Stratiform moment for class cl progress: %d/%d' % (tt, total_hydrometeor.shape[1]))
num_diam = len(p_diam)
Dims = Vd_tot.shape
for k in range(Dims[2]):
if np.all(total_hydrometeor[tt, k] == 0):
continue
N_0_tmp = N_0[:, tt, k].astype('float64')
N_0_tmp, d_diam_tmp = np.meshgrid(N_0_tmp, p_diam)
lambda_tmp = lambdas[:, tt, k].astype('float64')
lambda_tmp, d_diam_tmp = np.meshgrid(lambda_tmp, p_diam)
mu_temp = mu[:, tt, k] * np.ones_like(lambda_tmp)
N_D = N_0_tmp * d_diam_tmp ** mu_temp * np.exp(-lambda_tmp * d_diam_tmp)
Calc_tmp = np.tile(
instrument.mie_table[hyd_type]["beta_p"].values,
(num_subcolumns, 1)) * N_D.T
moment_denom = np.trapz(Calc_tmp, x=p_diam, axis=1).astype('float64')
v_tmp = vel_param_a[hyd_type] * p_diam ** vel_param_b[hyd_type]
v_tmp = -v_tmp.magnitude.astype('float64')
Calc_tmp2 = (v_tmp - np.tile(Vd_tot[:, tt, k], (num_diam, 1)).T) ** 2 * Calc_tmp.astype('float64')
sigma_d_numer[:, k] = np.trapz(Calc_tmp2, x=p_diam, axis=1)
return sigma_d_numer, moment_denom
def _calc_sigma_d_tot(tt, num_subcolumns, v_tmp, N_0, lambdas, mu,
total_hydrometeor, vd_tot, sub_q_array, p_diam, beta_p):
Dims = vd_tot.shape
sigma_d_numer = np.zeros((Dims[0], Dims[2]), dtype='float64')
moment_denom = np.zeros((Dims[0], Dims[2]), dtype='float64')
num_diam = len(p_diam)
mu = mu.max()
if tt % 50 == 0:
print('Stratiform moment for class progress: %d/%d' % (tt, Dims[1]))
for k in range(Dims[2]):
if np.all(total_hydrometeor[tt, k] == 0):
continue
N_0_tmp = N_0[:, tt, k]
lambda_tmp = lambdas[:, tt, k]
if np.all(np.isnan(N_0_tmp)):
continue
N_D = []
for i in range(Dims[0]):
N_D.append(N_0_tmp[i] * p_diam ** mu * np.exp(-lambda_tmp[i] * p_diam))
N_D = np.stack(N_D, axis=1).astype('float64')
Calc_tmp = np.tile(beta_p, (num_subcolumns, 1)) * N_D.T
moment_denom = np.trapz(Calc_tmp, x=p_diam, axis=1).astype('float64')
Calc_tmp2 = (v_tmp - np.tile(vd_tot[:, tt, k], (num_diam, 1)).T) ** 2 * Calc_tmp.astype('float64')
Calc_tmp2 = np.trapz(Calc_tmp2, x=p_diam, axis=1)
sigma_d_numer[:, k] = np.where(sub_q_array[:, tt, k] == 0, 0, Calc_tmp2)
return sigma_d_numer, moment_denom
def _calculate_observables_liquid(tt, total_hydrometeor, N_0, lambdas, mu,
alpha_p, beta_p, v_tmp, num_subcolumns, instrument, p_diam):
height_dims = N_0.shape[2]
V_d_numer_tot = np.zeros((N_0.shape[0], height_dims))
V_d = np.zeros((N_0.shape[0], height_dims))
Ze = np.zeros_like(V_d)
sigma_d = np.zeros_like(V_d)
moment_denom_tot = np.zeros_like(V_d_numer_tot)
hyd_ext = np.zeros_like(V_d_numer_tot)
num_diam = len(p_diam)
if tt % 50 == 0:
print("Processing column %d" % tt)
np.seterr(all="ignore")
for k in range(height_dims):
if np.all(total_hydrometeor[tt, k] == 0):
continue
if num_subcolumns > 1:
N_0_tmp = np.squeeze(N_0[:, tt, k])
lambda_tmp = np.squeeze(lambdas[:, tt, k])
mu_temp = np.squeeze(mu[:, tt, k])
else:
N_0_tmp = N_0[:, tt, k]
lambda_tmp = lambdas[:, tt, k]
mu_temp = mu[:, tt, k]
if all([np.all(np.isnan(x)) for x in N_0_tmp]):
continue
N_D = []
for i in range(N_0_tmp.shape[0]):
N_D.append(N_0_tmp[i] * p_diam ** mu_temp[i] * np.exp(-lambda_tmp[i] * p_diam))
N_D = np.stack(N_D, axis=0)
Calc_tmp = beta_p * N_D
tmp_od = np.trapz(alpha_p * N_D, x=p_diam, axis=1)
moment_denom = np.trapz(Calc_tmp, x=p_diam, axis=1).astype('float64')
Ze[:, k] = \
(moment_denom * instrument.wavelength ** 4) / (instrument.K_w * np.pi ** 5) * 1e-6
Calc_tmp2 = v_tmp * Calc_tmp.astype('float64')
V_d_numer = np.trapz(Calc_tmp2, x=p_diam, axis=1)
V_d[:, k] = V_d_numer / moment_denom
Calc_tmp2 = (v_tmp - np.tile(V_d[:, k], (num_diam, 1)).T) ** 2 * Calc_tmp
sigma_d_numer = np.trapz(Calc_tmp2, x=p_diam, axis=1)
sigma_d[:, k] = np.sqrt(sigma_d_numer / moment_denom)
V_d_numer_tot[:, k] += V_d_numer
moment_denom_tot[:, k] += moment_denom
hyd_ext[:, k] += tmp_od
return V_d_numer_tot, moment_denom_tot, hyd_ext, Ze, V_d, sigma_d
def _calculate_other_observables(tt, total_hydrometeor, N_0, lambdas,
num_subcolumns, beta_p, alpha_p, v_tmp, wavelength,
K_w, sub_q_array, hyd_type, p_diam):
Dims = sub_q_array.shape
if tt % 50 == 0:
print('Stratiform moment for class %s progress: %d/%d' % (hyd_type, tt, Dims[1]))
Ze = np.zeros((num_subcolumns, Dims[2]))
V_d = np.zeros_like(Ze)
sigma_d = np.zeros_like(Ze)
V_d_numer_tot = np.zeros_like(Ze)
moment_denom_tot = np.zeros_like(Ze)
hyd_ext = np.zeros_like(Ze)
for k in range(Dims[2]):
if np.all(total_hydrometeor[tt, k] == 0):
continue
num_diam = len(p_diam)
N_D = []
for i in range(V_d.shape[0]):
N_0_tmp = N_0[i, tt, k]
lambda_tmp = lambdas[i, tt, k]
N_D.append(N_0_tmp * np.exp(-lambda_tmp * p_diam))
N_D = np.stack(N_D, axis=0)
Calc_tmp = np.tile(beta_p, (num_subcolumns, 1)) * N_D
tmp_od = np.tile(alpha_p, (num_subcolumns, 1)) * N_D
tmp_od = np.trapz(tmp_od, x=p_diam, axis=1)
tmp_od = np.where(sub_q_array[:, tt, k] == 0, 0, tmp_od)
moment_denom = np.trapz(Calc_tmp, x=p_diam, axis=1)
moment_denom = np.where(sub_q_array[:, tt, k] == 0, 0, moment_denom)
Ze[:, k] = \
(moment_denom * wavelength ** 4) / (K_w * np.pi ** 5) * 1e-6
Calc_tmp2 = Calc_tmp * v_tmp
V_d_numer = np.trapz(Calc_tmp2, axis=1, x=p_diam)
V_d_numer = np.where(sub_q_array[:, tt, k] == 0, 0, V_d_numer)
V_d[:, k] = V_d_numer / moment_denom
Calc_tmp2 = (v_tmp - np.tile(V_d[:, k], (num_diam, 1)).T) ** 2 * Calc_tmp
Calc_tmp2 = np.trapz(Calc_tmp2, axis=1, x=p_diam)
sigma_d_numer = np.where(sub_q_array[:, tt, k] == 0, 0, Calc_tmp2)
sigma_d[:, k] = np.sqrt(sigma_d_numer / moment_denom)
V_d_numer_tot[:, k] += V_d_numer
moment_denom_tot[:, k] += moment_denom
hyd_ext[:, k] += tmp_od
return V_d_numer_tot, moment_denom_tot, hyd_ext, Ze, V_d, sigma_d
|
{"hexsha": "0a5c69706d7949713bdbb39757af37c8ecba5896", "size": 47710, "ext": "py", "lang": "Python", "max_stars_repo_path": "emc2/simulator/radar_moments.py", "max_stars_repo_name": "columncolab/EMC2", "max_stars_repo_head_hexsha": "19dc8e25ebf477e1ad632d9e2d91692dcd80d3e4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-10-14T15:08:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T20:34:27.000Z", "max_issues_repo_path": "emc2/simulator/radar_moments.py", "max_issues_repo_name": "columncolab/EMC2", "max_issues_repo_head_hexsha": "19dc8e25ebf477e1ad632d9e2d91692dcd80d3e4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-01-10T16:30:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T20:17:45.000Z", "max_forks_repo_path": "emc2/simulator/radar_moments.py", "max_forks_repo_name": "columncolab/EMC2", "max_forks_repo_head_hexsha": "19dc8e25ebf477e1ad632d9e2d91692dcd80d3e4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-10-07T14:58:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-27T18:47:12.000Z", "avg_line_length": 48.8831967213, "max_line_length": 114, "alphanum_fraction": 0.6086355062, "include": true, "reason": "import numpy,from scipy", "num_tokens": 13166}
|
#!/usr/bin/env python
import argparse
import gzip
import re
import sys
from collections import Counter
from functools import partial
from itertools import zip_longest
import faiss
import networkx as nx
import numpy as np
from chinese_whispers import chinese_whispers, aggregate_clusters
from gensim.models import KeyedVectors
from utils import grouper
parser = argparse.ArgumentParser()
parser.add_argument('--neighbors', '-n', type=int, default=10)
parser.add_argument('--pickle', type=argparse.FileType('wb'))
parser.add_argument('words', type=argparse.FileType('rb'))
parser.add_argument('contexts', type=argparse.FileType('rb'))
parser.add_argument('relations', type=argparse.FileType('rb'))
parser.add_argument('triples', type=argparse.FileType('rb'))
args = parser.parse_args()

# Embedding spaces for verbs (words), subjects (contexts) and objects (relations).
wordmodel = KeyedVectors.load_word2vec_format(args.words, binary=False)
contextmodel = KeyedVectors.load_word2vec_format(args.contexts, binary=False)
relationmodel = KeyedVectors.load_word2vec_format(args.relations, binary=False)

spos = set()

# Strips the trailing "#POS" tag from a token, e.g. "run#VB" -> "run".
POS = r'\#\w+$'
extract = partial(re.sub, POS, '')

# Collect the unique (verb, subject, object) triples covered by all three models.
# NOTE: `obj` instead of `object` to avoid shadowing the builtin.
with gzip.open(args.triples) as f:
    for line in f:
        _, verb, subject, obj = line.decode('utf-8').strip().split(' ', 3)
        if verb in wordmodel and subject in contextmodel and obj in relationmodel:
            spos.add((verb, subject, obj))

spos = list(spos)
index2triple = {}

# One row per triple: concatenation of the three embedding vectors.
X = np.empty((len(spos), wordmodel.vector_size + contextmodel.vector_size + relationmodel.vector_size), 'float32')

for i, (verb, subject, obj) in enumerate(spos):
    # This changes order from VSO to SVO because I use it everywhere.
    j = 0
    X[i, j:j + contextmodel.vector_size] = contextmodel[subject]
    j += contextmodel.vector_size
    X[i, j:j + wordmodel.vector_size] = wordmodel[verb]
    j += wordmodel.vector_size
    X[i, j:j + relationmodel.vector_size] = relationmodel[obj]
    index2triple[i] = (extract(subject), extract(verb), extract(obj))

# Exact inner-product nearest-neighbour index over the triple vectors.
knn = faiss.IndexFlatIP(X.shape[1])
knn.add(X)

G, maximal_distance = nx.Graph(), -1

# Query the index in batches and connect each triple to its nearest neighbours.
for batch in grouper(range(X.shape[0]), 2048):
    batch = [j for j in batch if j is not None]  # grouper pads the last chunk with None
    D, I = knn.search(X[batch, :], args.neighbors + 1)  # +1 because the query matches itself
    last = min(batch)
    print('%d / %d' % (last, X.shape[0]), file=sys.stderr)

    for i, (_D, _I) in enumerate(zip(D, I)):
        source = index2triple[last + i]
        words = Counter()
        for d, j in zip(_D.ravel(), _I.ravel()):
            if last + i != j:  # skip the self-match
                words[index2triple[j]] = float(d)
        for target, distance in words.most_common(args.neighbors):
            G.add_edge(source, target, weight=distance)
            maximal_distance = max(maximal_distance, distance)

# Normalize similarities into weights suitable for chinese whispers.
for _, _, d in G.edges(data=True):
    d['weight'] = maximal_distance / d['weight']

if args.pickle is not None:
    import pickle
    pickle.dump(list(G.edges(data=True)), args.pickle, protocol=3)
    sys.exit(0)

chinese_whispers(G, weighting='top', iterations=20)
# FIX: the cluster map was previously computed twice (the first result was
# discarded); compute it once and reuse it.
clusters = aggregate_clusters(G)

for label, cluster in sorted(clusters.items(), key=lambda e: len(e[1]), reverse=True):
    print('# Cluster %d\n' % label)
    subjects = {subject for subject, _, _ in cluster}
    predicates = {predicate for _, predicate, _ in cluster}
    objects = {obj for _, _, obj in cluster}
    print('Predicates: %s' % ', '.join(predicates))
    print('Subjects: %s' % ', '.join(subjects))
    print('Objects: %s\n' % ', '.join(objects))
|
{"hexsha": "3744a57a84991dd6606cc931be9d593a42805115", "size": 3498, "ext": "py", "lang": "Python", "max_stars_repo_path": "trihosg.py", "max_stars_repo_name": "uhh-lt/triframes", "max_stars_repo_head_hexsha": "1e87ad99d03055f652701ea2f8fea88a1786210d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-07-04T16:17:48.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-14T11:02:09.000Z", "max_issues_repo_path": "trihosg.py", "max_issues_repo_name": "uhh-lt/triframes", "max_issues_repo_head_hexsha": "1e87ad99d03055f652701ea2f8fea88a1786210d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-11T13:15:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-23T19:38:23.000Z", "max_forks_repo_path": "trihosg.py", "max_forks_repo_name": "uhh-lt/triframes", "max_forks_repo_head_hexsha": "1e87ad99d03055f652701ea2f8fea88a1786210d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-30T18:08:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T08:31:39.000Z", "avg_line_length": 31.8, "max_line_length": 114, "alphanum_fraction": 0.6883933676, "include": true, "reason": "import numpy,import networkx", "num_tokens": 897}
|
import cv2
import numpy as np
import torch
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.data import MetadataCatalog
from detectron2.data import transforms as T
from detectron2.modeling import build_model
from detectron2.utils.visualizer import ColorMode, GenericMask, Visualizer, _create_text_labels
from projects.IFC.ifc.data.dataset_mapper import build_augmentation
from visualizer import ClipVisualizer
class VisualizationDemo(object):
    """Runs a ClipPredictor on one image and renders the clip's frames side by side."""

    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        dataset_name = cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        self.metadata = MetadataCatalog.get(dataset_name)
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode
        self.parallel = parallel
        self.predictor = ClipPredictor(cfg)

    def run_on_image(self, image):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.
        Returns:
            predictions: the per-frame outputs of the model.
            vis_output (VisImage): the visualized image output.
        """
        predictions, images = self.predictor(image)
        # Canvas: tall enough for the tallest frame, one fixed-width slot per frame.
        max_h = max((frame.shape[0] for frame in images), default=0)
        max_w = max((frame.shape[1] for frame in images), default=0)
        canvas = np.zeros((max_h, max_w * len(images), 3))
        for idx, frame in enumerate(images):
            h, w = frame.shape[:2]
            canvas[:h, max_w * idx:max_w * idx + w, :] = frame
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        canvas = canvas[:, :, ::-1]
        visualizer = ClipVisualizer(canvas, len(images), max_h, max_w,
                                    metadata=self.metadata, instance_mode=self.instance_mode)
        instances = [p["instances"].to(self.cpu_device) for p in predictions]
        vis_output = visualizer.draw_instance_predictions(predictions=instances)
        return predictions, vis_output
class ClipPredictor:
    """Builds an IFC model from *cfg* and runs it on a clip made of
    ``SAMPLING_FRAME_NUM`` independently augmented copies of a single image."""

    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        if len(cfg.DATASETS.TEST):
            self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        # Load the trained weights into the freshly built model.
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        self.frame_num = cfg.INPUT.SAMPLING_FRAME_NUM
        augmentations = [T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)]
        augmentations += build_augmentation(cfg, True)
        self.aug = T.AugmentationList(augmentations)

        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format

    def __call__(self, original_image):
        """
        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
        Returns:
            predictions (dict):
                the output of the model for one image only.
                See :doc:`/tutorials/models` for details about the format.
        """
        with torch.no_grad():  # https://github.com/sphinx-doc/sphinx/issues/4258
            if self.input_format == "RGB":
                # whether the model expects BGR inputs or RGB
                original_image = original_image[:, :, ::-1]
            inputs = []
            images = []
            for _ in range(self.frame_num):
                aug_input = T.AugInput(original_image)
                self.aug(aug_input)  # applies the augmentations in place
                frame = aug_input.image
                images.append(frame[:, :, ::-1])
                height, width = frame.shape[:2]
                tensor = torch.as_tensor(frame.astype("float32").transpose(2, 0, 1))[None]
                inputs.append({"image": tensor, "height": height, "width": width})
            predictions = self.model(inputs)
            return predictions, images
|
{"hexsha": "ed4e23b45720df872261c073a9e55f41611299cb", "size": 4426, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/IFC/demo/clip/predictor.py", "max_stars_repo_name": "sukjunhwang/IFC", "max_stars_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2021-10-30T02:05:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:49:57.000Z", "max_issues_repo_path": "projects/IFC/demo/clip/predictor.py", "max_issues_repo_name": "sukjunhwang/IFC", "max_issues_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-10T03:27:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T22:41:01.000Z", "max_forks_repo_path": "projects/IFC/demo/clip/predictor.py", "max_forks_repo_name": "sukjunhwang/IFC", "max_forks_repo_head_hexsha": "fb2ee4571dba4700eab3b52f10e147225b763e2a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-10-31T08:30:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T13:46:22.000Z", "avg_line_length": 36.8833333333, "max_line_length": 134, "alphanum_fraction": 0.6143244465, "include": true, "reason": "import numpy", "num_tokens": 980}
|
from warnings import warn
from numpy import asarray
from scipy.sparse import isspmatrix_csc, isspmatrix_csr, isspmatrix, \
SparseEfficiencyWarning, csc_matrix
import _superlu
# Prefer the maintained scikits.umfpack bindings; fall back to the legacy
# bundled `umfpack` module, and remember which one was found so the solver
# functions can emit a deprecation warning for the legacy path.
noScikit = False
try:
    import scikits.umfpack as umfpack
except ImportError:
    import umfpack
    noScikit = True

# UMFPACK is only usable if the imported module exposes the expected API.
isUmfpack = hasattr( umfpack, 'UMFPACK_OK' )
# Module-level switch (toggled via `use_solver`): use UMFPACK when available.
useUmfpack = True

__all__ = [ 'use_solver', 'spsolve', 'splu', 'spilu', 'factorized' ]
def use_solver( **kwargs ):
    """
    Select the default sparse direct solver and configure UMFPACK.

    Valid keyword arguments with defaults (others are ignored)::

      useUmfpack = True
      assumeSortedIndices = False

    The default sparse solver is umfpack when available; pass
    ``useUmfpack=False`` to always use the built-in SuperLU solver instead.

    Umfpack requires a CSR/CSC matrix to have sorted column/row indices. If
    sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
    to gain some speed.
    """
    try:
        # Flip the module-level switch only when explicitly requested.
        globals()['useUmfpack'] = kwargs['useUmfpack']
    except KeyError:
        pass
    if isUmfpack:
        umfpack.configure( **kwargs )
def spsolve(A, b, permc_spec=None, use_umfpack=True):
    """Solve the sparse linear system Ax=b

    Parameters
    ----------
    A : sparse matrix
        Square system matrix; converted to CSC (with a
        SparseEfficiencyWarning) if not already CSR/CSC.
    b : ndarray or sparse matrix
        Right-hand side vector; a sparse `b` is densified first.
    permc_spec : str, optional
        Column permutation strategy, passed through to SuperLU.
    use_umfpack : bool, optional
        Use UMFPACK if it is available and enabled via `use_solver`.

    Returns
    -------
    x : ndarray
        Solution of ``A x = b``.

    Raises
    ------
    ValueError
        If `b` is not a vector, `A` is not square, the sizes mismatch, or
        UMFPACK is handed a non-double matrix.
    """
    if isspmatrix( b ):
        b = b.toarray()

    # Accept (n, 1) / (1, n) arrays as vectors; reject genuinely 2-D rhs.
    if b.ndim > 1:
        if max( b.shape ) == b.size:
            b = b.squeeze()
        else:
            raise ValueError("rhs must be a vector (has shape %s)" % (b.shape,))

    if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
        A = csc_matrix(A)
        warn('spsolve requires CSC or CSR matrix format', SparseEfficiencyWarning)

    # UMFPACK requires sorted indices; harmless for the SuperLU path too.
    A.sort_indices()
    A = A.asfptype() #upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("matrix must be square (has shape %s)" % ((M, N),))
    if M != b.size:
        raise ValueError("matrix - rhs size mismatch (%s - %s)"
                         % (A.shape, b.size))

    # Honor both the caller's flag and the module-level switch.
    use_umfpack = use_umfpack and useUmfpack

    if isUmfpack and use_umfpack:
        if noScikit:
            warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
                  ' install scikits.umfpack instead', DeprecationWarning )
        # UMFPACK only supports real/complex double precision.
        if A.dtype.char not in 'dD':
            raise ValueError("convert matrix data to double, please, using"
                             " .astype(), or set linsolve.useUmfpack = False")

        b = asarray(b, dtype=A.dtype).reshape(-1)

        # 'di': real double, 'zi': complex double (int indices).
        family = {'d' : 'di', 'D' : 'zi'}
        umf = umfpack.UmfpackContext( family[A.dtype.char] )
        return umf.linsolve( umfpack.UMFPACK_A, A, b,
                             autoTranspose = True )
    else:
        # SuperLU path: `flag` tells gssv which storage format A uses.
        if isspmatrix_csc(A):
            flag = 1 # CSC format
        elif isspmatrix_csr(A):
            flag = 0 # CSR format
        else:
            A = csc_matrix(A)
            flag = 1

        b = asarray(b, dtype=A.dtype)
        options = dict(ColPerm=permc_spec)
        return _superlu.gssv(N, A.nnz, A.data, A.indices, A.indptr, b, flag,
                             options=options)[0]
def splu(A, permc_spec=None, diag_pivot_thresh=None,
         drop_tol=None, relax=None, panel_size=None, options=None):
    """
    Compute the LU decomposition of a sparse, square matrix.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix to factorize. Should be in CSR or CSC format.
    permc_spec : str, optional
        How to permute the columns of the matrix for sparsity preservation.
        (default: 'COLAMD')

        - ``NATURAL``: natural ordering.
        - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
        - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
        - ``COLAMD``: approximate minimum degree column ordering

    diag_pivot_thresh : float, optional
        Threshold used for a diagonal entry to be an acceptable pivot.
        See SuperLU user's guide for details [SLU]_
    drop_tol : float, optional
        (deprecated) No effect.
    relax : int, optional
        Expert option for customizing the degree of relaxing supernodes.
        See SuperLU user's guide for details [SLU]_
    panel_size : int, optional
        Expert option for customizing the panel size.
        See SuperLU user's guide for details [SLU]_
    options : dict, optional
        Dictionary containing additional expert options to SuperLU.
        See SuperLU user guide [SLU]_ (section 2.4 on the 'Options' argument)
        for more details. For example, you can specify
        ``options=dict(Equil=False, IterRefine='SINGLE'))``
        to turn equilibration off and perform a single iterative refinement.

    Returns
    -------
    invA : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
        Object, which has a ``solve`` method.

    See also
    --------
    spilu : incomplete LU decomposition

    Notes
    -----
    This function uses the SuperLU library.

    References
    ----------
    .. [SLU] SuperLU http://crd.lbl.gov/~xiaoye/SuperLU/
    """
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        warn('splu requires CSC matrix format', SparseEfficiencyWarning)

    # SuperLU expects canonically ordered indices and floating-point data.
    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")

    # FIX: `options` previously defaulted to the mutable ``dict()``; a None
    # default avoids sharing one dict across calls and is behaviorally
    # identical (the update below is skipped either way when nothing is given).
    _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
          diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
    """
    Compute an incomplete LU decomposition for a sparse, square matrix A.

    The resulting object is an approximation to the inverse of A.

    Parameters
    ----------
    A
        Sparse matrix to factorize
    drop_tol : float, optional
        Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
        (default: 1e-4)
    fill_factor : float, optional
        Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
    drop_rule : str, optional
        Comma-separated string of drop rules to use.
        Available rules: ``basic``, ``prows``, ``column``, ``area``,
        ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
        See SuperLU documentation for details.
    milu : str, optional
        Which version of modified ILU to use. (Choices: ``silu``,
        ``smilu_1``, ``smilu_2`` (default), ``smilu_3``.)
        NOTE: not a named parameter of this function; pass it through the
        ``options`` dict if needed.
    Remaining other options
        Same as for `splu`

    Returns
    -------
    invA_approx : scipy.sparse.linalg.dsolve._superlu.SciPyLUType
        Object, which has a ``solve`` method.

    See also
    --------
    splu : complete LU decomposition

    Notes
    -----
    To improve the better approximation to the inverse, you may need to
    increase ``fill_factor`` AND decrease ``drop_tol``.

    This function uses the SuperLU library.
    """
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        # FIX: the warning previously named `splu` (copy-paste error).
        warn('spilu requires CSC matrix format', SparseEfficiencyWarning)

    # SuperLU expects canonically ordered indices and floating-point data.
    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    M, N = A.shape
    if (M != N):
        raise ValueError("can only factor square matrices")

    _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
                    ILU_FillFactor=fill_factor,
                    DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
                    PanelSize=panel_size, Relax=relax)
    if options is not None:
        _options.update(options)
    return _superlu.gstrf(N, A.nnz, A.data, A.indices, A.indptr,
                          ilu=True, options=_options)
def factorized( A ):
    """
    Return a function for solving a sparse linear system, with A pre-factorized.

    Example:
      solve = factorized( A ) # Makes LU decomposition.
      x1 = solve( rhs1 ) # Uses the LU factors.
      x2 = solve( rhs2 ) # Uses again the LU factors.
    """
    # SuperLU path: the full LU object's bound solve method is the solver.
    if not (isUmfpack and useUmfpack):
        return splu( A ).solve

    if noScikit:
        warn( 'scipy.sparse.linalg.dsolve.umfpack will be removed,'
              ' install scikits.umfpack instead', DeprecationWarning )
    if not isspmatrix_csc(A):
        A = csc_matrix(A)
        warn('splu requires CSC matrix format', SparseEfficiencyWarning)

    A.sort_indices()
    A = A.asfptype()  # upcast to a floating point format

    # UMFPACK only supports real/complex double precision.
    if A.dtype.char not in 'dD':
        raise ValueError("convert matrix data to double, please, using"
                         " .astype(), or set linsolve.useUmfpack = False")

    family = {'d' : 'di', 'D' : 'zi'}
    umf = umfpack.UmfpackContext( family[A.dtype.char] )

    # Make LU decomposition.
    umf.numeric( A )

    def solve( b ):
        # Closure over the factorized context and matrix.
        return umf.solve( umfpack.UMFPACK_A, A, b, autoTranspose = True )

    return solve
|
{"hexsha": "73f3633ae2601e8a929e9c04a5a32dee8a647646", "size": 9090, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/sparse/linalg/dsolve/linsolve.py", "max_stars_repo_name": "mandli/scipy", "max_stars_repo_head_hexsha": "ce90df2874c39595ef69a586a3e7fdd9cb9b6f48", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-02-20T13:49:40.000Z", "max_stars_repo_stars_event_max_datetime": "2016-02-20T13:49:40.000Z", "max_issues_repo_path": "scipy/sparse/linalg/dsolve/linsolve.py", "max_issues_repo_name": "mandli/scipy", "max_issues_repo_head_hexsha": "ce90df2874c39595ef69a586a3e7fdd9cb9b6f48", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scipy/sparse/linalg/dsolve/linsolve.py", "max_forks_repo_name": "mandli/scipy", "max_forks_repo_head_hexsha": "ce90df2874c39595ef69a586a3e7fdd9cb9b6f48", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5806451613, "max_line_length": 82, "alphanum_fraction": 0.6178217822, "include": true, "reason": "from numpy,from scipy", "num_tokens": 2354}
|
#!/usr/bin/env python
"""Demonstrate pairwise Euclidean distances with scipy.spatial.distance.

Builds a small 2-D point set and prints its pairwise-distance matrix.
Fixed: the original used Python 2 ``print x`` statements, which are
syntax errors under Python 3.
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
### For matlab interface
# scipy.io.loadmat and scipy.io.savemat
# Create the following array where each row is a point in 2D space:
# [[0 1]
#  [1 0]
#  [2 0]]
x = np.array([[0, 1], [1, 0], [2, 0]])
print(x)
# Compute the Euclidean distance between all rows of x.
# d[i, j] is the Euclidean distance between x[i, :] and x[j, :],
# and d is the following array:
# [[ 0.          1.41421356  2.23606798]
#  [ 1.41421356  0.          1.        ]
#  [ 2.23606798  1.          0.        ]]
d = squareform(pdist(x, 'euclidean'))
print(d)
|
{"hexsha": "ffeac8ee9e821b822e7f19bd551d0c69e8bc5870", "size": 630, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnn_python_tutorial/numpy/mat_sci.py", "max_stars_repo_name": "DeercoderPractice/python", "max_stars_repo_head_hexsha": "4a32cc8922f47baea390e8167e34f185f67ae0fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cnn_python_tutorial/numpy/mat_sci.py", "max_issues_repo_name": "DeercoderPractice/python", "max_issues_repo_head_hexsha": "4a32cc8922f47baea390e8167e34f185f67ae0fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnn_python_tutorial/numpy/mat_sci.py", "max_forks_repo_name": "DeercoderPractice/python", "max_forks_repo_head_hexsha": "4a32cc8922f47baea390e8167e34f185f67ae0fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2, "max_line_length": 67, "alphanum_fraction": 0.6047619048, "include": true, "reason": "import numpy,from scipy", "num_tokens": 215}
|
from sympy import *
from sympy.solvers.solveset import linsolve

# Unknown limiting probabilities p0..p5 of the six states.
p0, p1, p2, p3, p4, p5 = [Symbol("p%d" % index, real=True) for index in range(6)]
probabilities = (p0, p1, p2, p3, p4, p5)

# Closed-form expressions for the first five moments, in the step count n.
m = {
    1: sympify("1 - 2 * 9 ** (-n) + (162 / 43) * (11 / 81) ** n + (96 / 43) * (2 / 3) ** n"),
    2: sympify("1 - 8 * 9 ** (-n) + (992 / 43) * (11 / 81) ** n + (384 / 43) * (2 / 3) ** n"),
    3: sympify("1 - 26 * 9 ** (-n) + (5202 / 43) * (11 / 81) ** n + (1248 / 43) * (2 / 3) ** n"),
    4: sympify("1 - 80 * 9 ** (-n) + (26432 / 43) * (11 / 81) ** n + (3840 / 43) * (2 / 3) ** n"),
    5: sympify("1 - 242 * 9 ** (-n) + (133122 / 43) * (11 / 81) ** n + (11616 / 43) * (2 / 3) ** n")
}

# Moment-matching: sum_k k**p * p_k == m[p] for p = 1..5, plus the
# normalisation constraint that the probabilities sum to one.
equations = [
    sum((value ** power) * symbol for value, symbol in enumerate(probabilities)) - m[power]
    for power in (1, 2, 3, 4, 5)
]
equations.append(sum(probabilities) - 1)

solution = list(linsolve(equations, probabilities))[0]
print(list(solution))
print([N(component) for component in solution])
print([limit(component, Symbol("n"), oo) for component in solution])
|
{"hexsha": "5d7bcc2f7fe0a8890f77f95c082936e0541a67bd", "size": 1105, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/hermann5_distribution.py", "max_stars_repo_name": "mmsbrggr/polar", "max_stars_repo_head_hexsha": "34348baf6992232e47cee7a4d56b5a96567c50b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-06T13:29:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-11T19:42:43.000Z", "max_issues_repo_path": "scripts/hermann5_distribution.py", "max_issues_repo_name": "mmsbrggr/polar", "max_issues_repo_head_hexsha": "34348baf6992232e47cee7a4d56b5a96567c50b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-26T15:58:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T13:47:28.000Z", "max_forks_repo_path": "scripts/hermann5_distribution.py", "max_forks_repo_name": "mmsbrggr/polar", "max_forks_repo_head_hexsha": "34348baf6992232e47cee7a4d56b5a96567c50b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-01T15:08:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T14:10:06.000Z", "avg_line_length": 38.1034482759, "max_line_length": 115, "alphanum_fraction": 0.4814479638, "include": true, "reason": "from sympy", "num_tokens": 514}
|
"""@author: Bryan Silverthorn <bcs@cargo-cult.org>"""
import os.path
import csv
import numpy
import sklearn
import condor
import borg
import borg.experiments.simulate_runs
logger = borg.get_logger(__name__, default_level = "INFO")
def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):
    """Simulate portfolio execution on a train/test split.

    run -- experiment description dict (bundle, portfolio name, ...)
    maker -- solver/portfolio factory; called as maker(suite, train_data, ...)
    all_data -- complete run data, masked into train/test portions below
    train_mask, test_mask -- masks selecting the split
    instances -- number of training instances to subsample (None = all)
    independent -- collect training runs independently instead of systematically
    mixture -- mixture specification forwarded to the collection step

    Returns (description, solver name, instance count, #successes,
    mean CPU seconds, median CPU seconds) over the test instances.
    """
    train_data = all_data.masked(train_mask)
    test_data = all_data.masked(test_mask)
    if instances is not None:
        # Shuffle via random sort keys, keep the first `instances` ids.
        ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]
        train_data = train_data.filter(*ids)
    if independent:
        train_data = train_data.collect_independent(mixture).only_nonempty()
    else:
        train_data = train_data.collect_systematic(mixture).only_nonempty()
    budget = test_data.common_budget
    #budget = test_data.common_budget / 2 # XXX
    suite = borg.fake.FakeSuite(test_data)
    if maker.subname == "preplanning-dir":
        # Model hyperparameters specific to the preplanning-dir portfolio.
        model_kwargs = {"K": 64}
        if "set_alpha" in maker.variants:
            model_kwargs["alpha"] = 1e-2
    else:
        model_kwargs = {}
    solver = maker(suite, train_data, model_kwargs = model_kwargs)
    successes = []
    for (i, instance_id) in enumerate(test_data.run_lists):
        logger.info("simulating run %i/%i on %s", i, len(test_data), instance_id)
        with suite.domain.task_from_path(instance_id) as instance:
            with borg.accounting() as accountant:
                answer = solver.start(instance).run_then_stop(budget)
            succeeded = suite.domain.is_final(instance, answer)
            # NOTE(review): `instance` here is the task object returned by
            # task_from_path, not a path string; os.path.basename(instance)
            # below probably meant instance_id -- confirm before changing.
            logger.info(
                "%s %s on %s (%.2f CPU s)",
                maker.name,
                "succeeded" if succeeded else "failed",
                os.path.basename(instance),
                accountant.total.cpu_seconds,
                )
            if succeeded:
                successes.append(accountant.total.cpu_seconds)
    logger.info(
        "%s had %i successes over %i instances",
        maker.name,
        len(successes),
        len(test_data),
        )
    description = "{0} ({1})".format(mixture, "Sep." if independent else "Sys.")
    # NOTE: numpy.mean/median of an empty list yield nan (with a warning)
    # when no run succeeded.
    return (
        description,
        maker.name,
        instances,
        len(successes),
        numpy.mean(successes),
        numpy.median(successes),
        )
@borg.annotations(
    out_path = ("results CSV output path"),
    runs = ("path to JSON runs specification", "positional", None, borg.util.load_json),
    repeats = ("number of times to repeat each run", "option", None, int),
    workers = ("submit jobs?", "option", "w"),
    local = ("workers are local?", "flag"),
    )
def main(out_path, runs, repeats = 128, workers = 0, local = False):
    """Simulate portfolio and solver behavior.

    For each run specification, one simulate_run job is generated per
    (train/test split, training-set size, solver maker) combination;
    jobs are executed through condor and result rows streamed to CSV.
    """
    logger.info("simulating %i runs", len(runs))
    # Memoized so each bundle is loaded from disk only once.
    get_run_data = borg.util.memoize(borg.storage.RunData.from_bundle)
    def yield_jobs():
        # Lazily produce (callable, args) tuples for condor.do.
        for run in runs:
            all_data = get_run_data(run["bundle"])
            # NOTE(review): sklearn.cross_validation (and the test_fraction /
            # indices keywords) exist only in very old scikit-learn releases;
            # modern versions use sklearn.model_selection.ShuffleSplit with
            # n_splits/test_size -- confirm the pinned sklearn version.
            validation = sklearn.cross_validation.ShuffleSplit(len(all_data), repeats, test_fraction = 0.2, indices = False)
            if run["portfolio_name"] == "-":
                # "-" means: benchmark every individual solver instead of
                # a portfolio.
                makers = map(borg.experiments.simulate_runs.SolverMaker, all_data.solver_names)
            else:
                makers = [borg.experiments.simulate_runs.PortfolioMaker(run["portfolio_name"])]
            max_instances = len(all_data) * 0.8
            for (train_mask, test_mask) in validation:
                # 32 training-set sizes, evenly spaced from 10 up to 80%
                # of the available instances.
                for instances in map(int, map(round, numpy.r_[10.0:max_instances:32j])):
                    for maker in makers:
                        yield (
                            simulate_run,
                            [
                                run,
                                maker,
                                all_data,
                                train_mask,
                                test_mask,
                                instances,
                                run["independent"],
                                run["mixture"],
                                ],
                            )
    with borg.util.openz(out_path, "wb") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["description", "solver", "instances", "successes", "mean_time", "median_time"])
        # Flush after every row so partial results survive interruption.
        for (_, row) in condor.do(yield_jobs(), workers, local):
            writer.writerow(row)
            out_file.flush()
|
{"hexsha": "535b13a09704e36ecc7e3745c34855aa0d9c81f8", "size": 4571, "ext": "py", "lang": "Python", "max_stars_repo_path": "borg/experiments/simulate_iid.py", "max_stars_repo_name": "borg-project/borg", "max_stars_repo_head_hexsha": "5140cff6c96de365b2eba9f07b7fc606b4b16c1b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-03-13T06:40:19.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-23T10:35:46.000Z", "max_issues_repo_path": "borg/experiments/simulate_iid.py", "max_issues_repo_name": "borg-project/borg", "max_issues_repo_head_hexsha": "5140cff6c96de365b2eba9f07b7fc606b4b16c1b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "borg/experiments/simulate_iid.py", "max_forks_repo_name": "borg-project/borg", "max_forks_repo_head_hexsha": "5140cff6c96de365b2eba9f07b7fc606b4b16c1b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-06-17T15:51:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-20T20:28:07.000Z", "avg_line_length": 33.3649635036, "max_line_length": 124, "alphanum_fraction": 0.5712098009, "include": true, "reason": "import numpy", "num_tokens": 972}
|
function [airway_mapped_image, airway_tree_root] = PTKMapAirwayCentrelineToImage(centreline_results, airway_image)
    % PTKMapAirwayCentrelineToImage.
    %
    %     Labels an airway segmentation by its centreline bronchi: each
    %     bronchus in the centreline tree receives a numeric label, the
    %     centreline voxels are marked with that label, and every remaining
    %     airway voxel is then assigned the label of its nearest centreline
    %     voxel. Parent/child label maps are attached to the output image so
    %     the bronchial hierarchy can be recovered from the labels.
    %
    %     Licence
    %     -------
    %     Part of the TD Pulmonary Toolkit. https://github.com/tomdoel/pulmonarytoolkit
    %     Author: Tom Doel, 2014.  www.tomdoel.com
    %     Distributed under the GNU GPL v3 licence. Please see website for details.
    %
    airway_mapped_image_raw = zeros(airway_image.ImageSize, 'uint16');
    airway_mapped_image = airway_image.BlankCopy;
    airway_tree_root = centreline_results.AirwayCentrelineTree;
    centreline_bronchi_to_do = CoreStack(airway_tree_root);
    bronchus_index = uint16(1);
    number_of_branches = airway_tree_root.CountBranches;
    parent_map = cell(number_of_branches, 1);
    child_map = cell(number_of_branches, 1);
    % Assign a label to each centreline bronchus, and mark the label
    % image with that index at each centreline voxel
    while ~centreline_bronchi_to_do.IsEmpty
        next_centreline_bronchus = centreline_bronchi_to_do.Pop;
        voxels = PTKTreeUtilities.GetCentrelineVoxelsForTheseBranches(next_centreline_bronchus, airway_image);
        airway_mapped_image_raw(voxels) = bronchus_index;
        next_centreline_bronchus.BronchusIndex = bronchus_index;
        % Add parent index to this branch, and add this branch index to
        % parent's child indices (the parent was labelled earlier because
        % the stack pops parents before their children)
        if ~isempty(next_centreline_bronchus.Parent)
            parent = next_centreline_bronchus.Parent;
            parent_index = parent.BronchusIndex;
            parent_map{bronchus_index} = parent_index;
            child_map{parent_index} = [child_map{parent_index}, bronchus_index];
        end
        centreline_bronchi_to_do.Push(next_centreline_bronchus.Children);
        bronchus_index = bronchus_index + 1;
    end
    % Find the nearest centreline point for every voxel in the airway
    % segmentation, and assign every voxel to that label
    % (the second output of bwdist gives, for each voxel, the linear index
    % of the nearest nonzero voxel)
    [~, nearest_centreline_index] = bwdist(airway_mapped_image_raw > 0);
    airway_mapped_image_raw(:) = airway_mapped_image_raw(nearest_centreline_index(:));
    % Restrict labels to voxels inside the airway segmentation only
    airway_mapped_image_raw(airway_image.RawImage ~= 1) = 0;
    airway_mapped_image.ChangeRawImage(airway_mapped_image_raw);
    airway_mapped_image.ChangeColorLabelParentChildMap(parent_map, child_map)
end
{"author": "tomdoel", "repo": "pulmonarytoolkit", "sha": "09688a006d548fb85795df0338d1ed4f4a010fb9", "save_path": "github-repos/MATLAB/tomdoel-pulmonarytoolkit", "path": "github-repos/MATLAB/tomdoel-pulmonarytoolkit/pulmonarytoolkit-09688a006d548fb85795df0338d1ed4f4a010fb9/Library/Airways/PTKMapAirwayCentrelineToImage.m"}
|
# -*- coding: utf-8 -*-
"""
This module contains various tools used within Markov modeling / segmentation.
:author: Jean-Baptiste Courbot - www.jb-courbot.fr
:date: Feb 23, 2018
"""
import numpy as np
from numpy import cos
def phi_theta(a,b):
    """
    Weighting function to account for orientation in Ising models.

    :param float a: first angle
    :param float b: second angle
    :returns: *(float)*: abs(cos(a-b)).
    """
    angular_difference = a - b
    return np.abs(np.cos(angular_difference))
def gen_beta(vois, angle):
    """
    Computation of the outputs of the weighting function given neighbor position
    and values of V (angle).

    :param ndarray vois: stack of neighbor numbers (see numbering below)
    :param angle: priviledged direction(s) / values of V
    :returns: **beta** *(ndarray)*: normalized orientation weights.
    """
    # The following numbering is used :
    #
    #      --------------
    # y+1 | 6  | 5  | 4  |
    #      --------------
    # y   | 7  |    | 3  |
    #      --------------
    # y-1 | 0  | 1  | 2  |
    #      --------------
    #      x-1 | x  | x+1
    #
    # FIX: force a float dtype. `vois` is an integer array (neighbor
    # numbers), so np.ones_like(vois) inherited an int dtype, silently
    # truncating the fractional phi_theta weights assigned below and
    # breaking the in-place `/=` normalisation on int arrays.
    beta = np.ones_like(vois, dtype=float)
    pi = np.pi
    # Opposite neighbor pairs share the same orientation weight.
    beta[(vois==3)+(vois==7)] = phi_theta(pi/2,angle)
    beta[(vois==4)+(vois==0)] = phi_theta(3*pi/4.,angle)
    beta[(vois==5)+(vois==1)] = phi_theta(0,angle)
    beta[(vois==6)+(vois==2)] = phi_theta(pi/4.,angle)
    if angle==0:
        # angle == 0 encodes "no priviledged direction": uniform weights.
        beta = np.ones_like(vois, dtype=float)
    #beta[vois==-1] = 0.
    if np.ndim(beta) ==3:
        # Normalise over the neighbor axis.
        beta /= beta.sum(axis=2)[:,:,np.newaxis]
    elif np.ndim(beta) == 1:
        beta /= beta.sum()
    return beta
def psi_ising(x_1,x_2,alpha):
    """ Ising potential function.

    :param float x_1: first argument of the potential, eventually ndarray.
    :param float x_2: second argument, eventually ndarray of the same size that x_1.
    :param float alpha: granularity parameter
    :returns: **res** *(ndarray)* - -alpha where the arguments agree,
        +alpha where they differ.
    """
    agreement = (x_2 == x_1)
    return alpha * (1. - 2. * agreement)
def init_champs(par):
    """ Set a random intialization for the class field X.

    :param parameter par: parameter set of the Gibbs sampling; uses
        par.x_range (admissible class values) and par.S0, par.S1 (field size).
    :returns: **X_init** *(ndarray)* Initialization for X.
    """
    field_shape = (par.S0, par.S1)
    return np.random.choice(par.x_range, size=field_shape)
def get_num_voisins(x,y,image):
    """ Retrieving of local pixel neighborhood numbering, accounting for borders.

    The following numbering is used :

        y+1 | 6 | 5 | 4 |
        y   | 7 |   | 3 |
        y-1 | 0 | 1 | 2 |
             x-1   x   x+1

    **Note :** by convention, non-existing neighbors are labeled '-1'.

    :param float x: x-position of pixel in image
    :param float y: y-position of pixel in image
    :param ndarray image: concerned image, actually used for its size only.
    :returns: **voisins** *(ndarray)* - set of neighbor number
    """
    x_last = image.shape[0] - 1
    y_last = image.shape[1] - 1
    # Classify each coordinate: low border, interior, or high border
    # (low takes precedence for degenerate single-row/column images).
    x_pos = 'low' if x == 0 else ('high' if x == x_last else 'mid')
    y_pos = 'low' if y == 0 else ('high' if y == y_last else 'mid')
    lookup = {
        ('mid',  'mid'):  ( 0,  1,  2,  3,  4,  5,  6,  7),
        ('low',  'low'):  (-1, -1, -1,  3,  4,  5, -1, -1),
        ('low',  'high'): (-1,  1,  2,  3, -1, -1, -1, -1),
        ('low',  'mid'):  (-1,  1,  2,  3,  4,  5, -1, -1),
        ('high', 'low'):  (-1, -1, -1, -1, -1,  5,  6,  7),
        ('high', 'high'): ( 0,  1, -1, -1, -1, -1, -1,  7),
        ('high', 'mid'):  ( 0,  1, -1, -1, -1,  5,  6,  7),
        ('mid',  'low'):  (-1, -1, -1,  3,  4,  5,  6,  7),
        ('mid',  'high'): ( 0,  1,  2,  3, -1, -1, -1,  7),
    }
    return np.array(lookup[(x_pos, y_pos)])
def get_vals_voisins_tout(image):
    """
    Retrieving of local pixel neighborhood values, accounting for borders.

    The following numbering is used :

        y+1 | 6 | 5 | 4 |
        y   | 7 |   | 3 |
        y-1 | 0 | 1 | 2 |
             x-1   x   x+1

    Along the image borders, pixel values are duplicated (edge padding).

    :param ndarray image: image whose neighbor stacks are computed.
    :returns: **vals** *(ndarray)* - neighboring values arranged in an
        (xdim, ydim, 8) array, one slice per neighbor number above.
    """
    S0, S1 = image.shape
    # 1-pixel edge padding: equivalent to duplicating border rows/columns.
    padded = np.pad(image, 1, mode='edge')
    vals = np.zeros(shape=(S0, S1, 8))
    # (row offset, column offset) into the padded image for each neighbor
    # number; offset (1, 1) would be the pixel itself.
    offsets = ((0, 0), (1, 0), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (0, 1))
    for num, (dx, dy) in enumerate(offsets):
        vals[:, :, num] = padded[dx:dx + S0, dy:dy + S1]
    return vals
|
{"hexsha": "89e2c44d37b394df9ebc1024d9266492e0756909", "size": 5115, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/fields_tools.py", "max_stars_repo_name": "courbot/otmf", "max_stars_repo_head_hexsha": "22015bb067c49c6b24645a4c0136bac2a7d40e95", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/fields_tools.py", "max_issues_repo_name": "courbot/otmf", "max_issues_repo_head_hexsha": "22015bb067c49c6b24645a4c0136bac2a7d40e95", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/fields_tools.py", "max_forks_repo_name": "courbot/otmf", "max_forks_repo_head_hexsha": "22015bb067c49c6b24645a4c0136bac2a7d40e95", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.575, "max_line_length": 98, "alphanum_fraction": 0.5073313783, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1614}
|
#include <string>
#include <iostream>
#include <algorithm>
#include <boost/lambda/lambda.hpp>
#include <boost/lambda/casts.hpp>
#include <ctime>
#include <cstdlib>
using namespace boost::lambda ;
// Functor producing pseudo-random characters spanning the full byte range.
struct MyRandomizer {
   char operator( )( ) {
      // rand() % 256 yields 0..255; narrowing to char is
      // implementation-defined above 127 but adequate for this demo.
      int byte_value = rand( ) % 256 ;
      return static_cast<char>( byte_value ) ;
   }
} ;
// Return a copy of startstring with ASCII control characters removed
// (codes below 32, plus DEL = 127).
//
// FIX: the original wrote through the iterator of a one-character string
// (" ") with std::remove_copy_if -- a buffer overflow / undefined
// behaviour -- and combined the two tests with &&, a predicate that can
// never be true (a code cannot be both < 32 and == 127).
std::string deleteControls ( std::string startstring ) {
   std::string noControls ;
   noControls.reserve( startstring.size( ) ) ;
   for ( std::string::size_type i = 0 ; i < startstring.size( ) ; i++ ) {
      // Use unsigned char so extended (negative signed) bytes compare sanely.
      unsigned char code = static_cast<unsigned char>( startstring[ i ] ) ;
      if ( code >= 32 && code != 127 )  // keep printable and extended bytes
         noControls += startstring[ i ] ;
   }
   return noControls ;
}
// Return a copy of startstring keeping only codes 32..127, i.e. with
// extended characters (and control codes) removed -- the range the
// original predicate ( > 127 || < 32 ) intended to strip.
//
// FIX: the original wrote through the iterator of a one-character string
// (undefined behaviour), and compared a plain char cast to int, on which
// extended bytes are negative so the > 127 test never fired.
std::string deleteExtended( std::string startstring ) {
   std::string noExtended ;
   noExtended.reserve( startstring.size( ) ) ;
   for ( std::string::size_type i = 0 ; i < startstring.size( ) ; i++ ) {
      unsigned char code = static_cast<unsigned char>( startstring[ i ] ) ;
      if ( code >= 32 && code <= 127 )
         noExtended += startstring[ i ] ;
   }
   return noExtended ;
}
int main( ) {
   // Build a 40-character buffer of random bytes, then display the raw
   // string alongside its control-stripped and extended-stripped variants.
   std::string my_extended_string( 40 , ' ' ) ;
   srand( time( 0 ) ) ;
   std::generate_n( my_extended_string.begin( ) , 40 , MyRandomizer( ) ) ;
   std::string no_controls( deleteControls( my_extended_string ) ) ;
   std::string no_extended ( deleteExtended( my_extended_string ) ) ;
   std::cout << "string with all characters: " << my_extended_string << std::endl ;
   std::cout << "string without control characters: " << no_controls << std::endl ;
   std::cout << "string without extended characters: " << no_extended << std::endl ;
   return 0 ;
}
|
{"hexsha": "cfb93ad5385ef760ba711c63c6a0ec4d33d5b842", "size": 1710, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "lang/C++/strip-control-codes-and-extended-characters-from-a-string.cpp", "max_stars_repo_name": "ethansaxenian/RosettaDecode", "max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "lang/C++/strip-control-codes-and-extended-characters-from-a-string.cpp", "max_issues_repo_name": "ethansaxenian/RosettaDecode", "max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lang/C++/strip-control-codes-and-extended-characters-from-a-string.cpp", "max_forks_repo_name": "ethansaxenian/RosettaDecode", "max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 38.8636363636, "max_line_length": 93, "alphanum_fraction": 0.6333333333, "num_tokens": 442}
|
import numpy as np
from scipy.sparse import csc_matrix, diags
from scipy.sparse.linalg import splu
DOUBLE_EPS = 1e-14
SIZING_EPS = 1e-6
MIN_EDGE_LENGTH = 1e-2
MAX_RADIUS = 0.5
def axial_stiffness_matrix(L, A, E):
    """Local axial (bar) stiffness matrix, K = (A E / L) [[1,-1],[-1,1]].

    Parameters
    ----------
    L : [float]
        element length
    A : [float]
        cross section area
    E : [float]
        Young's modulus

    Return
    ------
    K : 2x2 numpy array
    """
    # Clamp the length to guard against collapsed (zero-length) edges.
    effective_length = max(L, MIN_EDGE_LENGTH)
    K = np.array([[1.0, -1.0], [-1.0, 1.0]])
    return K * (A * E / effective_length)
def torsional_stiffness_matrix(L, J, G):
    """Local torsional stiffness matrix about the element x axis.

    Parameters
    ----------
    L : [float]
        bar element length
    J : [float]
        torsional constant, unit: length unit^4.
        In the case of a circular, cylindrical shaft, it equals
        the polar moment of inertia of the cross section.
    G : [float]
        modulus of rigidity

    Return
    ------
    K_tor_x : 2x2 numpy array
    """
    # Torsion has the same mathematical form as the axial case, with
    # (J, G) playing the roles of (A, E).
    return axial_stiffness_matrix(L, J, G)
def bending_stiffness_matrix(L, E, Iz, axis=2):
    """Local 4x4 bending stiffness matrix about one transverse axis.

    Parameters
    ----------
    L : [float]
        element length
    E : [float]
        Young's modulus
    Iz : [float]
        moment of inertia of the section about the bending axis
    axis : int
        1 = local y axis, 2 = local z axis (default)

    Return
    ------
    K_bend : 4x4 numpy array
    """
    # Bending about y flips the sign of the shear/rotation coupling terms.
    sign = 1.0 if axis == 2 else -1.0
    six_L = sign * 6 * L
    L2 = L ** 2
    K = np.array([
        [12.0,   six_L,  -12.0,  six_L],
        [six_L,  4 * L2, -six_L, 2 * L2],
        [-12.0, -six_L,   12.0, -six_L],
        [six_L,  2 * L2, -six_L, 4 * L2],
    ])
    return K * (E * Iz / L ** 3)
def nu2G(nu, E):
    """Shear modulus G from Poisson's ratio nu and Young's modulus E."""
    denominator = 2 * (1 + nu)
    return E / denominator
def local_element_stiffness_matrix(L, A, Jx, Iy, Iz, E, nu):
    """complete 12x12 stiffness matrix for a bisymmetrical member.

    Since for small displacements the axial force effects, torsion,
    and bending about each axis are uncoupled, the influence coeff
    **relating** these effects are zero.

    Parameters
    ----------
    L : float
        element length
    A : float
        cross section area
    Jx : float
        torsional constant.
        In the case of circular, cylindrical shaft, it's equal to
        the polar moment of inertia of the cross section.
    Iy : float
        moment of inertia w.r.t. y
    Iz : float
        moment of inertia w.r.t. z
    E : float
        Young's modulus
    nu : float
        Poisson ratio

    Returns
    -------
    K : 12x12 numpy array
    """
    G = nu2G(nu, E)
    # Fx1, Fx2 : u1, u2  ->  dofs 0, 6
    axial_x_k = axial_stiffness_matrix(L, A, E)
    # Mx1, Mx2 : \theta_x1, \theta_x2  ->  dofs 3, 9
    tor_x_k = torsional_stiffness_matrix(L, Jx, G)
    # Fy1, Mz1, Fy2, Mz2 : v1, \theta_z1, v2, \theta_z2  ->  dofs 1, 5, 7, 11
    bend_z_k = bending_stiffness_matrix(L, E, Iz, axis=2)
    # Fz1, My1, Fz2, My2 : w1, \theta_y1, w2, \theta_y2  ->  dofs 2, 4, 8, 10
    # BUG FIX: this previously passed Iz, leaving the Iy parameter unused
    # and giving a wrong y-bending stiffness for sections with Iy != Iz.
    bend_y_k = bending_stiffness_matrix(L, E, Iy, axis=1)
    K = np.zeros([12, 12])
    K[np.ix_([0, 6], [0, 6])] += axial_x_k
    K[np.ix_([3, 9], [3, 9])] += tor_x_k
    K[np.ix_([1, 5, 7, 11], [1, 5, 7, 11])] += bend_z_k
    K[np.ix_([2, 4, 8, 10], [2, 4, 8, 10])] += bend_y_k
    return K
def element_rotation_matrix(start_node_coords, end_node_coords):
    """2x6 direction-cosine matrix of a truss element.

    Row 0 maps the start node's translations, row 1 the end node's, onto
    the element axis (unit vector from start to end).
    """
    delta = end_node_coords - start_node_coords
    length = np.linalg.norm(delta)
    c_x = delta[0] / length
    c_y = delta[1] / length
    c_z = delta[2] / length
    cosines = [c_x, c_y, c_z]
    zeros = [0, 0, 0]
    return np.array([cosines + zeros, zeros + cosines])
def local_to_global_transformation_matrix(
    start_node_coords, end_node_coords, rot_y2x=0.0
):
    """12x12 rotation from the local element frame to the global frame.

    The local x axis is the element direction; the local y axis is taken
    perpendicular to global z (except for vertical elements, handled as a
    special case). `rot_y2x` is currently unused (see TODO below).
    """
    delta = end_node_coords - start_node_coords
    L = np.linalg.norm(delta)
    # Direction cosines of the local x axis in the global frame.
    c_x = delta[0] / L
    c_y = delta[1] / L
    c_z = delta[2] / L
    R3 = np.zeros([3, 3])
    # TODO rotaxis
    if abs(abs(c_z) - 1.0) < DOUBLE_EPS:
        # Element parallel to the global z axis: the cross product below
        # is undefined, so use a fixed rotation within the x-y plane.
        R3[0, 2] = -c_z
        R3[1, 1] = 1
        R3[2, 0] = c_z
    else:
        # Local x axis = element direction.
        x_axis = np.array([c_x, c_y, c_z])
        # Local y axis = (negated) cross product with global z, normalized.
        y_axis = -np.cross(x_axis, [0, 0, 1.0])
        y_axis /= np.linalg.norm(y_axis)
        z_axis = np.cross(x_axis, y_axis)
        R3[0, :] = x_axis
        R3[1, :] = y_axis
        R3[2, :] = z_axis
    # Block-diagonal expansion: one 3x3 rotation per node-translation /
    # node-rotation triple (4 blocks of 3 dofs).
    return np.kron(np.eye(4), R3)
def global_element_stiffness_matrix(
    A, Jx, Iy, Iz, E, nu, start_node_coords, end_node_coords
):
    """Element stiffness matrix expressed in the global frame.

    Builds the local 12x12 stiffness matrix from the section/material
    properties and congruence-transforms it with the element's
    local-to-global rotation: K_global = R^T K_local R.
    """
    length = np.linalg.norm(end_node_coords - start_node_coords)
    Ke_local = local_element_stiffness_matrix(length, A, Jx, Iy, Iz, E, nu)
    rotation = local_to_global_transformation_matrix(
        start_node_coords, end_node_coords
    )
    return rotation.T.dot(Ke_local).dot(rotation)
def assemble_global_stiffness_matrix(
    nodes, edges, E, nu, areas, jxs, iys, izs, edge_dof_map, dof_per_node=6
):
    """Assemble the sparse global stiffness matrix in COO triplets.

    Each element's 12x12 global stiffness block is scattered into the
    (n_dof x n_dof) system matrix at the dof indices given by
    edge_dof_map; near-zero entries are dropped.
    """
    n_dof = nodes.shape[0] * dof_per_node
    element_dofs = dof_per_node * 2
    rows, cols, values = [], [], []
    for edge, dofs, area, jx, iy, iz in zip(edges, edge_dof_map, areas, jxs, iys, izs):
        Ke = global_element_stiffness_matrix(
            area, jx, iy, iz, E, nu, nodes[edge[0]], nodes[edge[1]]
        )
        for i in range(element_dofs):
            for j in range(element_dofs):
                entry = Ke[i, j]
                if abs(entry) > DOUBLE_EPS:
                    rows.append(dofs[i])
                    cols.append(dofs[j])
                    values.append(entry)
    # Duplicate (row, col) triplets are summed by the sparse constructor.
    return csc_matrix((values, (rows, cols)), shape=(n_dof, n_dof), dtype=float)
def node_id_to_dof_map(num_nodes, dof_per_node=6):
    """Map node index -> its global dof indices (row i lists node i's dofs)."""
    total_dofs = num_nodes * dof_per_node
    return np.arange(total_dofs).reshape(num_nodes, dof_per_node)
def edge_id_to_dof_map(edges, node_dof_map):
    """Per-edge dof indices: start-node dofs followed by end-node dofs."""
    start_dofs = node_dof_map[edges[:, 0]]
    end_dofs = node_dof_map[edges[:, 1]]
    return np.hstack([start_dofs, end_dofs])
def compute_permutation_matrix(
    num_nodes, support_nodes: dict, node_dof_map, dof_per_node=6):
    """Permutation reordering dofs as (all free dofs, then all fixed dofs).

    support_nodes is a dict {node_index: dof fixities, e.g. [1, 1, 1, 0, 0, 0]}
    where a nonzero entry marks a fixed (supported) dof.

    Returns (permutation_matrix, n_free_dofs): applying the sparse
    permutation P to a dof vector puts the free dofs in the first
    n_free_dofs positions.
    """
    n_dofs = num_nodes * dof_per_node
    fixities_dofs = np.zeros(n_dofs)  # 0 means support is turned off (free)
    for node in support_nodes.keys():
        dofs_indices = node_dof_map[node]
        fixities_dofs[dofs_indices] = support_nodes[node]
    # permutation map: free dofs first, fixed dofs last
    fixity_filter = fixities_dofs == 0
    # FIX: np.int was removed in NumPy 1.24; use the builtin int instead.
    n_fixed_dofs = int(np.sum(fixities_dofs))
    n_free_dofs = n_dofs - n_fixed_dofs
    dof_indices = np.arange(n_dofs)
    id_map = np.zeros_like(dof_indices)
    id_map[:n_free_dofs] = dof_indices[fixity_filter]
    id_map[n_free_dofs:] = dof_indices[~fixity_filter]
    # permutation matrix: row i has a single 1 in column id_map[i]
    perm_row = list(range(n_dofs))
    perm_col = [id_map[i] for i in range(n_dofs)]
    perm_data = [1] * n_dofs
    permutation_matrix = csc_matrix(
        (perm_data, (perm_row, perm_col)), shape=(n_dofs, n_dofs)
    )
    return permutation_matrix, n_free_dofs
def partition_matrix(matrix_to_partition, permutation_matrix, split_index):
    """Reorder a matrix with P M P^T and return its two diagonal blocks.

    With a free-first permutation, the first block couples the free dofs
    and the second couples the fixed dofs.
    """
    reordered = permutation_matrix * matrix_to_partition * permutation_matrix.T
    leading_block = reordered[:split_index, :split_index]
    trailing_block = reordered[split_index:, split_index:]
    return leading_block, trailing_block
def partition_vector(vector_to_partition, permutation_matrix, split_index):
    """Reorder a vector with P and split it at split_index (free | fixed)."""
    reordered = permutation_matrix * vector_to_partition
    head = reordered[:split_index]
    tail = reordered[split_index:]
    return head, tail
def solve(
    nodes,
    edges,
    loads,
    supports,
    E: float,
    nu: float,
    areas: np.ndarray,
    jxs: np.ndarray,
    iys: np.ndarray,
    izs: np.ndarray,
    dof_per_node=6,
):
    """Assemble and solve the global frame system for nodal displacements.

    nodes: [[x_i, y_i, z_i]] coordinates of node i
    edges: [[i, j]] elements running from node i to node j
    loads: [[f_x_i, f_y_i, f_z_i]] load at node i
    supports: {node_index: [0/1, ...]} fixed-dof flags per supported node
    E: Young's modulus for all elements
    nu: Poisson's ratio for all elements
    areas: [a_i] cross-sectional areas for all elements
    jxs: [jx_i] torsion constants for all elements
    iys: [iy_i] moments of inertia w.r.t. y for all elements
    izs: [iz_i] moments of inertia w.r.t. z for all elements

    Returns an (n_nodes, dof_per_node) array of displacements.
    """
    node_count = nodes.shape[0]
    node_dof_map = node_id_to_dof_map(node_count)
    edge_dof_map = edge_id_to_dof_map(edges, node_dof_map)
    permutation, free_count = compute_permutation_matrix(
        node_count, supports, node_dof_map
    )
    stiffness = assemble_global_stiffness_matrix(
        nodes, edges, E, nu, areas, jxs, iys, izs, edge_dof_map
    )
    # Only the free-free partition participates in the solve; the fixed
    # dofs stay at zero displacement.
    K_free, _K_fixed = partition_matrix(stiffness, permutation, free_count)
    F_free, _F_fixed = partition_vector(loads.flatten(), permutation, free_count)
    displacements = np.zeros(node_count * dof_per_node)
    displacements[:free_count] = solve_stiffness_system(K_free, F_free)
    # Undo the free-first reordering and group per node.
    return (permutation.T * displacements).reshape((-1, dof_per_node))
def solve_circular_section(
    nodes,
    edges,
    loads,
    supports,
    E: float,
    nu: float,
    radii: np.ndarray,
    dof_per_node=6,
):
    """Solve a frame whose elements have solid circular cross sections.

    Section properties (area, torsion constant, and the two identical
    bending inertias) are derived from the per-element radii, then the
    system is delegated to `solve`.
    """
    section_areas = get_area_circle(radii)
    torsion_constants = get_torsional_constant_circle(radii)
    # A solid circle is symmetric: Iy == Iz.
    bending_inertias = get_inertia_circle(radii)
    return solve(
        nodes,
        edges,
        loads,
        supports,
        E,
        nu,
        section_areas,
        torsion_constants,
        bending_inertias,
        bending_inertias,
        dof_per_node=dof_per_node,
    )
def solve_stiffness_system(K_free, F_free):
    """Solve K U = F via sparse LU with symmetric diagonal (Jacobi) scaling.

    The system is preconditioned as (D K D)(D^-1 U) = D F with
    D = diag(1/sqrt(diag(K))) to improve numerical stability, factored
    with SuperLU in symmetric mode, and the scaling is undone afterwards.
    """
    scale = 1 / np.sqrt(K_free.diagonal())
    D = diags(scale, format="csc")
    preconditioned = D.dot(K_free).dot(D)
    lu = splu(preconditioned, diag_pivot_thresh=0, options={"SymmetricMode": True})
    scaled_solution = lu.solve(D.dot(F_free))
    return scale * scaled_solution
def get_area_circle(radius):
    """Cross-sectional area of a solid circle: A = pi r^2."""
    squared = radius ** 2
    return np.pi * squared
def get_inertia_circle(radius):
    """Second moment of area of a solid circle: I = pi r^4 / 4."""
    fourth_power = radius ** 4
    return np.pi * fourth_power / 4
def get_torsional_constant_circle(radius):
    """Torsional constant (polar moment) of a solid circle: J = pi r^4 / 2."""
    fourth_power = radius ** 4
    return np.pi * fourth_power / 2
if __name__ == "__main__":
    # Smoke test: print the edge-to-dof mapping for a 4-node, 6-edge frame.
    demo_edges = np.array([[0, 1], [1, 2], [2, 0], [3, 1], [3, 0], [2, 3]])
    demo_node_map = node_id_to_dof_map(4)
    print(edge_id_to_dof_map(demo_edges, demo_node_map))
|
{"hexsha": "685cfcded88d532ca3d7dd06352223f5c1cafca7", "size": 10880, "ext": "py", "lang": "Python", "max_stars_repo_path": "04/utils/analysis.py", "max_stars_repo_name": "danhaive/4.453x-resources", "max_stars_repo_head_hexsha": "789bcbac426bee12102fb7788f5b54dfa2773752", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-11T16:47:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T15:20:07.000Z", "max_issues_repo_path": "04/utils/analysis.py", "max_issues_repo_name": "danhaive/4.453x-resources", "max_issues_repo_head_hexsha": "789bcbac426bee12102fb7788f5b54dfa2773752", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "04/utils/analysis.py", "max_forks_repo_name": "danhaive/4.453x-resources", "max_forks_repo_head_hexsha": "789bcbac426bee12102fb7788f5b54dfa2773752", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7267759563, "max_line_length": 85, "alphanum_fraction": 0.6170955882, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3446}
|
# -*- coding: utf-8 -*-
import random
import gym
import numpy as np
from stable_baselines3.common.env_checker import check_env
from plan_opt.demand import Demand
from plan_opt.envs.rampup2 import LEGAL_CHANGES
def env_health(config, env=None, first_step=False, random_steps=0, verbose=0):
    """Run a quick health check on a rampup environment.

    When no environment is supplied, one is built from ``config`` with a
    freshly generated, integer-rounded demand curve. The environment is then
    validated via stable-baselines3's ``check_env`` and can optionally be
    exercised with one deterministic step and/or a number of random steps.

    Args:
        config: Environment configuration; must contain "ENV_ID".
        env: Optional pre-built environment; created from ``config`` if None.
        first_step: When True, seed an initial state and take one fixed step.
        random_steps: Number of random legal steps to execute and print.
        verbose: When > 0, print a sampled observation before the check.
    """
    if env is None:
        # Build a demand curve and round it to integers for the environment.
        demand = Demand()
        demand.generate_demand()
        demand.add_sudden_change()
        demand.data = np.around(demand.data).astype("int32")
        env = gym.make(config["ENV_ID"]).create(config, demand)
    if verbose > 0:
        sample_obs = env.observation_space.sample()
        print("Observation Shape:", sample_obs.shape)
        print("Observation Length:", len(env.obs_demand))
        print("Observation Sample:\n", sample_obs)
    check_env(env)
    if first_step:
        obs = env._set_initial_state(initial_state_status=3)
        obs, reward, done, info = env.step(2)
        print_step_details(env, obs, reward, done, info)
    for step_idx in range(random_steps):
        print("Random step:\t", step_idx)
        env.reset()
        action = random.sample(LEGAL_CHANGES[env.obs_last_legal_status], 1)[0]
        obs, reward, done, info = env.step(action)
        print_step_details(env, obs, reward, done, info)
def print_step_details(env, o, r, d, i):
    """Print a human-readable summary of a single environment step.

    Args:
        env (RampupEnv): Environment the step was taken in.
        o (np.array): Observation returned by the step.
        r (int): Reward returned by the step.
        d (bool): Done flag returned by the step.
        i (Dict): Info dict returned by the step.
    """
    # Render the info dict as one indented, aligned line per key.
    i_long = "".join(
        "\n    {:<25}{}".format(key, val) for key, val in i.items()
    )
    print(
        "Timestep:\t", env.state_time,
        "\nAction:\t\t", env.obs_last_legal_status,
        "\nDemand:\t\t", env.demand.data[env.state_time],
        "\nReward:\t\t", r,
        "\nDone:\t\t", d,
        "\nInfo:\t\t", i_long,
        "\nShape:\t\t", o.shape,
        "\nObservation:\n", o,
        "\n",
    )
|
{"hexsha": "a488215e8a64f7ffd04efa413f1750c6ca019ce8", "size": 2223, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plan_opt/env_health.py", "max_stars_repo_name": "sebas-seck/plan-opt", "max_stars_repo_head_hexsha": "bf95edc2c3609aea7572887097be0f2f75e19216", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plan_opt/env_health.py", "max_issues_repo_name": "sebas-seck/plan-opt", "max_issues_repo_head_hexsha": "bf95edc2c3609aea7572887097be0f2f75e19216", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plan_opt/env_health.py", "max_forks_repo_name": "sebas-seck/plan-opt", "max_forks_repo_head_hexsha": "bf95edc2c3609aea7572887097be0f2f75e19216", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1392405063, "max_line_length": 88, "alphanum_fraction": 0.5825461089, "include": true, "reason": "import numpy", "num_tokens": 568}
|
# Created by rahman at 14:51 2020-03-05 using PyCharm
import os
import random
import pandas as pd
import scipy
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
# City selector used to build all data paths below.
city = 'ny'  # presumably other city codes exist — TODO confirm against the data directory
DATAPATH = '../data/' + city + "/"
# Candidate classifiers: name -> (sklearn estimator class, constructor kwargs).
# Instantiate at training time with cls(**kwargs).
classifiers = {
    'RF':(RandomForestClassifier, {"n_estimators": 101, "max_depth": 10}),
    'GBM': (GradientBoostingClassifier,{'n_estimators':100, 'max_depth': 3}),
    'AB':(AdaBoostClassifier, {"n_estimators": 101}),
    'LR_SAG_L2penalty':(LogisticRegression, {'solver': 'sag'}),
    'LR_liblinear_L2penalty': (LogisticRegression, {'solver': 'liblinear', 'penalty': 'l2'})}
def folder_setup(city):
    '''Create the per-city output directory tree under data/.

    Creates data/<city> plus its emb/, feature/ and result/ subfolders.
    Unlike the previous os.mkdir version, os.makedirs also creates the
    missing 'data' parent instead of raising FileNotFoundError.

    Args:
        city: city code, e.g. 'ny'
    Returns:
        None
    '''
    # NOTE(review): paths here are relative to the CWD ('data/...') while
    # DATAPATH above uses '../data/...' — confirm which root is intended.
    for sub in ('', 'emb', 'feature', 'result'):
        target = os.path.join('data', city, sub)
        if not os.path.exists(target):
            os.makedirs(target)  # creates intermediate dirs as needed
def isFriends(friends, a, b):
    '''Check whether user b appears as a friend of user a.

    Args:
        friends: DataFrame of directed friendship pairs with columns u1, u2
        a: source user id
        b: candidate friend user id
    Returns:
        bool: True when the directed pair (a, b) exists in friends
    '''
    friends_a = friends[friends.u1 == a].u2
    # The membership test already yields a bool; the former
    # `True if ... else False` ternary was redundant.
    return b in friends_a.values
def pickPairs(friends, i, SP, MAX_PAIRS, ulist):
    '''Pop users off ulist two at a time and collect stranger pairs.

    Args:
        friends: friends list (asymmetric) [u1, u2]
        i: iteration counter (unused here; kept for the caller's loop)
        SP: list of stranger pairs, extended in place
        MAX_PAIRS: number of existing friend pairs (target count)
        ulist: randomly shuffled user list, consumed from the end
    Returns:
        SP: the (possibly grown) list of stranger pairs
    '''
    while len(ulist) >= 2:
        u_a = ulist.pop()
        u_b = ulist.pop()
        if isFriends(friends, u_a, u_b):
            # skip pairs that are actually friends
            print ("friends found ", u_a, u_b)
            continue
        SP.append([u_a, u_b])
        if len(SP) >= MAX_PAIRS:
            return SP
    return SP
def make_allPairs(pairsFile, u_list_file, DATAPATH, friendFile, makeStrangers):
    '''Build friend and stranger pairs and write them to pairsFile.

    Args:
        pairsFile: output CSV filename for the combined labelled pairs
        u_list_file: dataset from which to read uids (its index is used)
        DATAPATH: directory containing the input and output files
        friendFile: CSV of friend pairs [u1, u2] (asymmetric, duplicates exist)
        makeStrangers: when True, sample fresh stranger pairs and write
            strangers.csv; otherwise reuse the existing strangers.csv
    Returns:
        pairs: DataFrame [u1, u2, label] with label 1=friends, 0=strangers
    '''
    u_list = pd.read_csv(DATAPATH + u_list_file).index.values
    friends = pd.read_csv(DATAPATH + friendFile)
    # take only pairs {u1, u2} where u1<u2, because {u2, u1} also exist but is a duplicate
    smallFriends = friends.loc[(friends.u1 < friends.u2) & (friends.u1.isin(u_list)) & (friends.u2.isin(u_list))].reset_index(drop=True)
    smallFriends["label"] = 1
    if makeStrangers:
        MAX_PAIRS, SP = len(smallFriends.u1), []
        i = 0
        while len(SP) < MAX_PAIRS:
            # random.sample requires a plain sequence; numpy arrays are
            # rejected on Python 3.11+, so convert explicitly.
            SP = pickPairs(friends, i, SP, MAX_PAIRS, random.sample(list(u_list), k=len(u_list)))
            i += 1
        # BUG FIX: str data must be written in text mode ("w"); the old "wb"
        # mode raised TypeError on Python 3. `with` also guarantees the close.
        with open(DATAPATH + "strangers.csv", "w") as f:
            for pair in SP:
                f.write(str(pair[0]) + ", " + str(pair[1]) + '\n')
    strangers = pd.read_csv(DATAPATH + "strangers.csv", names=['u1', 'u2'])
    strangers["label"] = 0
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.
    allPairs = pd.concat([smallFriends, strangers], ignore_index=True)
    assert(len(allPairs) == len(smallFriends) * 2 == len(strangers) * 2)
    allPairs.to_csv(DATAPATH + pairsFile)
    return allPairs
def pair_construct(u_list, friendFile, downSample):
    '''Construct labelled user pairs (friends vs sampled strangers).

    Args:
        u_list: user list
        friendFile: filename of the CSV of friend pairs [u1, u2]
        downSample: True to downsample negatives to the positive count now
            (for word2vec features); False to downsample later, after the
            overlap-based features are computed
    Returns:
        pair: DataFrame [u1, u2, label] with label 1=friends, 0=strangers
    '''
    # BUG FIX: the pd.np accessor was deprecated and removed from modern
    # pandas; use numpy directly (local import keeps the file unchanged).
    import numpy as np

    friends = pd.read_csv(DATAPATH + friendFile)
    # positive i.e. friend pairs
    pair_p = friends.loc[(friends.u1.isin(u_list)) & (friends.u2.isin(u_list))].copy()
    # sampling negative pairs, i.e. strangers (oversample 9x, filter below)
    pair_n = pd.DataFrame(np.random.choice(u_list, 9 * pair_p.shape[0]), columns=['u1'])
    pair_n['u2'] = np.random.choice(u_list, 9 * pair_p.shape[0])
    # remove dup user in pair
    pair_n = pair_n.loc[pair_n.u1 != pair_n.u2]
    # remove asymmetric dups
    pair_n = pair_n.loc[pair_n.u1 < pair_n.u2]
    # remove dups
    pair_n = pair_n.drop_duplicates().reset_index(drop=True)
    # delete friends inside by setting the columns of the positive pairs to be indexes
    pair_n = pair_n.loc[~pair_n.set_index(list(pair_n.columns)).index.isin(pair_p.set_index(list(pair_p.columns)).index)]
    # now shuffle and reset the index
    pair_n = pair_n.loc[np.random.permutation(pair_n.index)].reset_index(drop=True)
    if downSample:
        pair_n = pair_n.loc[0:1 * pair_p.shape[0] - 1, :]  # down sampling for emb features only
    pair_p['label'] = 1
    pair_n['label'] = 0
    print ("pair_n.shape, pair_p.shape", pair_n.shape, pair_p.shape)
    pair = pd.concat([pair_p, pair_n], ignore_index=True)
    pair = pair.reset_index(drop=True)
    return pair
|
{"hexsha": "872c3164b902b3c7e37aee45754fc82e7de2e325", "size": 5341, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/shared_tools/utils.py", "max_stars_repo_name": "tahleen-rahman/all2friends", "max_stars_repo_head_hexsha": "156ba257677def409661e8b68ccdfb1e896ba721", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/shared_tools/utils.py", "max_issues_repo_name": "tahleen-rahman/all2friends", "max_issues_repo_head_hexsha": "156ba257677def409661e8b68ccdfb1e896ba721", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-08T21:47:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:35:39.000Z", "max_forks_repo_path": "src/shared_tools/utils.py", "max_forks_repo_name": "tahleen-rahman/all2friends", "max_forks_repo_head_hexsha": "156ba257677def409661e8b68ccdfb1e896ba721", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6722222222, "max_line_length": 132, "alphanum_fraction": 0.6324658304, "include": true, "reason": "import scipy", "num_tokens": 1453}
|
import numpy as np
# try to import numba
# or define dummy decorator
try:
    from numba import autojit
except ImportError:
    # BUG FIX: this was a bare `except:`, which silently swallowed every
    # error (including KeyboardInterrupt); only an import failure should
    # trigger the no-op fallback.
    def autojit(func):
        """Identity decorator used when numba (or its autojit) is unavailable."""
        return func
# util functions for network simulation
def smooth_trace(trace, scale):
    """Downsample a trace by averaging consecutive windows of length `scale`.

    Args:
        trace: 1-D array-like of samples.
        scale: window length; values 0 or 1 (or a window longer than the
            trace) return the input unchanged.

    Returns:
        np.ndarray of len(trace)//scale window means, or `trace` itself
        when no smoothing applies. Any trailing partial window is dropped.
    """
    scale = int(scale)
    if scale == 1 or scale == 0:
        return trace
    slen = int(len(trace) / scale)
    if slen == 0:
        return trace
    # BUG FIX: xrange is Python-2-only (NameError on Python 3); range is
    # equivalent here on both versions.
    return np.array([np.mean(trace[i*scale:(i+1)*scale]) for i in range(slen)])
@autojit
def choose_k_from_n(n, k):
    """Return k distinct indices sampled uniformly from 0..n-1.

    For large k/n the indices come from argsort of random keys; otherwise
    a partial Fisher-Yates shuffle with precomputed swap targets is used.

    Args:
        n: population size.
        k: number of indices to draw (k <= n).

    Returns:
        Sequence of k distinct ints in [0, n).
    """
    # use vaguely estimated metric of when sorting random numbers is better
    if float(k) / float(n) > 0.125:
        ans = np.argsort(np.random.rand(n))[:k]
        return ans
    # BUG FIX: on Python 3, range() is not a mutable list and xrange does
    # not exist; use list(range(...)) / np.arange instead.
    nums = list(range(n))
    swaps = (np.random.rand(k) * np.arange(n, n - k, -1)).astype('int') + np.arange(k)
    for i in range(k):
        # swap with some random element from here to end - these swap positions precalculated
        nums[i], nums[swaps[i]] = nums[swaps[i]], nums[i]
    ans = nums[:k]
    return ans
def fixed_connectivity(n, k):
    """Build a random connectivity where every node has exactly k inputs.

    Args:
        n: number of nodes.
        k: number of presynaptic partners per node.

    Returns:
        (prelist, postlist): int arrays of length n*k; entry j*k..(j+1)*k-1
        of postlist is node j and the matching prelist entries are its k
        randomly chosen presynaptic nodes.
    """
    prelist = np.zeros(k * n, dtype = int)
    postlist = np.zeros_like(prelist)
    # BUG FIX: xrange is Python-2-only (NameError on Python 3).
    for j in range(n):
        presynapses = choose_k_from_n(n, k)
        prelist[j * k:(j + 1) * k] = presynapses
        postlist[j * k:(j + 1) * k] = j * np.ones(k, dtype = int)
    return prelist, postlist
|
{"hexsha": "f773d57aa74b4992ccc7a738ae5e36925acad880", "size": 1310, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/brian2/utils_net.py", "max_stars_repo_name": "caglorithm/stimulus_neural_populations", "max_stars_repo_head_hexsha": "58567901bed6f6bc17fc2975435138c33bb6be66", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-06-04T07:39:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T17:34:37.000Z", "max_issues_repo_path": "models/brian2/utils_net.py", "max_issues_repo_name": "caglorithm/stimulus_neural_populations", "max_issues_repo_head_hexsha": "58567901bed6f6bc17fc2975435138c33bb6be66", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/brian2/utils_net.py", "max_forks_repo_name": "caglorithm/stimulus_neural_populations", "max_forks_repo_head_hexsha": "58567901bed6f6bc17fc2975435138c33bb6be66", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-06-04T14:12:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T15:31:02.000Z", "avg_line_length": 31.9512195122, "max_line_length": 93, "alphanum_fraction": 0.6160305344, "include": true, "reason": "import numpy,import numba,from numba", "num_tokens": 378}
|
[STATEMENT]
lemma exL_exMap_lem:
fixes
f :: "Label -~> sterm" and
lz :: "Label -~> sterm" and f' :: "Label -~> sterm"
assumes "dom f = dom lz" and "dom f' = dom f"
shows
"\<forall>L1 L2. finite L1
\<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. (the(f l)\<^bsup>[Fvar s, Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> (\<forall>z. (the(f l)\<^bsup>[Fvar s, Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> z)
\<longrightarrow> (\<exists>u. t \<Rightarrow>\<^sub>\<beta> u \<and> z \<Rightarrow>\<^sub>\<beta> u)))
\<and> the(f' l) = \<sigma>[s,p]t))
\<longrightarrow> finite L2
\<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the(f l)\<^bsup>[Fvar s, Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t \<and> the(lz l) = \<sigma>[s,p]t))
\<longrightarrow> (\<exists>L'. finite L'
\<and> (\<exists>lu. dom lu = dom f
\<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. (the(f' l)\<^bsup>[Fvar s, Fvar p]\<^esup>) \<Rightarrow>\<^sub>\<beta> t
\<and> the(lu l) = \<sigma>[s,p]t))
\<and> (\<forall>l\<in>dom f. body (the (f' l)))
\<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. (the(lz l)\<^bsup>[Fvar s, Fvar p]\<^esup>) \<Rightarrow>\<^sub>\<beta> t
\<and> the(lu l) = \<sigma>[s,p]t))
\<and> (\<forall>l\<in>dom f. body (the (lz l)))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (f' l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lz l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom f \<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (f' l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom f. body (the (f' l))) \<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (lz l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom f. body (the (lz l)))))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
dom f = dom lz
dom f' = dom f
goal (1 subgoal):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (f' l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (f l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lz l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom f \<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (f' l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom f. body (the (f' l))) \<and> (\<forall>l\<in>dom f. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (lz l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom f. body (the (lz l)))))
[PROOF STEP]
proof (induct rule: fmap_induct3)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom Map.empty \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None)) \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None))))
2. \<And>x a b c F1 F2 F3. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1\<rbrakk> \<Longrightarrow> \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
case empty
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom Map.empty \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None)) \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None))))
2. \<And>x a b c F1 F2 F3. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1\<rbrakk> \<Longrightarrow> \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom Map.empty \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None)) \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None))))
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the None = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom Map.empty \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None)) \<and> (\<forall>l\<in>dom Map.empty. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the None\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom Map.empty. body (the None))))
goal (1 subgoal):
1. \<And>x a b c F1 F2 F3. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1\<rbrakk> \<Longrightarrow> \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x a b c F1 F2 F3. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1\<rbrakk> \<Longrightarrow> \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
case (insert x a b c F1 F2 F3)
[PROOF STATE]
proof (state)
this:
\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l)))))
dom F1 = dom F2
dom F3 = dom F1
x \<notin> dom F1
goal (1 subgoal):
1. \<And>x a b c F1 F2 F3. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1\<rbrakk> \<Longrightarrow> \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l)))))
dom F1 = dom F2
dom F3 = dom F1
x \<notin> dom F1
goal (1 subgoal):
1. \<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
[PROOF STEP]
proof (intro strip)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
fix L1 :: "fVariable set" and L2 :: "fVariable set"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
fix
L :: "fVariable set" and
t :: sterm and F :: "Label -~> sterm" and
P :: "sterm \<Rightarrow> sterm \<Rightarrow> fVariable \<Rightarrow> fVariable \<Rightarrow> bool"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
assume
"dom F1 = dom F" and
*: "\<forall>l\<in>dom (F1(x \<mapsto> a)).
\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
\<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p"
[PROOF STATE]
proof (state)
this:
dom F1 = dom F
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
hence
F: "\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
\<longrightarrow> P (the(F1 l)) (the(F l)) s p"
[PROOF STATE]
proof (prove)
using this:
dom F1 = dom F
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. \<forall>l\<in>dom F1. \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
proof (intro strip)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
fix l :: Label and s :: fVariable and p :: fVariable
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
assume "l \<in> dom F1"
[PROOF STATE]
proof (state)
this:
l \<in> dom F1
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
hence "l \<in> dom (F1(x \<mapsto> a))"
[PROOF STATE]
proof (prove)
using this:
l \<in> dom F1
goal (1 subgoal):
1. l \<in> dom (F1(x \<mapsto> a))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
l \<in> dom (F1(x \<mapsto> a))
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
l \<in> dom (F1(x \<mapsto> a))
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
assume "s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p"
[PROOF STATE]
proof (state)
this:
s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
l \<in> dom (F1(x \<mapsto> a))
s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
[PROOF STEP]
have "P (the((F1(x \<mapsto> a)) l)) (the((F(x \<mapsto> t)) l)) s p"
[PROOF STATE]
proof (prove)
using this:
l \<in> dom (F1(x \<mapsto> a))
s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
goal (1 subgoal):
1. P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
[PROOF STEP]
using *
[PROOF STATE]
proof (prove)
using this:
l \<in> dom (F1(x \<mapsto> a))
s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
from \<open>x \<notin> dom F1\<close> \<open>l \<in> dom F1\<close>
[PROOF STATE]
proof (chain)
picking this:
x \<notin> dom F1
l \<in> dom F1
[PROOF STEP]
have "l \<noteq> x"
[PROOF STATE]
proof (prove)
using this:
x \<notin> dom F1
l \<in> dom F1
goal (1 subgoal):
1. l \<noteq> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
l \<noteq> x
goal (1 subgoal):
1. \<And>l s p. \<lbrakk>dom F1 = dom F; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p; l \<in> dom F1; s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p\<rbrakk> \<Longrightarrow> P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
l \<noteq> x
[PROOF STEP]
show "P (the(F1 l)) (the(F l)) s p"
[PROOF STATE]
proof (prove)
using this:
P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
l \<noteq> x
goal (1 subgoal):
1. P (the (F1 l)) (the (F l)) s p
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
P (the (F1 l)) (the (F l)) s p
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the (F1 l)) (the (F l)) s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from *
[PROOF STATE]
proof (chain)
picking this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
[PROOF STEP]
have "\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P a t s p"
[PROOF STATE]
proof (prove)
using this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the ((F1(x \<mapsto> a)) l)) (the ((F(x \<mapsto> t)) l)) s p
goal (1 subgoal):
1. \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P a t s p
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P a t s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note this F
[PROOF STATE]
proof (state)
this:
\<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P a t s p
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L \<and> p \<notin> L \<and> s \<noteq> p \<longrightarrow> P (the (F1 l)) (the (F l)) s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the ((F1(x \<mapsto> a)) l)) (the ((?F2(x \<mapsto> ?t2)) l)) s p\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 a ?t2 s p
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the ((F1(x \<mapsto> a)) l)) (the ((?F2(x \<mapsto> ?t2)) l)) s p\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the (F1 l)) (the (?F2 l)) s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note pred = this
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the ((F1(x \<mapsto> a)) l)) (the ((?F2(x \<mapsto> ?t2)) l)) s p\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 a ?t2 s p
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the ((F1(x \<mapsto> a)) l)) (the ((?F2(x \<mapsto> ?t2)) l)) s p\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> ?L2 \<and> p \<notin> ?L2 \<and> s \<noteq> p \<longrightarrow> ?P2 (the (F1 l)) (the (?F2 l)) s p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note
tmp =
pred[of _ L1 "(\<lambda>t t' s p.
\<exists>t''. (t\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t''
\<and> (\<forall>z. t\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> z \<longrightarrow> (\<exists>u. t'' \<Rightarrow>\<^sub>\<beta> u \<and> z \<Rightarrow>\<^sub>\<beta> u)))
\<and> t' = \<sigma>[s,p] t'')"]
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (a\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. a\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> ?t2 = \<sigma>[s,p] t'')
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the (?F2 l) = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note predc = tmp(1)
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (a\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. a\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> ?t2 = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note predF3 = tmp(2)
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the (?F2 l) = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note tmp = pred[of _ L2
"(\<lambda>t t' s p. \<exists>t''. t\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t'' \<and> t' = \<sigma>[s,p] t'')"]
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. a\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> ?t2 = \<sigma>[s,p] t'')
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the (?F2 l) = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note predb = tmp(1)
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. a\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> ?t2 = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note predF2 = tmp(2)
[PROOF STATE]
proof (state)
this:
\<lbrakk>dom F1 = dom ?F2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the ((?F2(x \<mapsto> ?t2)) l) = \<sigma>[s,p] t'')\<rbrakk> \<Longrightarrow> \<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the (?F2 l) = \<sigma>[s,p] t'')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
assume
a: "\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> z
\<longrightarrow> (\<exists>u. t \<Rightarrow>\<^sub>\<beta> u \<and> z \<Rightarrow>\<^sub>\<beta> u)))
\<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)" and
b: "\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)" and
"finite L1" and "finite L2"
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)
finite L1
finite L2
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from
diamond_binder[OF this(3) predc[OF sym[OF \<open>dom F3 = dom F1\<close>] this(1)]
this(4) predb[OF \<open>dom F1 = dom F2\<close> this(2)]]
[PROOF STATE]
proof (chain)
picking this:
\<exists>L'. finite L' \<and> (\<exists>t''. (\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t'' = \<sigma>[s,p] u)) \<and> (\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t'' = \<sigma>[s,p] u)))
[PROOF STEP]
obtain La t where
"finite La" and
pred_c: "\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p
\<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> u \<and> t = \<sigma>[s,p] u)" and
pred_b: "\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p
\<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> u \<and> t = \<sigma>[s,p] u)"
[PROOF STATE]
proof (prove)
using this:
\<exists>L'. finite L' \<and> (\<exists>t''. (\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t'' = \<sigma>[s,p] u)) \<and> (\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t'' = \<sigma>[s,p] u)))
goal (1 subgoal):
1. (\<And>La t. \<lbrakk>finite La; \<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u); \<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
finite La
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
finite La
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from this(1)
[PROOF STATE]
proof (chain)
picking this:
finite La
[PROOF STEP]
have "finite (La \<union> FV c \<union> FV b)"
[PROOF STATE]
proof (prove)
using this:
finite La
goal (1 subgoal):
1. finite (La \<union> FV c \<union> FV b)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
finite (La \<union> FV c \<union> FV b)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from exFresh_s_p_cof[OF this]
[PROOF STATE]
proof (chain)
picking this:
\<exists>s p. s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
[PROOF STEP]
obtain s p where
sp: "s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p"
[PROOF STATE]
proof (prove)
using this:
\<exists>s p. s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
goal (1 subgoal):
1. (\<And>s p. s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
with pred_c
[PROOF STATE]
proof (chain)
picking this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
[PROOF STEP]
obtain u where "c\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> u"
[PROOF STATE]
proof (prove)
using this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
goal (1 subgoal):
1. (\<And>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
c\<^bsup>[Fvar s,Fvar p]\<^esup> => u
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from par_beta_lc[OF this]
[PROOF STATE]
proof (chain)
picking this:
lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>) \<and> lc u
[PROOF STEP]
have "lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>)"
[PROOF STATE]
proof (prove)
using this:
lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>) \<and> lc u
goal (1 subgoal):
1. lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
with lc_body[of "c\<^bsup>[Fvar s,Fvar p]\<^esup>" s p] sp sclose_sopen_eq_t[of s c p 0]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>); s \<noteq> p\<rbrakk> \<Longrightarrow> body \<sigma>[s,p] (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<lbrakk>s \<notin> FV c; p \<notin> FV c; s \<noteq> p\<rbrakk> \<Longrightarrow> {0 \<leftarrow> [s,p]} {0 \<rightarrow> [Fvar s,Fvar p]} c = c
lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
[PROOF STEP]
have c: "body c"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>); s \<noteq> p\<rbrakk> \<Longrightarrow> body \<sigma>[s,p] (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<lbrakk>s \<notin> FV c; p \<notin> FV c; s \<noteq> p\<rbrakk> \<Longrightarrow> {0 \<leftarrow> [s,p]} {0 \<rightarrow> [Fvar s,Fvar p]} c = c
lc (c\<^bsup>[Fvar s,Fvar p]\<^esup>)
goal (1 subgoal):
1. body c
[PROOF STEP]
by (auto simp: closez_def openz_def)
[PROOF STATE]
proof (state)
this:
body c
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from sp pred_b
[PROOF STATE]
proof (chain)
picking this:
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
[PROOF STEP]
obtain u where "b\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> u"
[PROOF STATE]
proof (prove)
using this:
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
goal (1 subgoal):
1. (\<And>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
b\<^bsup>[Fvar s,Fvar p]\<^esup> => u
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from par_beta_lc[OF this]
[PROOF STATE]
proof (chain)
picking this:
lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>) \<and> lc u
[PROOF STEP]
have "lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>)"
[PROOF STATE]
proof (prove)
using this:
lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>) \<and> lc u
goal (1 subgoal):
1. lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
with lc_body[of "b\<^bsup>[Fvar s,Fvar p]\<^esup>" s p] sp sclose_sopen_eq_t[of s b p 0]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>); s \<noteq> p\<rbrakk> \<Longrightarrow> body \<sigma>[s,p] (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<lbrakk>s \<notin> FV b; p \<notin> FV b; s \<noteq> p\<rbrakk> \<Longrightarrow> {0 \<leftarrow> [s,p]} {0 \<rightarrow> [Fvar s,Fvar p]} b = b
lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
[PROOF STEP]
have "body b"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>); s \<noteq> p\<rbrakk> \<Longrightarrow> body \<sigma>[s,p] (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
s \<notin> La \<union> FV c \<union> FV b \<and> p \<notin> La \<union> FV c \<union> FV b \<and> s \<noteq> p
\<lbrakk>s \<notin> FV b; p \<notin> FV b; s \<noteq> p\<rbrakk> \<Longrightarrow> {0 \<leftarrow> [s,p]} {0 \<rightarrow> [Fvar s,Fvar p]} b = b
lc (b\<^bsup>[Fvar s,Fvar p]\<^esup>)
goal (1 subgoal):
1. body b
[PROOF STEP]
by (auto simp: closez_def openz_def)
[PROOF STATE]
proof (state)
this:
body b
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note c this
[PROOF STATE]
proof (state)
this:
body c
body b
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
body c
body b
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
note bodycb = this
[PROOF STATE]
proof (state)
this:
body c
body b
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from
predF3[OF sym[OF \<open>dom F3 = dom F1\<close>] a]
predF2[OF \<open>dom F1 = dom F2\<close> b]
\<open>finite L1\<close> \<open>finite L2\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t'')
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the (F2 l) = \<sigma>[s,p] t'')
finite L1
finite L2
[PROOF STEP]
have
"\<exists>L'. finite L'
\<and> (\<exists>lu. dom lu = dom F1
\<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> the (lu l) = \<sigma>[s,p] t))
\<and> (\<forall>l\<in>dom F1. body (the (F3 l)))
\<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> the (lu l) = \<sigma>[s,p] t))
\<and> (\<forall>l\<in>dom F1. body (the (F2 l))))"
[PROOF STATE]
proof (prove)
using this:
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t'' => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t'')
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t''. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t'' \<and> the (F2 l) = \<sigma>[s,p] t'')
finite L1
finite L2
goal (1 subgoal):
1. \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))
[PROOF STEP]
by (rule_tac x = L1 in allE[OF insert(1)], simp)
[PROOF STATE]
proof (state)
this:
\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))
[PROOF STEP]
obtain Lb f where
"finite Lb" and "dom f = dom F1" and
pred_F3: "\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> the (f l) = \<sigma>[s,p] t)" and
body_F3: "\<forall>l\<in>dom F1. body (the (F3 l))" and
pred_F2: "\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p
\<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t
\<and> the (f l) = \<sigma>[s,p] t)" and
body_F2: "\<forall>l\<in>dom F1. body (the (F2 l))"
[PROOF STATE]
proof (prove)
using this:
\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))
goal (1 subgoal):
1. (\<And>Lb f. \<lbrakk>finite Lb; dom f = dom F1; \<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t); \<forall>l\<in>dom F1. body (the (F3 l)); \<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t); \<forall>l\<in>dom F1. body (the (F2 l))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite Lb
dom f = dom F1
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
\<forall>l\<in>dom F1. body (the (F3 l))
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
\<forall>l\<in>dom F1. body (the (F2 l))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from \<open>finite La\<close> \<open>finite Lb\<close>
[PROOF STATE]
proof (chain)
picking this:
finite La
finite Lb
[PROOF STEP]
have "finite (La \<union> Lb)"
[PROOF STATE]
proof (prove)
using this:
finite La
finite Lb
goal (1 subgoal):
1. finite (La \<union> Lb)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
finite (La \<union> Lb)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
finite (La \<union> Lb)
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from \<open>dom f = dom F1\<close>
[PROOF STATE]
proof (chain)
picking this:
dom f = dom F1
[PROOF STEP]
have "dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))"
[PROOF STATE]
proof (prove)
using this:
dom f = dom F1
goal (1 subgoal):
1. dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from pred_c pred_F3
[PROOF STATE]
proof (chain)
picking this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
[PROOF STEP]
have
"\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p
\<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t'
\<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')"
[PROOF STATE]
proof (prove)
using this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. c\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
goal (1 subgoal):
1. \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from bodycb(1) body_F3
[PROOF STATE]
proof (chain)
picking this:
body c
\<forall>l\<in>dom F1. body (the (F3 l))
[PROOF STEP]
have "\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))"
[PROOF STATE]
proof (prove)
using this:
body c
\<forall>l\<in>dom F1. body (the (F3 l))
goal (1 subgoal):
1. \<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from pred_b pred_F2
[PROOF STATE]
proof (chain)
picking this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
[PROOF STEP]
have
"\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p
\<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t'
\<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')"
[PROOF STATE]
proof (prove)
using this:
\<forall>s p. s \<notin> La \<and> p \<notin> La \<and> s \<noteq> p \<longrightarrow> (\<exists>u. b\<^bsup>[Fvar s,Fvar p]\<^esup> => u \<and> t = \<sigma>[s,p] u)
\<forall>l\<in>dom F1. \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (f l) = \<sigma>[s,p] t)
goal (1 subgoal):
1. \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
from bodycb(2) body_F2
[PROOF STATE]
proof (chain)
picking this:
body b
\<forall>l\<in>dom F1. body (the (F2 l))
[PROOF STEP]
have "\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))"
[PROOF STATE]
proof (prove)
using this:
body b
\<forall>l\<in>dom F1. body (the (F2 l))
goal (1 subgoal):
1. \<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))
goal (1 subgoal):
1. \<And>L1 L2. \<lbrakk>\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the (F3 l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F1 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (F2 l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom F1 \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F3 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F3 l))) \<and> (\<forall>l\<in>dom F1. \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the (F2 l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom F1. body (the (F2 l))))); dom F1 = dom F2; dom F3 = dom F1; x \<notin> dom F1; finite L1; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t); finite L2; \<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. 
the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)\<rbrakk> \<Longrightarrow> \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
finite (La \<union> Lb)
dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))
[PROOF STEP]
show
"\<exists>L'. finite L'
\<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a))
\<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)).
\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t'
\<and> the (lu l) = \<sigma>[s,p] t'))
\<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l)))
\<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)).
\<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p
\<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> \<Rightarrow>\<^sub>\<beta> t'
\<and> the (lu l) = \<sigma>[s,p] t'))
\<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))"
[PROOF STATE]
proof (prove)
using this:
finite (La \<union> Lb)
dom (f(x \<mapsto> t)) = dom (F1(x \<mapsto> a))
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))
\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> La \<union> Lb \<and> p \<notin> La \<union> Lb \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the ((f(x \<mapsto> t)) l) = \<sigma>[s,p] t')
\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))
goal (1 subgoal):
1. \<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the (lu l) = \<sigma>[s,p] t')) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the (lu l) = \<sigma>[s,p] t')) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
[PROOF STEP]
by (rule_tac x = "La \<union> Lb" in exI,
simp (no_asm_simp) only: conjI simp_thms(22),
rule_tac x = "(f(x \<mapsto> t))" in exI, simp)
[PROOF STATE]
proof (state)
this:
\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the (lu l) = \<sigma>[s,p] t')) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t'. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t' \<and> the (lu l) = \<sigma>[s,p] t')) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l))))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<forall>L1 L2. finite L1 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L1 \<and> p \<notin> L1 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. (the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> (\<forall>z. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => z \<longrightarrow> (\<exists>u. t => u \<and> z => u))) \<and> the ((F3(x \<mapsto> c)) l) = \<sigma>[s,p] t)) \<longrightarrow> finite L2 \<longrightarrow> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L2 \<and> p \<notin> L2 \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F1(x \<mapsto> a)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the ((F2(x \<mapsto> b)) l) = \<sigma>[s,p] t)) \<longrightarrow> (\<exists>L'. finite L' \<and> (\<exists>lu. dom lu = dom (F1(x \<mapsto> a)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F3(x \<mapsto> c)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F3(x \<mapsto> c)) l))) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). \<forall>s p. s \<notin> L' \<and> p \<notin> L' \<and> s \<noteq> p \<longrightarrow> (\<exists>t. the ((F2(x \<mapsto> b)) l)\<^bsup>[Fvar s,Fvar p]\<^esup> => t \<and> the (lu l) = \<sigma>[s,p] t)) \<and> (\<forall>l\<in>dom (F1(x \<mapsto> a)). body (the ((F2(x \<mapsto> b)) l)))))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 83677, "file": "Locally-Nameless-Sigma_Sigma_ParRed", "length": 111}
|
[STATEMENT]
lemma tensor_lookup:
assumes "\<And>is. is \<lhd> dims A \<Longrightarrow> lookup A is = e is"
shows "tensor_from_lookup (dims A) e = A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. tensor_from_lookup (dims A) e = A
[PROOF STEP]
using tensor_lookup_base lookup_def length_vec tensor_from_lookup_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>length ?v = prod_list ?ds; \<And>is. is \<lhd> ?ds \<Longrightarrow> lookup_base ?ds ?v is = ?e is\<rbrakk> \<Longrightarrow> tensor_vec_from_lookup ?ds ?e = ?v
lookup ?A = lookup_base (dims ?A) (vec ?A)
length (vec ?A) = prod_list (dims ?A)
tensor_from_lookup ?ds ?e = tensor_from_vec ?ds (tensor_vec_from_lookup ?ds ?e)
goal (1 subgoal):
1. tensor_from_lookup (dims A) e = A
[PROOF STEP]
by (metis assms tensor_from_vec_simp)
|
{"llama_tokens": 316, "file": "Deep_Learning_Tensor", "length": 2}
|
import os
import uuid
from shutil import copytree
from tempfile import gettempdir
from uuid import uuid4
from os.path import join
import sys
from unittest import TestCase
from aequilibrae import Project
from aequilibrae.paths import path_computation, Graph
from aequilibrae.paths.results import PathResults
from aequilibrae.utils.create_example import create_example
from ...data import triangle_graph_blocking
import numpy as np
# Adds the folder with the data to the path and collects the paths to the files
lib_path = os.path.abspath(os.path.join("..", "../tests"))
sys.path.append(lib_path)
# Origin/destination node IDs shared by the path-computation tests below.
origin = 5
dest = 13
class TestPathResults(TestCase):
    """Path computation, skimming, and trace-update tests on the example project."""

    def setUp(self) -> None:
        # A uniquely named scratch project per test avoids collisions between
        # repeated or parallel runs.
        self.project = create_example(join(gettempdir(), "test_set_pce_" + uuid4().hex))
        self.project.network.build_graphs()
        self.g = self.project.network.graphs["c"]  # type: Graph
        self.g.set_graph("free_flow_time")
        self.g.set_blocked_centroid_flows(False)
        self.matrix = self.project.matrices.get_matrix("demand_omx")
        self.matrix.computational_view()
        self.r = PathResults()
        self.r.prepare(self.g)

    def tearDown(self) -> None:
        self.project.close()
        self.matrix.close()
        del self.r

    def test_reset(self):
        """After reset(), path fields are None and work arrays are re-initialized."""
        msg = "Fail to reset the Path computation object"
        self.r.compute_path(dest, origin)
        self.r.reset()
        # assertIsNone is the idiomatic unittest check and tests identity,
        # which is also safe for array-like attributes whose __eq__ may be
        # elementwise rather than returning a plain bool.
        self.assertIsNone(self.r.path, msg)
        self.assertIsNone(self.r.path_nodes, msg)
        self.assertIsNone(self.r.path_link_directions, msg)
        self.assertIsNone(self.r.milepost, msg)
        self.assertEqual(self.r.predecessors.max(), -1, msg)
        self.assertEqual(self.r.predecessors.min(), -1, msg)
        self.assertEqual(self.r.connectors.max(), -1, msg)
        self.assertEqual(self.r.connectors.min(), -1, msg)
        if self.r.skims is not None:
            self.assertEqual(self.r.skims.max(), np.inf, msg)
            self.assertEqual(self.r.skims.min(), np.inf, msg)
        # reset() on an unprepared result object must fail loudly.
        new_r = PathResults()
        with self.assertRaises(ValueError):
            new_r.reset()

    def test_compute_paths(self):
        path_computation(5, 2, self.g, self.r)
        self.assertEqual(list(self.r.path), [12, 14], "Path computation failed. Wrong sequence of links")
        self.assertEqual(list(self.r.path_link_directions), [1, 1], "Path computation failed. Wrong link directions")
        self.assertEqual(list(self.r.path_nodes), [5, 6, 2], "Path computation failed. Wrong sequence of path nodes")
        self.assertEqual(list(self.r.milepost), [0, 4, 9], "Path computation failed. Wrong milepost results")

    def test_compute_with_skimming(self):
        r = PathResults()
        self.g.set_skimming("free_flow_time")
        r.prepare(self.g)
        r.compute_path(origin, dest)
        # The cumulative cost at the final path node must equal the skim value.
        self.assertEqual(r.milepost[-1], r.skims[dest], "Skims computed wrong when computing path")

    def test_update_trace(self):
        self.r.compute_path(origin, 2)
        self.r.update_trace(10)
        self.assertEqual(list(self.r.path), [13, 25], "Path update failed. Wrong sequence of links")
        self.assertEqual(list(self.r.path_link_directions), [1, 1], "Path update failed. Wrong link directions")
        self.assertEqual(list(self.r.path_nodes), [5, 9, 10], "Path update failed. Wrong sequence of path nodes")
        self.assertEqual(list(self.r.milepost), [0, 5, 8], "Path update failed. Wrong milepost results")
class TestBlockingTrianglePathResults(TestCase):
    """Path tests on a triangle network with centroid flows blocked."""

    def setUp(self) -> None:
        os.environ['PATH'] = os.path.join(gettempdir(), 'temp_data') + ';' + os.environ['PATH']
        self.proj_dir = os.path.join(gettempdir(), uuid.uuid4().hex)
        copytree(triangle_graph_blocking, self.proj_dir)
        self.project = Project()
        self.project.open(self.proj_dir)
        self.project.network.build_graphs(modes=["c"])
        self.g = self.project.network.graphs["c"]  # type: Graph
        self.g.set_graph("free_flow_time")
        self.g.set_blocked_centroid_flows(True)
        self.r = PathResults()
        self.r.prepare(self.g)

    def tearDown(self) -> None:
        self.project.close()
        del self.r

    def _assert_route(self, from_node, to_node, nodes, links):
        # Compute a single path and verify its node and link sequences.
        self.r.compute_path(from_node, to_node)
        self.assertEqual(list(self.r.path_nodes), nodes)
        self.assertEqual(list(self.r.path), links)

    def test_compute_paths(self):
        """Every node-to-node route on the plain triangle."""
        self._assert_route(1, 2, [1, 3, 2], [1, 2])
        self._assert_route(2, 1, [2, 1], [3])
        self._assert_route(3, 1, [3, 2, 1], [2, 3])
        self._assert_route(3, 2, [3, 2], [2])
        self._assert_route(1, 3, [1, 3], [1])
        self._assert_route(2, 3, [2, 1, 3], [3, 1])

    def test_compute_blocking_paths(self):
        """Centroid-to-centroid routes must go around, not through, centroids."""
        self._assert_route(4, 5, [4, 1, 3, 2, 5], [4, 1, 2, 5])
        self._assert_route(5, 4, [5, 2, 1, 4], [5, 3, 4])
        self._assert_route(6, 4, [6, 3, 2, 1, 4], [6, 2, 3, 4])
        self._assert_route(6, 5, [6, 3, 2, 5], [6, 2, 5])
        self._assert_route(4, 6, [4, 1, 3, 6], [4, 1, 6])
        self._assert_route(5, 6, [5, 2, 1, 3, 6], [5, 3, 1, 6])

    def test_update_trace(self):
        self._assert_route(1, 2, [1, 3, 2], [1, 2])
        self.r.update_trace(3)
        self.assertEqual(list(self.r.path_nodes), [1, 3])
        self.assertEqual(list(self.r.path), [1])

    def test_update_blocking_trace(self):
        self._assert_route(4, 5, [4, 1, 3, 2, 5], [4, 1, 2, 5])
        self.r.update_trace(6)
        self.assertEqual(list(self.r.path_nodes), [4, 1, 3, 6])
        self.assertEqual(list(self.r.path), [4, 1, 6])
|
{"hexsha": "dbf6f07019c57530fe12105bb3e09e622e936b7a", "size": 7120, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/aequilibrae/paths/test_pathResults.py", "max_stars_repo_name": "Art-Ev/aequilibrae", "max_stars_repo_head_hexsha": "9f438278e09c875717779bfcc99bf7ba75ed1372", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2018-07-18T09:58:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:36:25.000Z", "max_issues_repo_path": "tests/aequilibrae/paths/test_pathResults.py", "max_issues_repo_name": "Art-Ev/aequilibrae", "max_issues_repo_head_hexsha": "9f438278e09c875717779bfcc99bf7ba75ed1372", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 197, "max_issues_repo_issues_event_min_datetime": "2018-06-30T07:01:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T06:30:43.000Z", "max_forks_repo_path": "tests/aequilibrae/paths/test_pathResults.py", "max_forks_repo_name": "Art-Ev/aequilibrae", "max_forks_repo_head_hexsha": "9f438278e09c875717779bfcc99bf7ba75ed1372", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2018-07-16T18:10:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T15:36:26.000Z", "avg_line_length": 40.2259887006, "max_line_length": 117, "alphanum_fraction": 0.6481741573, "include": true, "reason": "import numpy", "num_tokens": 1805}
|
import cv2
import numpy as np
from scipy import ndimage
import os

# https://docs.opencv.org/3.4.1/d7/d4d/tutorial_py_thresholding.html
# Threshold parameters: pixel values are compared against `th`; `max_val`
# is the value written for passing pixels in the binary modes.
th = 127
max_val = 255

# for color do not forget to convert BGR to RBG
cameraCapture = cv2.VideoCapture(0)
fps = 30
size = (int(cameraCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cameraCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

while cameraCapture.isOpened():
    # Guard against failed grabs (camera unplugged / no frame): calling
    # cv2.threshold on a None frame would raise.
    grabbed, frame = cameraCapture.read()
    if not grabbed:
        break
    ret, o1 = cv2.threshold(frame, th, max_val, cv2.THRESH_BINARY)  # 0 or max_value
    ret, o2 = cv2.threshold(frame, th, max_val, cv2.THRESH_BINARY_INV)
    ret, o3 = cv2.threshold(frame, th, max_val, cv2.THRESH_TOZERO)  # keep as it is none concern pixel
    ret, o4 = cv2.threshold(frame, th, max_val, cv2.THRESH_TOZERO_INV)
    ret, o5 = cv2.threshold(frame, th, max_val, cv2.THRESH_TRUNC)  # all pixel > threshhold => threshold
    cv2.imshow('MyWindow', frame)
    cv2.imshow("binary", o1)
    cv2.imshow("binary_inv", o2)
    cv2.imshow("tozero", o3)
    cv2.imshow("tozero_inv", o4)
    cv2.imshow("trunc", o5)
    if cv2.waitKey(1) == 27:
        break  # esc to quit

cameraCapture.release()
cv2.destroyAllWindows()
|
{"hexsha": "383b20b37551e82d4643debb4540cedfe3c59f4a", "size": 1186, "ext": "py", "lang": "Python", "max_stars_repo_path": "introduction/13_camera_thresholding.py", "max_stars_repo_name": "Tenjin0/python-opencv-base", "max_stars_repo_head_hexsha": "b9732f24de688547b6d45b9d796d0ff458902874", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "introduction/13_camera_thresholding.py", "max_issues_repo_name": "Tenjin0/python-opencv-base", "max_issues_repo_head_hexsha": "b9732f24de688547b6d45b9d796d0ff458902874", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "introduction/13_camera_thresholding.py", "max_forks_repo_name": "Tenjin0/python-opencv-base", "max_forks_repo_head_hexsha": "b9732f24de688547b6d45b9d796d0ff458902874", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2380952381, "max_line_length": 104, "alphanum_fraction": 0.6998313659, "include": true, "reason": "import numpy,from scipy", "num_tokens": 362}
|
import unicornhat as uh
import time
import colorsys
import math
from random import randint
import numpy
# One-time panel configuration for a Pimoroni Unicorn pHAT layout.
uh.set_layout(uh.PHAT)
uh.rotation(90)
uh.brightness(0.4)
# width/height reflect the configured layout; used by the animations below.
width,height=uh.get_shape()
### Many of these were created by pimoroni and can be found here: https://github.com/pimoroni/unicorn-hat/tree/master/examples
### I much appreciate their efforts. This script consolidates some of their animations and loops them
def getThisPartyStarted(speed = 2):
    """Blink a red 2x2 square in the panel centre, accelerating each cycle."""
    uh.rotation(0)
    red = (255, 0, 0)
    dark = (0, 0, 0)
    square = ((2, 2), (2, 1), (3, 2), (3, 1))
    delay = speed
    for _ in range(69):
        # On phase, then off phase, each held for the current delay.
        for color in (red, dark):
            for px, py in square:
                uh.set_pixel(px, py, *color)
            uh.show()
            time.sleep(delay)
        # Halve the delay each cycle down to a floor of ~0.02 s.
        if delay > .02:
            delay *= .5
def pause(seconds = 1):
    # Blank the panel and keep it dark for `seconds` before the next animation.
    uh.clear()
    uh.show()
    time.sleep(seconds)
# This is my first ever script involving blinken lights
# I refactored it a bit from the original mess I wrote
def scanner(scans = 1, speed = 1):
    """Sweep a solid red bar row by row down the panel, once per scan."""
    uh.rotation(270)
    red = (255, 0, 0)
    for _ in range(scans):
        # speed = 1.0 / bpm * 60
        time.sleep(0.22 * speed)
        for row in range(height + 1):
            # Light the whole row, display it, then wipe before the next row.
            for col in range(width):
                uh.set_pixel(col, row, *red)
            uh.show()
            time.sleep(0.024 * speed)
            uh.clear()
# Yep. I wrote this too. THIN P A C M A N
def pacman(chomps, speed = 1):
    """Animate a thin Pac-Man chomping `chomps` times in yellow.

    Each chomp cycles mouth-open -> half-open -> closed -> half-open,
    holding each frame for `speed * .125` seconds.
    """
    uh.rotation(0)
    r, g, b = [232, 239, 35]
    t = speed * .125
    # Three mouth positions as (x, y) pixel lists. Renamed from `pacman`,
    # which shadowed the function itself.
    frames = [
        # fully open
        [[0, 0], [0, 1], [0, 2], [1, 1], [1, 2], [1, 3], [2, 2], [2, 3],
         [3, 3], [4, 2], [4, 3], [5, 1], [5, 2], [5, 3], [6, 0], [6, 1],
         [6, 2]],
        # half open
        [[0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3], [2, 1], [2, 2],
         [2, 3], [3, 3], [4, 1], [4, 2], [4, 3], [5, 0], [5, 1], [5, 2],
         [5, 3], [6, 1], [6, 2]],
        # closed
        [[0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1],
         [2, 2], [2, 3], [3, 0], [3, 1], [3, 2], [3, 3], [4, 0], [4, 1],
         [4, 2], [4, 3], [5, 0], [5, 1], [5, 2], [5, 3], [6, 1], [6, 2]],
    ]
    for c in range(chomps):
        # Explicit frame order replaces the original `m = p; if m > 2: m = 1`
        # index fix-up; the displayed sequence is identical.
        for m in (0, 1, 2, 1):
            for x, y in frames[m]:
                uh.set_pixel(x, y, r, g, b)
            uh.show()
            time.sleep(t)
            uh.clear()
def blinker(blinks, color = [255, 0, 0], speed = 0.02):
    """Flash the whole panel `blinks` times in `color`.

    `speed` is both the on-time and off-time (seconds) of each flash.
    NOTE: the mutable default for `color` is safe here because it is only
    read, never mutated.
    """
    uh.rotation(270)
    # Unpack once, outside the loop: the original unpacked per iteration and
    # its blue channel clobbered the loop variable `b` — a confusing (if
    # harmless) name collision.
    r, g, b = color
    for _ in range(blinks):
        uh.set_all(r, g, b)
        uh.show()
        time.sleep(speed)
        uh.clear()
        uh.show()
        time.sleep(speed)
def spinners(spins, speed = 0.2):
    """Spin two white pinwheels by alternating '+'- and 'x'-shaped frames.

    Runs for `spins` revolutions at `speed` seconds per half-turn, then
    holds a blank display for 4 seconds before returning.
    """
    uh.rotation(270)
    r, g, b = [255, 255, 255]
    # Two frames per revolution, centred on (1, 1) and (1, 5). The original
    # also set the plus-frame pixels once before the loop; that block was
    # dead code (the first loop iteration redraws the same pixels before
    # any show) and has been removed.
    plus_frame = ((1, 1), (1, 5), (1, 0), (0, 1), (2, 1),
                  (1, 2), (1, 4), (0, 5), (2, 5), (1, 6))
    cross_frame = ((1, 1), (1, 5), (0, 0), (2, 0), (0, 2),
                   (2, 2), (0, 4), (2, 4), (0, 6), (2, 6))
    for _ in range(spins):
        for frame in (plus_frame, cross_frame):
            for x, y in frame:
                uh.set_pixel(x, y, r, g, b)
            uh.show()
            time.sleep(speed)
            uh.clear()
    # Hold a blank display briefly before returning (preserves original timing).
    uh.show()
    time.sleep(4)
    uh.clear()
# spinners(40)
def chase(laps, speed = 0.04):
    """Chase a blue two-pixel dot left-to-right on rows 2/1, then back
    right-to-left on rows 3/4, once per lap."""
    for _ in range(laps):
        uh.rotation(0)
        blue = (0, 0, 255)
        # Flatten both sweeps into one ordered list of (column, rows) steps.
        steps = [(col, (2, 1)) for col in range(8)]
        steps += [(col, (3, 4)) for col in range(6, 0, -1)]
        for col, rows in steps:
            for row in rows:
                uh.set_pixel(col, row, *blue)
            uh.show()
            time.sleep(speed)
            uh.clear()
def larson(looks, speed = 0.12):
    """Larson-scanner sweep: a red two-pixel bar bounces across the panel,
    speeding up toward the centre and slowing near the edges."""
    for _ in range(looks):
        uh.rotation(0)
        red = (255, 0, 0)
        delay = speed
        ease = speed * .125
        # Forward sweep: shrink the delay over the first half, grow it after.
        for col in range(8):
            uh.set_pixel(col, 2, *red)
            uh.set_pixel(col, 1, *red)
            uh.show()
            time.sleep(delay)
            uh.clear()
            delay += ease if col >= 4 else -ease
        # Return sweep: mirror image of the easing above.
        for col in range(6, 0, -1):
            uh.set_pixel(col, 1, *red)
            uh.set_pixel(col, 2, *red)
            uh.show()
            time.sleep(delay)
            uh.clear()
            delay += -ease if col >= 4 else ease
# This isn't used here, but can be used to display ASCII art
# You can see it in use in radtag.py
def billboard(text, color = [255, 0, 0], bgcolor = [0, 0, 0]):
    """Scroll ASCII art down the panel.

    `text` is a list of equal-width strings, one per artwork row: a space
    maps to `bgcolor`, any other character to `color`. One scroll step is
    shown per entry of `text`, 0.15 s apart.
    """
    uh.rotation(90)
    r, g, b = color
    rb, gb, bb = bgcolor
    i = -1
    for k in range(len(text)):
        i = 0 if i >= 100 * len(text) else i + 1  # avoid overflow (defensive wrap)
        for h in range(height):
            for w in range(width):
                hPos = (i + h) % len(text)
                # Renamed from `chr`, which shadowed the builtin chr().
                ch = text[hPos][w]
                if ch == ' ':
                    uh.set_pixel(w, h, rb, gb, bb)
                else:
                    uh.set_pixel(w, h, r, g, b)
        uh.show()
        time.sleep(0.15)
# HACK THE PLANET
manifestotxt = ["01010100", "01101000", "01101001", "01110011", "00100000", "01101001", "01110011", "00100000", "01101111", "01110101", "01110010", "00100000", "01110111", "01101111", "01110010", "01101100", "01100100", "00100000", "01101110", "01101111", "01110111", "00101110", "00101110", "00101110", "00100000", "01110100", "01101000", "01100101", "00100000", "01110111", "01101111", "01110010", "01101100", "01100100", "00100000", "01101111", "01100110", "00100000", "01110100", "01101000", "01100101", "00100000", "01100101", "01101100", "01100101", "01100011", "01110100", "01110010", "01101111", "01101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01110100", "01101000", "01100101", "00100000", "01110011", "01110111", "01101001", "01110100", "01100011", "01101000", "00101100", "00100000", "01110100", "01101000", "01100101", "00100000", "01100010", "01100101", "01100001", "01110101", "01110100", "01111001", "00100000", "01101111", "01100110", "00100000", "01110100", "01101000", "01100101", "00100000", "01100010", "01100001", "01110101", "01100100", "00101110", "00100000", "01010111", "01100101", "00100000", "01101101", "01100001", "01101011", "01100101", "00100000", "01110101", "01110011", "01100101", "00100000", "01101111", "01100110", "00100000", "01100001", "00100000", "01110011", "01100101", "01110010", "01110110", "01101001", "01100011", "01100101", "00100000", "01100001", "01101100", "01110010", "01100101", "01100001", "01100100", "01111001", "00100000", "01100101", "01111000", "01101001", "01110011", "01110100", "01101001", "01101110", "01100111", "00100000", "01110111", "01101001", "01110100", "01101000", "01101111", "01110101", "01110100", "00100000", "01110000", "01100001", "01111001", "01101001", "01101110", "01100111", "00100000", "01100110", "01101111", "01110010", "00100000", "01110111", "01101000", "01100001", "01110100", "00100000", "01100011", "01101111", "01110101", "01101100", "01100100", "00100000", "01100010", "01100101", 
"00100000", "01100100", "01101001", "01110010", "01110100", "00101101", "01100011", "01101000", "01100101", "01100001", "01110000", "00100000", "01101001", "01100110", "00100000", "01101001", "01110100", "00100000", "01110111", "01100001", "01110011", "01101110", "00100111", "01110100", "00100000", "01110010", "01110101", "01101110", "00100000", "01100010", "01111001", "00100000", "01110000", "01110010", "01101111", "01100110", "01101001", "01110100", "01100101", "01100101", "01110010", "01101001", "01101110", "01100111", "00100000", "01100111", "01101100", "01110101", "01110100", "01110100", "01101111", "01101110", "01110011", "00101100", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "00100000", "01110101", "01110011", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00100000", "01010111", "01100101", "00100000", "01100101", "01111000", "01110000", "01101100", "01101111", "01110010", "01100101", "00101110", "00101110", "00101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "00100000", "01110101", "01110011", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00100000", "01010111", "01100101", "00100000", "01110011", "01100101", "01100101", "01101011", "00100000", "01100001", "01100110", "01110100", "01100101", "01110010", "00100000", "01101011", "01101110", "01101111", "01110111", "01101100", "01100101", "01100100", "01100111", "01100101", "00101110", "00101110", "00101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "00100000", "01110101", "01110011", "00100000", 
"01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00100000", "01010111", "01100101", "00100000", "01100101", "01111000", "01101001", "01110011", "01110100", "00100000", "01110111", "01101001", "01110100", "01101000", "01101111", "01110101", "01110100", "00100000", "01110011", "01101011", "01101001", "01101110", "00100000", "01100011", "01101111", "01101100", "01101111", "01110010", "00101100", "00100000", "01110111", "01101001", "01110100", "01101000", "01101111", "01110101", "01110100", "00100000", "01101110", "01100001", "01110100", "01101001", "01101111", "01101110", "01100001", "01101100", "01101001", "01110100", "01111001", "00101100", "00100000", "01110111", "01101001", "01110100", "01101000", "01101111", "01110101", "01110100", "00100000", "01110010", "01100101", "01101100", "01101001", "01100111", "01101001", "01101111", "01110101", "01110011", "00100000", "01100010", "01101001", "01100001", "01110011", "00101110", "00101110", "00101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "00100000", "01110101", "01110011", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00100000", "01011001", "01101111", "01110101", "00100000", "01100010", "01110101", "01101001", "01101100", "01100100", "00100000", "01100001", "01110100", "01101111", "01101101", "01101001", "01100011", "00100000", "01100010", "01101111", "01101101", "01100010", "01110011", "00101100", "00100000", "01111001", "01101111", "01110101", "00100000", "01110111", "01100001", "01100111", "01100101", "00100000", "01110111", "01100001", "01110010", "01110011", "00101100", "00100000", "01111001", "01101111", "01110101", "00100000", "01101101", "01110101", "01110010", "01100100", "01100101", "01110010", "00101100", "00100000", "01100011", 
"01101000", "01100101", "01100001", "01110100", "00101100", "00100000", "01100001", "01101110", "01100100", "00100000", "01101100", "01101001", "01100101", "00100000", "01110100", "01101111", "00100000", "01110101", "01110011", "00100000", "01100001", "01101110", "01100100", "00100000", "01110100", "01110010", "01111001", "00100000", "01110100", "01101111", "00100000", "01101101", "01100001", "01101011", "01100101", "00100000", "01110101", "01110011", "00100000", "01100010", "01100101", "01101100", "01101001", "01100101", "01110110", "01100101", "00100000", "01101001", "01110100", "00100111", "01110011", "00100000", "01100110", "01101111", "01110010", "00100000", "01101111", "01110101", "01110010", "00100000", "01101111", "01110111", "01101110", "00100000", "01100111", "01101111", "01101111", "01100100", "00101100", "00100000", "01111001", "01100101", "01110100", "00100000", "01110111", "01100101", "00100111", "01110010", "01100101", "00100000", "01110100", "01101000", "01100101", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00001101", "00001010", "00001101", "00001010", "01011001", "01100101", "01110011", "00101100", "00100000", "01001001", "00100000", "01100001", "01101101", "00100000", "01100001", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "00101110", "00100000", "01001101", "01111001", "00100000", "01100011", "01110010", "01101001", "01101101", "01100101", "00100000", "01101001", "01110011", "00100000", "01110100", "01101000", "01100001", "01110100", "00100000", "01101111", "01100110", "00100000", "01100011", "01110101", "01110010", "01101001", "01101111", "01110011", "01101001", "01110100", "01111001", "00101110", "00100000", "01001101", "01111001", "00100000", "01100011", "01110010", "01101001", "01101101", "01100101", "00100000", "01101001", "01110011", "00100000", "01110100", "01101000", "01100001", 
"01110100", "00100000", "01101111", "01100110", "00100000", "01101010", "01110101", "01100100", "01100111", "01101001", "01101110", "01100111", "00100000", "01110000", "01100101", "01101111", "01110000", "01101100", "01100101", "00100000", "01100010", "01111001", "00100000", "01110111", "01101000", "01100001", "01110100", "00100000", "01110100", "01101000", "01100101", "01111001", "00100000", "01110011", "01100001", "01111001", "00100000", "01100001", "01101110", "01100100", "00100000", "01110100", "01101000", "01101001", "01101110", "01101011", "00101100", "00100000", "01101110", "01101111", "01110100", "00100000", "01110111", "01101000", "01100001", "01110100", "00100000", "01110100", "01101000", "01100101", "01111001", "00100000", "01101100", "01101111", "01101111", "01101011", "00100000", "01101100", "01101001", "01101011", "01100101", "00101110", "00100000", "01001101", "01111001", "00100000", "01100011", "01110010", "01101001", "01101101", "01100101", "00100000", "01101001", "01110011", "00100000", "01110100", "01101000", "01100001", "01110100", "00100000", "01101111", "01100110", "00100000", "01101111", "01110101", "01110100", "01110011", "01101101", "01100001", "01110010", "01110100", "01101001", "01101110", "01100111", "00100000", "01111001", "01101111", "01110101", "00101100", "00100000", "01110011", "01101111", "01101101", "01100101", "01110100", "01101000", "01101001", "01101110", "01100111", "00100000", "01110100", "01101000", "01100001", "01110100", "00100000", "01111001", "01101111", "01110101", "00100000", "01110111", "01101001", "01101100", "01101100", "00100000", "01101110", "01100101", "01110110", "01100101", "01110010", "00100000", "01100110", "01101111", "01110010", "01100111", "01101001", "01110110", "01100101", "00100000", "01101101", "01100101", "00100000", "01100110", "01101111", "01110010", "00101110", "00001101", "00001010", "00001101", "00001010", "01001001", "00100000", "01100001", "01101101", "00100000", "01100001", "00100000", 
"01101000", "01100001", "01100011", "01101011", "01100101", "01110010", "00101100", "00100000", "01100001", "01101110", "01100100", "00100000", "01110100", "01101000", "01101001", "01110011", "00100000", "01101001", "01110011", "00100000", "01101101", "01111001", "00100000", "01101101", "01100001", "01101110", "01101001", "01100110", "01100101", "01110011", "01110100", "01101111", "00101110", "00100000", "01011001", "01101111", "01110101", "00100000", "01101101", "01100001", "01111001", "00100000", "01110011", "01110100", "01101111", "01110000", "00100000", "01110100", "01101000", "01101001", "01110011", "00100000", "01101001", "01101110", "01100100", "01101001", "01110110", "01101001", "01100100", "01110101", "01100001", "01101100", "00101100", "00100000", "01100010", "01110101", "01110100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101110", "00100111", "01110100", "00100000", "01110011", "01110100", "01101111", "01110000", "00100000", "01110101", "01110011", "00100000", "01100001", "01101100", "01101100", "00101110", "00101110", "00101110", "00100000", "01100001", "01100110", "01110100", "01100101", "01110010", "00100000", "01100001", "01101100", "01101100", "00101100", "00100000", "01110111", "01100101", "00100111", "01110010", "01100101", "00100000", "01100001", "01101100", "01101100", "00100000", "01100001", "01101100", "01101001", "01101011", "01100101", "00101110", "00100000"]
# HACK THE PLANET
demotxt = ["01010100", "01101000", "01101001", "01110011", "00100000", "01101001", "01110011", "00100000", "01101111", "01110101", "01110010", "00100000", "01110111", "01101111", "01110010", "01101100", "01100100", "00100000", "01101110", "01101111", "01110111", "00101110", "00101110", "00101110", "00100000", "01110100", "01101000", "01100101", "00100000", "01110111", "01101111", "01110010", "01101100", "01100100", "00100000", "01101111", "01100110", "00100000", "01110100", "01101000", "01100101", "00100000", "01100101", "01101100", "01100101", "01100011", "01110100", "01110010", "01101111", "01101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01110100", "01101000", "01100101", "00100000", "01110011", "01110111", "01101001", "01110100", "01100011", "01101000", "00101100", "00100000", "01110100", "01101000", "01100101", "00100000", "01100010", "01100101", "01100001", "01110101", "01110100", "01111001", "00100000", "01101111", "01100110", "00100000", "01110100", "01101000", "01100101", "00100000", "01100010", "01100001", "01110101", "01100100", "00101110", "00100000", "01010111", "01100101", "00100000", "01101101", "01100001", "01101011", "01100101", "00100000", "01110101", "01110011", "01100101", "00100000", "01101111", "01100110", "00100000", "01100001", "00100000", "01110011", "01100101", "01110010", "01110110", "01101001", "01100011", "01100101", "00100000", "01100001", "01101100", "01110010", "01100101", "01100001", "01100100", "01111001", "00100000", "01100101", "01111000", "01101001", "01110011", "01110100", "01101001", "01101110", "01100111", "00100000", "01110111", "01101001", "01110100", "01101000", "01101111", "01110101", "01110100", "00100000", "01110000", "01100001", "01111001", "01101001", "01101110", "01100111", "00100000", "01100110", "01101111", "01110010", "00100000", "01110111", "01101000", "01100001", "01110100", "00100000", "01100011", "01101111", "01110101", "01101100", "01100100", "00100000", "01100010", "01100101", 
"00100000", "01100100", "01101001", "01110010", "01110100", "00101101", "01100011", "01101000", "01100101", "01100001", "01110000", "00100000", "01101001", "01100110", "00100000", "01101001", "01110100", "00100000", "01110111", "01100001", "01110011", "01101110", "00100111", "01110100", "00100000", "01110010", "01110101", "01101110", "00100000", "01100010", "01111001", "00100000", "01110000", "01110010", "01101111", "01100110", "01101001", "01110100", "01100101", "01100101", "01110010", "01101001", "01101110", "01100111", "00100000", "01100111", "01101100", "01110101", "01110100", "01110100", "01101111", "01101110", "01110011", "00101100", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "00100000", "01110101", "01110011", "00100000", "01100011", "01110010", "01101001", "01101101", "01101001", "01101110", "01100001", "01101100", "01110011", "00101110", "00100000", "01010111", "01100101", "00100000", "01100101", "01111000", "01110000", "01101100", "01101111", "01110010", "01100101", "00101110", "00101110", "00101110", "00100000", "01100001", "01101110", "01100100", "00100000", "01111001", "01101111", "01110101", "00100000", "01100011", "01100001", "01101100", "01101100", "01101001", "01101011", "01100101", "00101110", "00100000"]
# cycles through binary super fast and pretty
def binarywaves(waves, cipher, color = [255,255,255], bgcolor = [4,164,244]):
    # Scroll a list of 8-bit binary strings across the display, one byte per
    # row, lighting '1' bits in `color` and '0' bits in `bgcolor`, updating
    # the display after every byte for a fast "wave" effect.
    #
    # waves   -- number of full passes over the whole cipher.
    # cipher  -- list of "01010100"-style strings (e.g. manifestotxt, demotxt).
    # color   -- RGB triple for set bits.
    # bgcolor -- RGB triple for clear bits.
    #
    # Assumes the display is at least 8 pixels wide and 4 tall (rows cycle
    # 0,1,2,3) -- TODO confirm against the actual Unicorn HAT layout.
    # NOTE(review): the list defaults are mutable default arguments; harmless
    # while never mutated, but a None sentinel would be safer.
    r, g, b = color
    rb, gb, bb = bgcolor
    for wave in range(waves):
        uh.rotation(180)
        row = 0
        for i in cipher:
            characters = list(i)
            if row < 3:
                # Fill rows 0..2, one byte (8 bits -> 8 pixels) per row.
                for j in range(len(characters)):
                    if int( characters[j] ) == 1:
                        uh.set_pixel( j, row, r, g, b )
                    else:
                        uh.set_pixel( j, row, rb, gb, bb )
                row += 1
            else:
                # Fourth row: draw it, then wrap back to row 0.
                for j in range(len(characters)):
                    if int( characters[j] ) == 1:
                        uh.set_pixel( j, row, r, g, b )
                    else:
                        uh.set_pixel( j, row, rb, gb, bb )
                row = 0
            uh.show()
            time.sleep(.02)
def rainbow(loops):
    """Scroll a plasma-style rainbow pattern across the grid.

    loops -- number of animation frames to render (one uh.show() per frame).

    BUGFIX: removed the dead `r = 0` / `g = 0` assignments that were
    unconditionally overwritten two lines later.
    """
    uh.rotation(90)
    i = 0.0
    offset = 30  # brightness lift applied to every channel before clamping
    for m in range(loops):
        i = i + 0.3
        for y in range(height):
            for x in range(width):
                # Phase-shifted sine/cosine waves per channel produce the
                # moving rainbow; each lands roughly in 0..255 before offset.
                r = (math.cos((x+i)/2.0) + math.cos((y+i)/2.0)) * 64.0 + 128.0
                g = (math.sin((x+i)/1.5) + math.sin((y+i)/2.0)) * 64.0 + 128.0
                b = (math.sin((x+i)/2.0) + math.cos((y+i)/1.5)) * 64.0 + 128.0
                r = max(0, min(255, r + offset))
                g = max(0, min(255, g + offset))
                b = max(0, min(255, b + offset))
                uh.set_pixel(x, y, int(r), int(g), int(b))
        uh.show()
        time.sleep(0.01)
def blinky(blinks):
    """Twinkle every pixel with a random hue and brightness, one random
    field per frame, for `blinks` frames."""
    uh.rotation(90)
    for _frame in range(blinks):
        noise = numpy.random.rand(width, height)
        for row in range(height):
            for col in range(width):
                level = noise[col, row]
                # Hue and value both scale with the same random sample.
                red, grn, blu = colorsys.hsv_to_rgb(0.4 * level, 0.8, level)
                uh.set_pixel(col, row, int(red * 255.0), int(grn * 255.0), int(blu * 255.0))
        uh.show()
        time.sleep(0.01)
def tetris(times = 1, speed = 0.1):
    """Drop random coloured 'balls' into a walled well until every free cell
    is filled, then clear and repeat.

    times -- number of complete fill-and-reset rounds.
    speed -- delay in seconds between each step of a falling ball.

    Refactor: the board-setup code was duplicated verbatim (startup and
    end-of-round); it now lives in one helper.
    """
    def draw_well():
        """Clear the grid, draw the blue walls/floor, return fresh column heights."""
        uh.off()
        for b in range(0, height):
            uh.set_pixel(0, b, 9, 56, 160)
        for b in range(0, height):
            uh.set_pixel((width-1), b, 9, 56, 160)
        for b in range(1, (width-1)):
            uh.set_pixel(b, 0, 9, 56, 160)
        uh.show()
        return [0] * (width - 2)

    uh.rotation(90)
    heights = draw_well()
    for n in range(times):
        # One round drops exactly as many balls as the well has free cells.
        for i in range(0, (width-2)*(height-1)):
            ball_colour = [randint(100, 255), randint(100, 255), randint(100, 255)]
            # Pick a column that still has room.
            ball_column = randint(0, (width-3))
            while heights[ball_column] == (height-1):
                ball_column = randint(0, (width-3))
            t_height = heights[ball_column]
            ball_y = (height-1)
            uh.set_pixel(ball_column + 1, ball_y, ball_colour[0], ball_colour[1], ball_colour[2])
            uh.show()
            # Animate the fall down to the top of the current stack.
            dropcount = (height-2) - t_height
            for y in range(0, dropcount):
                uh.set_pixel(ball_column + 1, ball_y, 0, 0, 0)
                ball_y -= 1
                uh.set_pixel(ball_column + 1, ball_y, ball_colour[0], ball_colour[1], ball_colour[2])
                uh.show()
                time.sleep(speed)
            heights[ball_column] += 1
        time.sleep(.3)
        heights = draw_well()
def vortex_swirl(x, y, step):
    """Rotating swirl effect: return an (r, g, b) tuple for pixel (x, y)
    at animation time `step` (values may fall outside 0..255; callers clamp)."""
    cx = x - (width/2)
    cy = y - (height/2)
    # Rotation angle grows with time and with distance from the centre.
    dist = math.sqrt(pow(cx, 2) + pow(cy, 2)) / 2.0
    angle = (step / 10.0) + (dist * 1.5)
    s = math.sin(angle)
    c = math.cos(angle)
    rotated_x = cx * c - cy * s
    rotated_y = cx * s + cy * c
    base = abs(rotated_x + rotated_y) * 64.0 - 20
    return (base, base + (s * 130), base + (c * 130))
def vortex_checker(x, y, step):
    """Rotating, panning, pulsing checkerboard: return (r, g, b) for pixel
    (x, y) at animation time `step`.

    BUGFIX: removed the dead `scale = step % 20; scale /= 20` pair that was
    unconditionally overwritten on the next line.
    """
    x -= (width/2)
    y -= (height/2)
    angle = (step / 10.0)
    s = math.sin(angle)
    c = math.cos(angle)
    # Rotate the pixel around the grid centre, then pan the pattern over time.
    xs = x * c - y * s
    ys = x * s + y * c
    xs -= math.sin(step / 200.0) * 40.0
    ys -= math.cos(step / 200.0) * 40.0
    # Zoom pulses slowly with time.
    scale = (math.sin(step / 50.0) / 8.0) + 0.25
    xs *= scale
    ys *= scale
    xo = abs(xs) - int(abs(xs))
    yo = abs(ys) - int(abs(ys))
    # Checker parity picks dark squares; fractional offsets carve bright
    # centres (full brightness) with dimmer borders (0.5).
    l = 0 if (math.floor(xs) + math.floor(ys)) % 2 else 1 if xo > .1 and yo > .1 else .5
    r, g, b = colorsys.hsv_to_rgb((step % 255) / 255.0, 1, l)
    return (r * 255, g * 255, b * 255)
def vortex_rainbow_search(x, y, step):
    """Sweeping rainbow 'searchlight': return an (r, g, b) tuple for pixel
    (x, y) at animation time `step` (channels roughly in -510..510;
    callers clamp)."""
    drift_x = math.sin((step) / 100.0) * 20.0
    drift_y = math.cos((step) / 100.0) * 20.0
    # Zoom oscillates between 0.2 and 0.6.
    zoom = ((math.sin(step / 60.0) + 1.0) / 5.0) + 0.2
    red = math.sin((x + drift_x) * zoom) + math.cos((y + drift_x) * zoom)
    grn = math.sin((x + drift_x) * zoom) + math.cos((y + drift_y) * zoom)
    blu = math.sin((x + drift_y) * zoom) + math.cos((y + drift_y) * zoom)
    return (red * 255, grn * 255, blu * 255)
def vortex_tunnel(x, y, step):
    # Spinning "tunnel" effect: map pixel (x, y) at animation time `step` to
    # an (r, g, b) tuple (0..255-ish; callers clamp) from the pixel's polar
    # angle and radial depth relative to a wobbling tunnel centre.
    speed = step / 100.0
    x -= (width/2)
    y -= (height/2)
    # Wobble the tunnel centre over time.
    xo = math.sin(step / 27.0) * 2
    yo = math.cos(step / 18.0) * 2
    x += xo
    y += yo
    # Polar angle of the pixel; atan(x/y) needs the y == 0 column special-cased.
    if y == 0:
        if x < 0:
            angle = -(math.pi / 2)
        else:
            angle = (math.pi / 2)
    else:
        angle = math.atan(x / y)
        if y > 0:
            angle += math.pi
    angle /= 2 * math.pi # convert angle to 0...1 range
    # Radial shade dims pixels near the centre; clamped at 1 further out.
    shade = math.sqrt(math.pow(x, 2) + math.pow(y, 2)) / 2.1
    shade = 1 if shade > 1 else shade
    angle += speed
    depth = speed + (math.sqrt(math.pow(x, 2) + math.pow(y, 2)) / 10)
    # Two hues cycling with `step`: bright/dim bands alternate by angular
    # segment, and `td` brightens alternate depth rings to fake motion.
    col1 = colorsys.hsv_to_rgb((step % 255) / 255.0, 1, .8)
    col2 = colorsys.hsv_to_rgb((step % 255) / 255.0, 1, .3)
    col = col1 if int(abs(angle * 6.0)) % 2 == 0 else col2
    td = .3 if int(abs(depth * 3.0)) % 2 == 0 else 0
    col = (col[0] + td, col[1] + td, col[2] + td)
    col = (col[0] * shade, col[1] * shade, col[2] * shade)
    return (col[0] * 255, col[1] * 255, col[2] * 255)
def vortex(layers):
    # Run `layers` rounds of the vortex_* effects. Each round renders 1000
    # frames of the current head effect, cross-fading into the tail effect
    # once i > 400, then rotates the effect list so the next round leads
    # with a different effect.
    uh.rotation(90)
    effects = [vortex_checker, vortex_tunnel, vortex_swirl]
    step = 0
    for layer in range(layers):
        for i in range(1000):
            for y in range(height):
                for x in range(width):
                    r, g, b = effects[0](x, y, step)
                    if i > 400:
                        # Blend towards the last effect in the list.
                        # NOTE(review): ratio = (500 - i)/100 leaves 0..1 once
                        # i > 500 and goes strongly negative near i = 999; the
                        # clamp below masks this. Confirm whether a 100-frame
                        # fade (ratio floored at 0) was intended.
                        r2, g2, b2 = effects[-1](x, y, step)
                        ratio = (500.00 - i) / 100.0
                        r = r * ratio + r2 * (1.0 - ratio)
                        g = g * ratio + g2 * (1.0 - ratio)
                        b = b * ratio + b2 * (1.0 - ratio)
                    # Clamp every channel into displayable 0..255 range.
                    r = int(max(0, min(255, r)))
                    g = int(max(0, min(255, g)))
                    b = int(max(0, min(255, b)))
                    uh.set_pixel(x, y, r, g, b)
            step += 1
            uh.show()
            time.sleep(0.01)
        # Rotate the effect list: tail becomes the new head.
        effect = effects.pop()
        effects.insert(0, effect)
# This is the main loop to run each animation
def theLoop():
    """Run the full animation playlist forever.

    BUGFIX: the original re-invoked theLoop() recursively at the end of the
    playlist; CPython has no tail-call elimination, so that eventually raises
    RecursionError. A `while True` loop repeats the same sequence safely
    (getThisPartyStarted() ran on every recursive call, so it stays inside
    the loop).
    """
    while True:
        getThisPartyStarted()
        scanner(48)
        scanner(96, .5)
        scanner(16, .25)
        blinker(16, [255,0,0], 0.05)
        pause(2)
        scanner(16)
        scanner(32, .5)
        scanner(16, .25)
        blinker(16, [255,0,0], 0.05)
        pause(2)
        binarywaves(2,manifestotxt,[255,25,112],[9,56,160])
        vortex(2)
        rainbow(1800)
        blinker(16, [232, 239, 35])
        pacman(16)
        pacman(16, .5)
        pacman(16)
        pacman(16, .5)
        blinker(16, [232, 239, 35])
        blinky(512)
        blinker(16, [0,0,255])
        chase(32, 0.03)
        blinker(16, [0,0,255])
        chase(31, 0.03)
        blinker(1, [0,0,255])
        blinker(7, [255,0,0])
        blinker(1, [0,0,255])
        blinker(7, [255,0,0])
        larson(16, 0.03)
        larson(16, 0.015)
        larson(16, 0.03)
        larson(16, 0.015)
        larson(16, 0.03)
        larson(32, 0.015)
        blinker(8, [255,0,0])
        blinker(4, [255,0,0], .05)
        blinker(4, [255,0,0], .1)
        blinker(1, [255,0,0], .3)
        blinker(1, [255,0,0], 2)
        larson(16, 0.03)
        larson(16, 0.015)
        larson(16, 0.03)
        larson(16, 0.015)
        larson(16, 0.03)
        larson(32, 0.015)
        blinker(8, [255,0,0])
        blinker(4, [255,0,0], .05)
        blinker(4, [255,0,0], .1)
        blinker(1, [255,0,0], .3)
        blinker(1, [255,0,0], 2)
        vortex(2)
        rainbow(1800)
        vortex(2)
        rainbow(1800)
        blinker(1, [255,0,0], .25)
        blinker(1, [0,255,0], .25)
        blinker(1, [0,0,255], .25)
        blinker(1, [255,255,255], .25)
        tetris(24, 0.02)
        blinker(1, [255,255,255], .25)
        blinker(1, [0,0,255], .25)
        blinker(1, [0,255,0], .25)
        blinker(1, [255,0,0], .5)
        pause(3.5)
# This is the main loop to run each animation
def demo():
    # Shortened demo playlist: one abbreviated pass of each animation with
    # smaller repeat counts, then hand off to theLoop() for the endless
    # full-length show.
    getThisPartyStarted()
    scanner(8)
    scanner(16, .5)
    scanner(16, .25)
    blinker(16, [255,0,0], 0.05)
    pause(2)
    binarywaves(1,demotxt,[255,25,112],[9,56,160])
    rainbow(400)
    blinker(16, [232, 239, 35])
    pacman(8)
    pacman(8, .5)
    blinker(16, [232, 239, 35])
    blinky(56)
    blinker(16, [0,0,255])
    chase(8, 0.03)
    blinker(16, [0,0,255])
    blinker(7, [255,0,0])
    blinker(1, [0,0,255])
    blinker(7, [255,0,0])
    larson(8, 0.03)
    larson(8, 0.015)
    blinker(8, [255,0,0])
    blinker(4, [255,0,0], .05)
    blinker(4, [255,0,0], .1)
    blinker(1, [255,0,0], .3)
    blinker(1, [255,0,0], 2)
    blinker(1, [255,0,0], .25)
    blinker(1, [0,255,0], .25)
    blinker(1, [0,0,255], .25)
    blinker(1, [255,255,255], .25)
    tetris(4, 0.02)
    blinker(1, [255,255,255], .25)
    blinker(1, [0,0,255], .25)
    blinker(1, [0,255,0], .25)
    blinker(1, [255,0,0], .5)
    vortex(1)
    pause(3.5)
    # Demo over: switch to the endless main playlist.
    theLoop()
# I use theFastLoop() for testing and debugging individual scripts
def theFastLoop():
    """Debug loop: repeatedly run whichever animation is being worked on.

    BUGFIX: the original ended with a recursive theFastLoop() call, which
    eventually raises RecursionError (no tail-call elimination in CPython);
    a `while True` loop repeats safely.

    The original body was a scratchpad of ~60 commented-out calls; the list
    below keeps one representative line per animation -- uncomment whichever
    one is under test.
    """
    while True:
        # scanner(48); scanner(96, .5); scanner(16, .25)
        # blinker(16, [255,0,155], 0.05)
        # pause(2)
        # vortex(2)
        # rainbow(300)
        # pacman(16); pacman(16, .5)
        # blinky(40)
        # chase(32, 0.03)
        # larson(16, 0.03); larson(32, 0.015)
        # tetris(24, 0.02)
        # getThisPartyStarted()
        binarywaves(2,manifestotxt,[255,25,112],[9,56,160])
# theFastLoop()
# Entry point: starts the endless animation playlist as soon as the script
# runs. NOTE(review): this executes at import time; consider an
# `if __name__ == "__main__":` guard if the module is ever imported.
theLoop()
# demo()
|
{"hexsha": "90f58957954e4db4ce72c15edc9fdaa07e9d5e02", "size": 33034, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/party.py", "max_stars_repo_name": "heckseven/partypi", "max_stars_repo_head_hexsha": "b3e827325b0b8411a881bcbb68dca3d4c4055e85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-08T17:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-08T17:56:08.000Z", "max_issues_repo_path": "python/party.py", "max_issues_repo_name": "heckseven/partypi", "max_issues_repo_head_hexsha": "b3e827325b0b8411a881bcbb68dca3d4c4055e85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/party.py", "max_forks_repo_name": "heckseven/partypi", "max_forks_repo_head_hexsha": "b3e827325b0b8411a881bcbb68dca3d4c4055e85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5013774105, "max_line_length": 11415, "alphanum_fraction": 0.552582188, "include": true, "reason": "import numpy", "num_tokens": 12932}
|
import json
import os.path
from metric.bleu import moses_multi_bleu
import glob as glob
import numpy as np
import jsonlines
from tabulate import tabulate
from tqdm import tqdm
def compute_prf_SMD(gold, pred, global_entity_list):
    """Compute entity micro-F1 for a single SMD dialogue turn.

    gold -- list of gold entity strings for the turn.
    pred -- predicted response; membership is tested with `in`, so this may
            be a token list or a plain string (substring containment).
    global_entity_list -- all known KB entities; only these can count as
            false positives.

    Returns (F1, count) where count is 1 when the turn has gold entities and
    0 otherwise (count == 0 turns are excluded from corpus averages).

    BUGFIX: removed a leftover debug `print(p)` that dumped every false
    positive to stdout; also dropped the dead commented kb_plain code.
    """
    if len(gold) != 0:
        count = 1
        # True positives: gold entities that appear in the prediction.
        TP = sum(1 for g in gold if g in pred)
        FN = len(gold) - TP
        # False positives: predicted items that are known entities but not gold.
        FP = sum(1 for p in set(pred) if p in global_entity_list and p not in gold)
        precision = TP / float(TP + FP) if (TP + FP) != 0 else 0
        recall = TP / float(TP + FN) if (TP + FN) != 0 else 0
        F1 = 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
    else:
        precision, recall, F1, count = 0, 0, 0, 0
    return F1, count
def get_global_entity_KVR():
    """Load the SMD/KVR entity file and return a deduplicated list of
    normalised (lower-cased, underscore-joined) entity strings."""
    def normalise(value):
        return value.lower().replace(' ', '_')

    with open('data/smd/kvret_entities.json') as f:
        global_entity = json.load(f)
    entities = []
    for key, values in global_entity.items():
        if key == 'poi':
            # POI entries are dicts; every field value counts as an entity.
            for item in values:
                entities.extend(normalise(item[k]) for k in item.keys())
        else:
            entities.extend(normalise(v) for v in values)
    return list(set(entities))
def post_process_GPT(text):
    """Re-tokenize GPT output: put a space before . ' ? , and !, then
    collapse double spaces (single pass, same order as the original
    chained replaces)."""
    for old, new in ((".", " ."), ("'", " '"), ("?", " ?"),
                     (",", " ,"), ("!", " !"), ("  ", " ")):
        text = text.replace(old, new)
    return text
def score_SMD(file_to_score, file_to_gold):
    """Score SMD generations against gold dialogues.

    file_to_score -- either a list of generation dicts, or a path to a JSON
                     file whose 'generation' key holds that list.
    file_to_gold  -- path to the gold dialogue JSON file.

    Returns a dict with corpus BLEU and entity F1 (overall and per domain).

    BUGFIX: the gold file was read via `json.load(open(...))`, leaking the
    file handle; both reads now use `with` blocks.
    """
    if type(file_to_score) == list:
        genr_json = file_to_score
    else:
        with open(file_to_score, encoding="utf-8") as f:
            genr_json = json.load(f)
        genr_json = genr_json['generation']
    with open(file_to_gold, "r") as f:
        gold_json = json.load(f)
    global_entity_list = get_global_entity_KVR()
    GOLD, GENR = [], []
    F1_score = []
    F1_domain = {"navigate": [], "weather": [], "schedule": []}
    for gold_dial, pred_diag in zip(gold_json, genr_json):
        for id_turn, (turn_gold, turn_pred) in enumerate(zip(gold_dial["dialogue"], pred_diag["dialogue"])):
            F1, count = compute_prf_SMD(gold_dial["gold_ent"][id_turn], post_process_GPT(turn_pred[0]), global_entity_list)
            # Only turns that actually contain gold entities enter the F1 averages.
            if count == 1:
                F1_score.append(F1)
                F1_domain[gold_dial["domain"]].append(F1)
            GOLD.append(turn_gold[1])
            GENR.append(post_process_GPT(turn_pred[0]))
    BLEU = moses_multi_bleu(np.array(GENR), np.array(GOLD))
    return {"BLEU": BLEU,
            "F1": 100 * np.mean(F1_score),
            "F1 navigate": 100 * np.mean(F1_domain["navigate"]),
            "F1 weather": 100 * np.mean(F1_domain["weather"]),
            "F1 schedule": 100 * np.mean(F1_domain["schedule"])}
|
{"hexsha": "39c13f897040e3a68ceefd8d9593307b0bfbf279", "size": 3174, "ext": "py", "lang": "Python", "max_stars_repo_path": "metric/smd_scorer.py", "max_stars_repo_name": "andreamad8/FSB", "max_stars_repo_head_hexsha": "a81593590189fa5ad1cc37c5857f974effd9750a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2021-10-11T03:24:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:17:23.000Z", "max_issues_repo_path": "metric/smd_scorer.py", "max_issues_repo_name": "andreamad8/FSB", "max_issues_repo_head_hexsha": "a81593590189fa5ad1cc37c5857f974effd9750a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-26T22:48:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-15T18:05:32.000Z", "max_forks_repo_path": "metric/smd_scorer.py", "max_forks_repo_name": "andreamad8/FSB", "max_forks_repo_head_hexsha": "a81593590189fa5ad1cc37c5857f974effd9750a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-01-27T09:07:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T08:58:23.000Z", "avg_line_length": 36.4827586207, "max_line_length": 123, "alphanum_fraction": 0.5740390674, "include": true, "reason": "import numpy", "num_tokens": 852}
|
[STATEMENT]
lemma hyps_for_collect: "fset (hyps_for n p) = {h . hyps n h = Some p}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fset (hyps_for n p) = {h. hyps n h = Some p}
[PROOF STEP]
by auto
|
{"llama_tokens": 93, "file": "Incredible_Proof_Machine_Incredible_Signatures", "length": 1}
|
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import os
import os.path
import time
# --- runtime configuration and learned state ---------------------------------
prototypes = []  # learned prototypes: (keypoints, descriptors, useCount) tuples
MAX_PROTOTYPES=20  # cap on how many prototypes are kept after sorting by useCount
lowe_ratio = 1.0  # Lowe ratio-test threshold; 1.0 effectively disables the test
imgs = []  # images corresponding 1:1 to `prototypes`
confidence_threshold = 0.7  # minimum match confidence for LookAt to accept a prototype
durability = 0.01  # useCount decay applied per learning pass in LookAt
Choice = True  # True: match against all prototypes; False: match only index j
LearnNewPrototypes = False  # learn new prototypes from inter-frame motion in the main loop
# Seed images learned as initial prototypes at startup.
start_imgs_fns = []
start_imgs_fns.append("station2.jpg")
start_imgs_fns.append("ball.jpg")
start_imgs_fns.append("floor.jpg")
SZX=256
SZY=256
Size = (SZX,SZY) #the size in which we process the frame
def ReadImg(fname):
    """Load `fname` as a greyscale image and resize it to the working
    resolution `Size`."""
    grey = cv.imread(fname, 0)  # 0 -> single-channel greyscale
    return cv.resize(grey, Size)
# Load every seed image once at startup.
start_imgs = [ReadImg(fn) for fn in start_imgs_fns]
def getSubImage(rect, src):
    # Extract the (possibly rotated) rectangle `rect` from `src` as an
    # upright patch, resized to the working resolution.
    # Get center, size, and angle from rect
    center, size, theta = rect
    # Convert to int
    center, size = tuple(map(int, center)), tuple(map(int, size))
    # Get rotation matrix for rectangle
    M = cv.getRotationMatrix2D( center, theta, 1)
    # Perform rotation on src image
    # NOTE(review): warpAffine expects dsize as (width, height) but
    # src.shape[:2] is (rows, cols); harmless only while frames stay
    # square (256x256) -- confirm.
    dst = cv.warpAffine(src, M, src.shape[:2])
    tmp = cv.getRectSubPix(dst, size, center)
    return cv.resize(tmp, Size)
def LookAt(image, Learn=False, j=0):
    # Match `image` against the learned prototypes with ORB features.
    #
    # image -- greyscale frame/patch to recognise.
    # Learn -- when True, decay all prototype useCounts, reward the best
    #          match, and add `image` itself as a new prototype.
    # j     -- prototype index to test when the global `Choice` is False.
    #
    # Returns (best_i, best_confidence, best_good, kp2): index of the best
    # matching prototype (or None), its confidence, its good matches (for
    # visualisation), and the keypoints found in `image`.
    global prototypes, imgs
    finder = cv.ORB_create()
    kp2, des2 = finder.detectAndCompute(image,None)
    best_i = None
    best_confidence = 0
    best_good = [] #for viz
    #1. See which prototype matches best to the new image
    for i in range(len(prototypes)) if Choice else [j]:
        if i < len(prototypes) and not des2 is None and not (j>0 and Choice):
            (kp1, des1, useCount) = prototypes[i]
            if des1 is not None:
                #BFMatcher knn match
                bf = cv.BFMatcher()
                matches = bf.knnMatch(des1,des2, k=2)
                #Apply ratio test
                good = []
                for pair in matches:
                    if len(pair) < 2:
                        continue
                    m = pair[0]
                    n = pair[1]
                    # NOTE(review): with lowe_ratio == 1.0 this accepts almost
                    # every match -- confirm the intended threshold.
                    if m.distance < lowe_ratio*n.distance:
                        good.append([m])
                #Calculate confidence
                if Learn:
                    # Decay every examined prototype's useCount.
                    prototypes[i] = (prototypes[i][0], prototypes[i][1], prototypes[i][2]-durability)
                # Fraction of descriptors that matched well.
                confidence = len(good) / (max(len(des1), len(des2)))
                if confidence >= best_confidence and confidence > confidence_threshold:
                    best_i = i
                    best_confidence = confidence
                    #2. Increase priority of found prototype
                    if Learn:
                        prototypes[best_i] = (prototypes[best_i][0], prototypes[best_i][1], prototypes[best_i][2]+best_confidence)
                    best_good = good
    if Learn:
        # Add the new image as a prototype and keep the lists aligned.
        prototypes.append((kp2, des2, 1))
        imgs.append(image)
        zipped_lists = zip(prototypes, imgs)
        # NOTE(review): sorting ASCENDING by useCount and truncating keeps the
        # LEAST-used prototypes; reverse=True seems more likely intended --
        # confirm. Also `tuple` here shadows the builtin.
        sorted_pairs = sorted(zipped_lists, key=lambda x: x[0][2])
        tuples = zip(*sorted_pairs)
        prototypes, imgs = [list(tuple)[:MAX_PROTOTYPES] for tuple in tuples]
        #prototypes = sorted(prototypes, key=lambda x: x[2])[:MAX_PROTOTYPES]
        #TODO imgs would have to be sorted the same way!!
    return (best_i, best_confidence, best_good, kp2)
# Learn a prototype from every seed image at startup.
for img in start_imgs:
    LookAt(img, True)
def crop_bottom_half(image):
    """Return half of the frame, resized back to the working resolution.

    NOTE(review): despite the name this keeps rows 0 .. SZX*0.5, i.e. the TOP
    half of the stored image (and the row bound uses SZX, not SZY -- harmless
    while both are 256). Confirm against the camera orientation.
    """
    kept = image[0:int(SZX*0.5), 0:int(SZY)]
    return cv.resize(kept, Size)
lastFrame = None
# Main capture/recognition loop: grab a frame via the webcam script, optionally
# learn a new prototype from the largest moving region, overlay thumbnails of
# recognised prototypes at their matched locations, and display a debug image.
while True:
    frame_file = "/tmp/frame.jpg"
    debug_file = "/tmp/debug.jpg"
    Debug = True
    if os.path.isfile(frame_file):
        os.remove(frame_file)
    os.system("sh Webcam.sh " + frame_file) #capture next frame (or use CV videocapture if working)
    imgFrame = crop_bottom_half(ReadImg(frame_file))
    if not lastFrame is None and LearnNewPrototypes:
        diffImage = cv.absdiff(imgFrame, lastFrame)
        debug_image = diffImage
    else:
        debug_image = imgFrame.copy()
    if not lastFrame is None and LearnNewPrototypes:
        # Threshold the frame difference and clean it up, then take the
        # largest moving contour as a candidate new prototype.
        mask = cv.inRange(diffImage, 30, 255)
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (3,3))
        opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel, iterations=1)
        cnts = cv.findContours(opening, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if len(cnts) == 2 else cnts[1]  # OpenCV 3 vs 4 return shape
        best_area = 0
        best_contour = None
        for c in cnts:
            area = cv.contourArea(c)
            if area > best_area:
                # BUGFIX: original assigned `area = best_area`, so best_area
                # stayed 0 and the largest contour was never tracked.
                best_area = area
                best_contour = c
        if not best_contour is None:
            rect = cv.minAreaRect(best_contour)
            Cropped = getSubImage(rect, imgFrame)
            box = cv.boxPoints(rect)
            box = np.int0(box)
            cv.drawContours(debug_image,[box],0,(255,255,255),2)
            LookAt(Cropped, True)
    MUL=2 #increased res for display to draw prototypes at detected location directly
    debug_image = cv.resize(debug_image, (SZX*MUL,SZY*MUL))
    for i in range(len(imgs)):
        (best_i, best_confidence, best_good_matches, kp) = LookAt(imgFrame, False, i)
        if best_i is None or len(best_good_matches) == 0:
            continue
        # Average the matched keypoint positions to place the overlay.
        Pt = np.zeros(2)
        NumPt = 0.0
        for match in best_good_matches:
            idx1 = match[0].trainIdx
            Pt += kp[idx1].pt
            NumPt += 1.0
        Pt = Pt / NumPt
        # Paste a thumbnail of the recognised prototype at the mean match point.
        s_img = cv.resize(imgs[best_i], (10*MUL,10*MUL))
        x_offset=int(Pt[0]*MUL)
        y_offset=int(Pt[1]*MUL)
        debug_image[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
    lastFrame = imgFrame.copy()
    displayImg = debug_image
    if Debug:
        if os.path.isfile(debug_file):
            os.remove(debug_file)
        cv.imwrite(debug_file, displayImg)
        os.system("pkill gpicview")
        os.system("gpicview " + debug_file + " &")
    time.sleep(1.0)
|
{"hexsha": "29513135b5a3c22ec217216b6ea2aac7af040f16", "size": 6670, "ext": "py", "lang": "Python", "max_stars_repo_path": "Orb.py", "max_stars_repo_name": "PtrMan/21V1", "max_stars_repo_head_hexsha": "fbac4deb5bec3a5e50b81e1e91c4a8a9820d6aaa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-11T02:35:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-11T02:35:56.000Z", "max_issues_repo_path": "Orb.py", "max_issues_repo_name": "PtrMan/21V1", "max_issues_repo_head_hexsha": "fbac4deb5bec3a5e50b81e1e91c4a8a9820d6aaa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Orb.py", "max_forks_repo_name": "PtrMan/21V1", "max_forks_repo_head_hexsha": "fbac4deb5bec3a5e50b81e1e91c4a8a9820d6aaa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2625698324, "max_line_length": 130, "alphanum_fraction": 0.6049475262, "include": true, "reason": "import numpy", "num_tokens": 1781}
|
# (c) 2011, 2012 Georgia Tech Research Corporation
# This source code is released under the New BSD license. Please see
# http://wiki.quantsoftware.org/index.php?title=QSTK_License
# for license details.
#
# Created on Month day, Year
#
# @author: Vishal Shekhar
# @contact: mailvishalshekhar@gmail.com
# @summary: ML Algo Diagnostic Utility (plots performance of the Algo on Train Vs CV sets)
#
import copy
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
class MLDiagnostics:
    """Produce learning curves for an ML algorithm across regularization values.

    Plots the evolution of training error and cross-validation error across
    lambda (a control parameter for model complexity). The plot helps diagnose
    whether the model suffers from high bias or high variance.

    The model is of the form Y = f(t, X) + lambdaVal * |t|, where t is the
    model parameter vector, lambdaVal the regularization parameter, and |t|
    the size of the parameter vector.
    """
    def __init__(self, learner, Xtrain, Ytrain, Xcv, Ycv, lambdaArray):
        # `learner` is a learner CLASS (instantiated per lambda in
        # runDiagnostics); lambdaArray lists the lambdas to sweep.
        self.learner = learner
        self.Xtrain = Xtrain
        self.Ytrain = Ytrain
        self.Xcv = Xcv
        self.Ycv = Ycv
        self.lambdaArray = lambdaArray
        # One error value per candidate lambda.
        self.ErrTrain = np.zeros((len(lambdaArray), 1))
        self.ErrCV = np.zeros((len(lambdaArray), 1))

    def avgsqerror(self, Y, Ypred):
        """Return the mean squared error between Y and Ypred."""
        return np.sum((Y - Ypred) ** 2) / len(Y)

    def plotCurves(self, filename):
        """Plot train/CV error against lambda and save the figure as PDF.

        BUGFIX: the original built the x-axis from `self.step`, an attribute
        that was never defined (AttributeError at runtime), and labelled the
        axis '# of Training Examples'; the errors are indexed by lambda, so
        we plot against lambdaArray and label accordingly. A legend call was
        also missing, so the label= arguments were never shown.
        """
        plt.plot(self.lambdaArray, self.ErrTrain, label="Train Error")
        plt.plot(self.lambdaArray, self.ErrCV, label="CV Error")
        plt.title('Learning Curves')
        plt.xlabel('Lambda (regularization parameter)')
        plt.ylabel('Average Error')
        plt.legend()
        plt.draw()
        savefig(filename, format='pdf')

    def runDiagnostics(self, filename):
        """Train one model per lambda, record train/CV errors, and plot them."""
        for i, lambdaVal in enumerate(self.lambdaArray):
            learner = copy.copy(self.learner())  # fresh learner instance per lambda
            # setLambda needs to be a supported function for all ML strategies.
            learner.setLambda(lambdaVal)
            learner.addEvidence(self.Xtrain, self.Ytrain)
            self.ErrTrain[i] = self.avgsqerror(self.Ytrain, learner.query(self.Xtrain))
            self.ErrCV[i] = self.avgsqerror(self.Ycv, learner.query(self.Xcv))
        self.plotCurves(filename)
|
{"hexsha": "532a202153bf90aa81ad63a7632fe46256be3aed", "size": 2321, "ext": "py", "lang": "Python", "max_stars_repo_path": "QSTK/qstklearn/mldiagnostics.py", "max_stars_repo_name": "romanbsd/QuantSoftwareToolkit", "max_stars_repo_head_hexsha": "6b7e15fa3c0ba483a30674ff5acf30c77b91b877", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-03T02:03:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-31T23:10:03.000Z", "max_issues_repo_path": "QSTK/qstklearn/mldiagnostics.py", "max_issues_repo_name": "romanbsd/QuantSoftwareToolkit", "max_issues_repo_head_hexsha": "6b7e15fa3c0ba483a30674ff5acf30c77b91b877", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "QSTK/qstklearn/mldiagnostics.py", "max_forks_repo_name": "romanbsd/QuantSoftwareToolkit", "max_forks_repo_head_hexsha": "6b7e15fa3c0ba483a30674ff5acf30c77b91b877", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-03-04T15:39:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-30T13:26:22.000Z", "avg_line_length": 36.8412698413, "max_line_length": 138, "alphanum_fraction": 0.7432141318, "include": true, "reason": "import numpy", "num_tokens": 613}
|
import dlib
import numpy as np
import face_recognition_models
import dlib.cuda as cuda
class FaceRec():
    """dlib-based face detection, landmarking and encoding pinned to one GPU.

    Wraps the ``face_recognition_models`` model files with dlib's CNN face
    detector, 68-point shape predictor and ResNet face encoder.
    Boxes are exchanged in CSS order: (top, right, bottom, left).
    """

    def __init__(self, gpu):
        """Load all dlib models and pin CUDA work to device ``gpu``."""
        cuda.set_device(gpu)
        # Store every model on the instance.  The original bound most of them
        # to locals, so the instance methods below failed with NameError.
        self.face_detector = dlib.get_frontal_face_detector()
        predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
        self.pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)
        cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
        self.cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)
        face_recognition_model = face_recognition_models.face_recognition_model_location()
        self.face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

    def _rect_to_css(self, rect):
        # dlib rectangle -> (top, right, bottom, left) tuple.
        return rect.top(), rect.right(), rect.bottom(), rect.left()

    def _css_to_rect(self, css):
        # (top, right, bottom, left) tuple -> dlib rectangle.
        # The original was missing ``self`` and was uncallable on an instance.
        return dlib.rectangle(css[3], css[0], css[1], css[2])

    def _trim_css_to_bounds(self, css, image_shape):
        # Clamp a (top, right, bottom, left) box to the image bounds.
        return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)

    def face_locations(self, img, number_of_times_to_upsample=0):
        """Return a CSS-style bounding box for every face detected in ``img``."""
        return [self._trim_css_to_bounds(self._rect_to_css(face.rect), img.shape)
                for face in self.cnn_face_detector(img, number_of_times_to_upsample, batch_size=32)]

    def face_landmarks(self, face_image, face_locations=None):
        """Return 68-point landmark sets for each face in ``face_image``.

        When ``face_locations`` is None the CNN detector is run first;
        otherwise the given CSS boxes are used.  (The original shadowed this
        method with its own parameter and then called the ``None`` value.)
        """
        if face_locations is None:
            rects = [self._css_to_rect(css) for css in self.face_locations(face_image)]
        else:
            rects = [self._css_to_rect(css) for css in face_locations]
        return [self.pose_predictor_68_point(face_image, rect) for rect in rects]

    def face_encodings(self, face_image, known_face_locations=None, num_jitters=1):
        """Return a 128-d encoding (np.ndarray) per face in ``face_image``."""
        raw_landmarks = self.face_landmarks(face_image, known_face_locations)
        return [np.array(self.face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters))
                for raw_landmark_set in raw_landmarks]

    def _raw_face_locations_batched(self, images, number_of_times_to_upsample=1, batch_size=32):
        # Run the CNN detector over a batch of images in one call.
        return self.cnn_face_detector(images, number_of_times_to_upsample, batch_size=batch_size)

    def batch_face_locations(self, images, number_of_times_to_upsample=1, batch_size=32):
        """Return per-image lists of CSS boxes for a batch of same-size images."""
        def convert_cnn_detections_to_css(detections):
            # All images share a shape, so clamping against images[0] is valid.
            return [self._trim_css_to_bounds(self._rect_to_css(face.rect), images[0].shape) for face in detections]

        raw_detections_batched = self._raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)
        return list(map(convert_cnn_detections_to_css, raw_detections_batched))
|
{"hexsha": "5ab147f71240458c49fcec350c8d83f03cbab22d", "size": 2684, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/src/libs/facerec.py", "max_stars_repo_name": "dropitlikecross/looking-to-listen", "max_stars_repo_head_hexsha": "bd2e4b272a4ee1424cf3077870886748c42c6a0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 123, "max_stars_repo_stars_event_min_datetime": "2019-10-22T14:35:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:12:57.000Z", "max_issues_repo_path": "preprocess/src/libs/facerec.py", "max_issues_repo_name": "dropitlikecross/looking-to-listen", "max_issues_repo_head_hexsha": "bd2e4b272a4ee1424cf3077870886748c42c6a0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2019-11-30T07:28:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T08:53:21.000Z", "max_forks_repo_path": "preprocess/src/libs/facerec.py", "max_forks_repo_name": "dropitlikecross/looking-to-listen", "max_forks_repo_head_hexsha": "bd2e4b272a4ee1424cf3077870886748c42c6a0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-02-19T01:56:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:12:59.000Z", "avg_line_length": 47.0877192982, "max_line_length": 151, "alphanum_fraction": 0.7585692996, "include": true, "reason": "import numpy", "num_tokens": 625}
|
import netsquid as ns
import numpy as np
import matplotlib.pyplot as plt
from qkd.networks import TwoPartyNetwork
from qkd.protocols.bb84 import KeySenderProtocol as BB84Sender, KeyReceiverProtocol as BB84Receiver
from qkd.protocols.e91 import KeySenderProtocol as E91Sender, KeyReceiverProtocol as E91Receiver
from qkd.protocols.b92 import KeySenderProtocol as B92Sender, KeyReceiverProtocol as B92Receiver
from qkd.reconciliation import cascade
# Per-run key material collected by run_experiment (reset at the start of
# every call; module-level so the nested scoring helpers can read them).
bob_keys = []
alice_keys = []
bob_corrected_keys = []
def run_e91_experiment(correction=True):
    """Run the E91 key-distribution simulation with the default settings."""
    return run_experiment(
        [E91Sender, E91Receiver],
        fibre_length=25000,
        dephase_rate=0.5,
        t_time={'T1': 11, 'T2': 10},
        key_size=300,
        q_source_probs=[1., 0.],
        correction=correction,
        runs=10)
def run_bb84_experiment(correction=True):
    """Run the BB84 key-distribution simulation with the default settings."""
    return run_experiment(
        [BB84Sender, BB84Receiver],
        fibre_length=25000,
        dephase_rate=0.5,
        t_time={'T1': 11, 'T2': 10},
        key_size=300,
        q_source_probs=[1., 0.],
        correction=correction,
        runs=10)
def run_b92_experiment(correction=True):
    """Run the B92 key-distribution simulation with the default settings."""
    return run_experiment(
        [B92Sender, B92Receiver],
        fibre_length=25000,
        dephase_rate=0.5,
        t_time={'T1': 11, 'T2': 10},
        key_size=300,
        q_source_probs=[1., 0.],
        correction=correction,
        runs=10)
def plot_loss_experiment(protocols, runs=100):
    """Plot key-transmission success vs fibre length for several loss rates."""
    fibre_lengths = np.linspace(0, 10, 6)
    loss_rates = np.linspace(0, 0.01, 5)
    for p_loss in loss_rates:
        results = []
        for fibre_len in fibre_lengths:
            print(f'Running l={fibre_len}, p_loss={p_loss}')
            ns.sim_reset()
            stats = run_experiment(
                protocols=protocols,
                fibre_length=fibre_len,
                dephase_rate=0,
                key_size=25,
                runs=runs,
                t_time={'T1': 11, 'T2': 10},
                q_source_probs=[1., 0.],
                loss=(0, p_loss))
            results.append(stats)
        # Fraction of runs whose keys agreed exactly, per fibre length.
        success = [r['MATCHED_KEYS'] / runs for r in results]
        plt.plot([length / 1000 for length in fibre_lengths], success,
                 marker='.', linestyle='solid', label=f'Loss Rate={p_loss}')
    plt.legend()
    plt.title('Key Distribution Efficiency Over Fibre')
    plt.ylim(0, 1.1)
    plt.xlabel('Length (km)')
    plt.ylabel('Percentage of correctly transmitted keys')
    plt.show()
def plot_key_length_vs_length(protocols, runs=100):
    """Plot key-transmission success vs fibre length for several key sizes."""
    fibre_lengths = np.linspace(0, 10, 5)
    key_sizes = np.linspace(15, 100, 4, dtype=int)
    for size in key_sizes:
        results = []
        for fibre_len in fibre_lengths:
            print(f'Running l={fibre_len}, size={size}')
            ns.sim_reset()
            results.append(run_experiment(
                protocols=protocols,
                fibre_length=fibre_len,
                dephase_rate=0,
                key_size=size,
                runs=runs,
                t_time={'T1': 11, 'T2': 10},
                q_source_probs=[1., 0.],
                loss=(0, 0.01)))
        # Fraction of runs whose keys agreed exactly, per fibre length.
        success = [r['MATCHED_KEYS'] / runs for r in results]
        plt.plot([length / 1000 for length in fibre_lengths], success,
                 marker='.', linestyle='solid', label=f'Key Size={size}')
    plt.legend()
    plt.title('Key Distribution Efficiency Over Fibre')
    plt.ylim(0, 1.1)
    plt.xlabel('Length (km)')
    plt.ylabel('Percentage of correctly transmitted keys')
    plt.show()
def plot_fibre_length_experiment(protocols, sim_type='match', runs=50):
    """Plot key-agreement results vs fibre length for several dephase rates.

    ``sim_type`` selects the y-quantity: 'match' plots the fraction of runs
    with matching keys (solid: raw, dashed: after cascade reconciliation);
    'qber' plots the averaged bit error rates (same solid/dashed convention).
    """
    lengths = np.linspace(100, 5000, 6)
    key_size = 200
    km = [length / 1000 for length in lengths]
    for phase in [0.05, 0.1, 0.15]:
        data = []
        for length in lengths:
            print(f'Running l={length}, p={phase}')
            ns.sim_reset()
            data.append(run_experiment(
                protocols=protocols,
                fibre_length=length,
                dephase_rate=phase,
                key_size=key_size,
                loss=(0.00, 0.00001),
                runs=runs,
                correction=True,
                t_time={'T1': 11, 'T2': 10},
                q_source_probs=[1., 0.]))
        label = f'Dephase Rate={"%.2f" % phase}'
        if sim_type == 'match':
            raw = [d['MATCHED_KEYS'] / runs for d in data]
            corrected = [d['CORRECTED_MATCHED'] / runs for d in data]
        elif sim_type == 'qber':
            raw = [d['AVG_QBER'] for d in data]
            corrected = [d['REC_AVG_QBER'] for d in data]
        else:
            continue
        plt.plot(km, raw, marker='.', linestyle='solid', label=label)
        plt.plot(km, corrected, marker='.', linestyle='dashed', label=label)
    plt.title(f'Key Distribution Efficiency Over Fibre: Key size {key_size}')
    plt.ylim(0, 1.1)
    plt.xlabel('Length (km)')
    if sim_type == 'match':
        plt.ylabel('Percentage of correctly transmitted keys')
    elif sim_type == 'qber':
        plt.ylabel('Bit Error Rate')
    plt.legend()
    plt.show()
def run_experiment(protocols, fibre_length, dephase_rate, key_size, t_time=None, runs=100, q_source_probs=(1., 0.),
                   loss=(0, 0), correction=False):
    """Run ``runs`` QKD simulations and summarise key-agreement statistics.

    Args:
        protocols: pair of (sender, receiver) protocol classes.
        fibre_length: fibre length passed to the network model.
        dephase_rate: channel dephasing rate.
        key_size: number of key bits to establish per run.
        t_time: T1/T2 relaxation times; defaults to {'T1': 11, 'T2': 10}.
        runs: number of independent simulation runs.
        q_source_probs: qubit source emission probabilities.
        loss: (initial, per-length) photon loss parameters.
        correction: when True, run cascade reconciliation after each run.

    Returns:
        dict with MISMATCHED_KEYS / MATCHED_KEYS / CORRECTED_MATCHED counts
        and AVG_QBER / REC_AVG_QBER averaged error rates.
    """
    if t_time is None:
        t_time = {'T1': 11, 'T2': 10}
    # Reset the module-level collectors for this experiment.
    global bob_keys, alice_keys, bob_corrected_keys
    bob_keys = []
    alice_keys = []
    bob_corrected_keys = []
    for _ in range(runs):
        ns.sim_reset()
        n = TwoPartyNetwork('network',
                            fibre_length,
                            dephase_rate,
                            key_size,
                            t_time,
                            q_source_probs,
                            loss).generate_noisy_network()
        node_a = n.get_node("alice")
        node_b = n.get_node("bob")
        p1 = protocols[0](node_a, key_size=key_size)
        p2 = protocols[1](node_b, key_size=key_size)
        p1.start()
        p2.start()
        ns.sim_run()
        alice_keys.append(p1.key)
        bob_keys.append(p2.key)
        if correction:
            # Reconcile Bob's key against Alice's via the cascade protocol.
            c1 = cascade.SenderProtocol(node_a, key=alice_keys[-1])
            c2 = cascade.ReceiverProtocol(node_b, key=bob_keys[-1])
            c1.start()
            c2.start()
            ns.sim_run()
            bob_corrected_keys.append(c2.cor_key)

    def keys_match(key1, key2):
        # Keys match only when both exist, have equal length and agree bitwise.
        if key1 is None or key2 is None or len(key1) != len(key2):
            return False
        return all(b1 == b2 for b1, b2 in zip(key1, key2))

    def qber(key1, key2):
        # Quantum bit error rate; a missing or empty key counts as 100% error.
        # (The original raised ZeroDivisionError on empty keys and IndexError
        # when key2 was shorter than key1.)
        if key1 is None or key2 is None or len(key1) == 0:
            return 1
        matched = sum(1 for b1, b2 in zip(key1, key2) if b1 == b2)
        return 1 - (matched / len(key1))

    _stats = {'MISMATCHED_KEYS': 0,
              'MATCHED_KEYS': 0,
              'CORRECTED_MATCHED': 0,
              'AVG_QBER': 0,
              'REC_AVG_QBER': 0
              }
    for i, bob_key in enumerate(bob_keys):
        alice_key = alice_keys[i]
        if not keys_match(alice_key, bob_key):
            _stats['MISMATCHED_KEYS'] += 1
        else:
            _stats['MATCHED_KEYS'] += 1
        _stats['AVG_QBER'] += qber(bob_key, alice_key) / len(bob_keys)
    for i, bob_key in enumerate(bob_corrected_keys):
        alice_key = alice_keys[i]
        if keys_match(alice_key, bob_key):
            _stats['CORRECTED_MATCHED'] += 1
        _stats['REC_AVG_QBER'] += qber(bob_key, alice_key) / len(bob_keys)
    # Truncate both averaged QBERs to 5 decimals (the original truncated only
    # AVG_QBER, leaving REC_AVG_QBER inconsistently precise).
    _stats['AVG_QBER'] = int(1e5 * _stats['AVG_QBER']) / 1e5
    _stats['REC_AVG_QBER'] = int(1e5 * _stats['REC_AVG_QBER']) / 1e5
    return _stats
if __name__ == "__main__":
    # Uncomment one of the lines below to run a different protocol/experiment.
    # print(run_e91_experiment())
    # print(run_bb84_experiment())
    # print(run_b92_experiment())
    # plot_fibre_length_experiment([BB84Sender, BB84Receiver], sim_type='qber')
    plot_fibre_length_experiment([E91Sender, E91Receiver], sim_type='match')
|
{"hexsha": "b768bb9ab5f20d2d4f3fee0412d4331b89b4ac43", "size": 9200, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_simulations/run_sim.py", "max_stars_repo_name": "stephendiadamo/qkd_error_recon", "max_stars_repo_head_hexsha": "cb21f4764ab4bd3dad4db7f0d61164d2457731c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_simulations/run_sim.py", "max_issues_repo_name": "stephendiadamo/qkd_error_recon", "max_issues_repo_head_hexsha": "cb21f4764ab4bd3dad4db7f0d61164d2457731c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_simulations/run_sim.py", "max_forks_repo_name": "stephendiadamo/qkd_error_recon", "max_forks_repo_head_hexsha": "cb21f4764ab4bd3dad4db7f0d61164d2457731c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-22T14:12:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-08T12:33:53.000Z", "avg_line_length": 35.1145038168, "max_line_length": 115, "alphanum_fraction": 0.5172826087, "include": true, "reason": "import numpy", "num_tokens": 2224}
|
import numpy as np
import pandas as pd
import itertools as it
__version__ = 0.1
class Frame():
    """
    Creates a `kadro.Frame` object out of a `pandas.DataFrame` object. Will ignore index.
    Datastructure is immutable but reference to `pandas.DataFrame` is always kept.
    <pre>Example:
    import numpy as np
    import pandas as pd
    import kadro as kd
    np.random.seed(42)
    n = 40
    r1 = np.random.rand(n)
    r2 = np.random.rand(n)
    df = pd.DataFrame({
        'a': np.random.randn(n),
        'b': np.random.randn(n),
        'c': ['foo' if x > 0.5 else 'bar' for x in r1],
        'd': ['fizz' if x > 0.5 else 'bo' for x in r2]
    })
    kf = kd.Frame(df)
    </pre>
    """
    def __init__(self, df, groups=None):
        # ``groups=None`` replaces the original mutable default ``groups=[]``.
        self.df = df.copy()
        # The pandas representation; the incoming index is discarded.
        self.df.index = np.arange(df.shape[0])
        # Shape of the underlying pandas dataframe.
        self.shape = self.df.shape
        # Currently active grouping columns (affects mutate/sort/agg).
        self.groups = [] if groups is None else list(groups)
        # Column names of the frame.
        self.columns = df.columns

    @staticmethod
    def _flatten_args(args):
        # Accept both ``f("a", "b")`` and ``f(["a", "b"])`` call styles.
        # The original ``itertools.chain(*args)`` iterated bare strings
        # character by character, breaking the documented string form.
        flat = []
        for arg in args:
            if isinstance(arg, str):
                flat.append(arg)
            else:
                try:
                    flat.extend(arg)
                except TypeError:
                    # Non-iterable scalar (e.g. a row number for .slice).
                    flat.append(arg)
        return flat

    def __repr__(self):
        res = "Pandas derived TibbleFrame Object.\n"
        if len(self.groups) > 0:
            res += "With groups {}\n".format(self.groups)
        res = res + "\n" + str(self.df.head(10))
        if self.df.shape[0] > 10:
            res = res + "\n only showing top 10 rows."
        return res

    def _group_mutate(self, **kwargs):
        # Apply each mutation function per group and stitch the results back.
        df_copy = self.df.copy()
        grouped = df_copy.groupby(self.groups)
        for key in kwargs.keys():
            new_row = pd.concat([group[1].pipe(kwargs[key]) for group in grouped])
            df_copy[key] = new_row
        return Frame(df_copy, self.groups[:])

    def show(self, n=10):
        """
        Shows the `n` top items of the datastructure.
        <pre>Example:
        kf.show()
        kf.show(20)
        </pre>
        """
        res = "Pandas derived TibbleFrame Object.\n"
        if len(self.groups) > 0:
            res += "With groups {}\n".format(self.groups)
        print(res + "\n" + str(self.df.head(n)))

    def plot(self, *args, **kwargs):
        """
        Wrapper around pandas plotting. See pandas documentation.
        """
        return self.df.plot(*args, **kwargs)

    def mutate(self, **kwargs):
        """
        Creates or changes a column. Keeps groups in mind.
        <pre>Example:
        kf.mutate(a = lambda _: _['col1'] + _['col2']*2)
        </pre>
        """
        if len(self.groups) != 0:
            return self._group_mutate(**kwargs)
        df_copy = self.df.copy()
        for mut in kwargs.keys():
            df_copy[mut] = kwargs[mut](df_copy)
        return Frame(df_copy, self.groups[:])

    def filter(self, *args):
        """
        Filter rows to keep.
        <pre>Example:
        kf.filter(lambda _: _['col1'] > 20)
        </pre>
        """
        df_copy = self.df.copy()
        for func in args:
            predicate = func(df_copy)
            df_copy = df_copy[predicate]
        return Frame(df_copy, self.groups[:])

    def select(self, *args):
        """
        Select a subset of the columns.
        <pre>Example:
        kf.select("col1", "col2")
        kf.select(["col1", "col2"])
        </pre>
        """
        columns = self._flatten_args(args)
        df_copy = self.df.copy()
        return Frame(df_copy[columns], self.groups[:])

    def rename(self, rename_dict):
        """
        Renames the dataframe.
        Expects a dictionary of strings where the keys represent
        the old names and the values represent the new names.
        <pre>Example:
        kf.rename({"aa":"a", "bb":"b"})
        </pre>
        """
        df_copy = self.df.copy()
        df_copy = df_copy.rename(index=str, columns=rename_dict)
        return Frame(df_copy, self.groups[:])

    def set_names(self, names):
        """
        Expects a list of strings and will reset the column names.
        <pre>Example:
        kf.set_names(["a", "b", "c", "omg_d"])
        </pre>
        """
        df_copy = self.df.copy()
        df_copy.columns = names
        return Frame(df_copy, self.groups[:])

    def drop(self, *args):
        """
        Drops columns from the frame.
        <pre>Example:
        kf.drop("col1")
        kf.drop(["col1", "col2"])
        </pre>
        """
        df_copy = self.df.copy()
        dropped = set(self._flatten_args(args))
        columns = [col for col in df_copy.columns if col not in dropped]
        return Frame(df_copy[columns], self.groups[:])

    def drop_duplicates(self, *args):
        """
        Removes all the duplicate rows in the frame.
        Works just like the pandas method.
        <pre>Example:
        kf.drop_duplicates()
        </pre>
        """
        df_copy = self.df.copy()
        df_copy = df_copy.drop_duplicates(*args)
        return Frame(df_copy, self.groups[:])

    def complete_cases(self):
        """
        Removes all rows that contain missing values.
        Works just like the pandas ``dropna`` method.
        <pre>Example:
        kf.complete_cases()
        </pre>
        """
        df_copy = self.df.copy()
        # The original called the nonexistent ``drop_na()`` (AttributeError).
        return Frame(df_copy.dropna(), self.groups[:])

    def sort(self, *args, ascending=True):
        """
        Sort the data structure based on the column names passed in.
        Works just like .sort_values in pandas but keeps groups in mind.
        <pre>Example:
        kf.sort("col1")
        kf.sort(["col1", "col2"], ascending=[True, False])
        </pre>
        """
        # ``ascending`` is now keyword-only: the original signature
        # ``sort(self, ascending=True, *args)`` made the documented call
        # ``kf.sort("col1")`` bind the column name to ``ascending``.
        df_copy = self.df.copy()
        sort_cols = self.groups + self._flatten_args(args)
        df_sorted = df_copy.sort_values(sort_cols, ascending=ascending)
        return Frame(df_sorted, self.groups[:])

    def group_by(self, *args):
        """
        Add a group to the datastructure. Will have effect on .agg/.sort/.mutate methods.
        Calling .agg after grouping will remove it. Otherwise you need to call .ungroup
        if you want to remove the grouping on the datastructure.
        <pre>Example:
        kf.group_by("col1")
        kf.group_by("col1", "col2")
        </pre>
        """
        group_names = [_ for _ in args]
        if any([_ not in self.df.columns for _ in group_names]):
            raise ValueError("Wrong column name in .group_by method: does not exist.")
        return Frame(self.df.copy(), group_names[:])

    def ungroup(self):
        """
        Removes any group from the datastructure.
        """
        return Frame(self.df.copy(), [])

    def pipe(self, func, *args, **kwargs):
        """
        Pipe the datastructure through a large function. Wrapper of `.pipe` in pandas.
        <pre>Example:
        def large_function1(frame):
            ...stuff...
        def large_function2(frame):
            ...stuff...
        kf.pipe(large_function1).pipe(large_function2)
        </pre>
        """
        df_copy = self.df.copy()
        new_df = df_copy.pipe(func, *args, **kwargs)
        return Frame(new_df, self.groups[:])

    def _agg_nogroups(self, **kwargs):
        # Whole-table aggregation: one row, one column per aggregate.
        new_df = pd.DataFrame({k: v(self.df) for k, v in kwargs.items()}, index=[0])
        return Frame(new_df, [])

    def agg(self, **kwargs):
        """
        Aggregates the datastructure. Commonly works with .group_by. If no grouping
        is present it will just aggregate the entire table.
        <pre>Examples:
        kd.group_by("col1").agg(m1 = lambda _: np.mean(_['m1']))
        (kd
          .group_by("col1", "col2")
          .agg(m1 = lambda _: np.mean(_['m1']),
               m2 = lambda _: np.mean(_['m2']),
               c = lambda _: np.cov(_['m1'], _['m2'])[1,1]))
        </pre>
        """
        if len(self.groups) == 0:
            return self._agg_nogroups(**kwargs)
        df_copy = self.df.copy()
        grouped = df_copy.groupby(self.groups)
        res = [grouped.apply(kwargs[_]) for _ in kwargs.keys()]
        res = pd.concat(res, axis=1).reset_index()
        res.columns = self.groups + list(kwargs.keys())
        # Aggregation consumes the grouping, mirroring dplyr's summarise.
        return Frame(res, [])

    def gather(self, key="key", value="value", **kwargs):
        """
        Turns a wide dataframe into a long one. Removes any grouping.
        <pre>CURRENTLY UNIMPLEMENTED!</pre>
        """
        pass

    def spread(self, key="key", value="key", keep=[]):
        """
        Turns a long dataframe into a wide one.
        <pre>CURRENTLY UNIMPLEMENTED!</pre>
        """
        pass

    def sample_n(self, n_samples, replace=False):
        """
        Samples `n_samples` rows from the datastructure. You can do it with, or without, replacement.
        <pre>Example:
        kf.sample_n(100)
        kf.sample_n(1000, replace = True)
        </pre>
        """
        df_copy = self.df.copy()
        idx = np.arange(df_copy.shape[0])
        row_ids = np.random.choice(idx, size=n_samples, replace=replace)
        return Frame(df_copy.iloc[row_ids], self.groups[:])

    def head(self, n=5):
        """
        Mimic of pandas head function. Selects `n` top rows.
        <pre>Example:
        kf.head(10)
        </pre>
        """
        return Frame(self.df.copy().head(n), self.groups[:])

    def tail(self, n=5):
        """
        Mimic of pandas tail function. Selects `n` bottom rows.
        <pre>Example:
        kf.tail(10)
        </pre>
        """
        return Frame(self.df.copy().tail(n), self.groups[:])

    def slice(self, *args):
        """
        Slice away rows of the dataframe based on row number.
        Remember; these Frames start at 0.
        <pre>Example:
        kf.slice(1,2,3)
        kf.slice([1,2,3])
        </pre>
        """
        # Normalising to a flat list also fixes the original's tuple-indexing
        # recursion (``df.iloc`` received a nested tuple for ``kf.slice(1,2,3)``).
        rows = self._flatten_args(args)
        df_copy = self.df.copy()
        return Frame(df_copy.iloc[rows], self.groups[:])

    def _check_join_params(self, other, by):
        # Default join keys are the overlapping column names; validate any
        # user-supplied keys against both frames.
        if not by:
            by = set(self.columns).intersection(other.columns)
            if len(by) == 0:
                raise ValueError("Columns do not overlap!")
        for i in by:
            if (i not in self.columns) or (i not in other.columns):
                raise ValueError("Column {} does not overlap in both datastructures".format(i))

    def left_join(self, other, by=None):
        """
        Perform a left join with another frame.
        <pre>Example:
        import pandas as pd
        import kadro as kd
        df_age = pd.DataFrame({
            'name': ['vincent', 'tim', 'anna'],
            'age': [28, 30, 25]
        })
        df_length = pd.DataFrame({
            'name': ['vincent', 'tim'],
            'length': [188, 172]
        })
        kd_age = kd.Frame(df_age)
        kd_length = kd.Frame(df_length)
        kd_age.left_join(kd_length)
        </pre>
        """
        self._check_join_params(other, by)
        new = pd.merge(self.df.copy(), other.df.copy(), how='left', on=by)
        return Frame(new, self.groups[:])

    def inner_join(self, other, by=None):
        """
        Perform an inner join with another frame.
        <pre>Example:
        import pandas as pd
        import kadro as kd
        df_age = pd.DataFrame({
            'name': ['vincent', 'tim', 'anna'],
            'age': [28, 30, 25]
        })
        df_length = pd.DataFrame({
            'name': ['vincent', 'tim'],
            'length': [188, 172]
        })
        kd_age = kd.Frame(df_age)
        kd_length = kd.Frame(df_length)
        kd_age.inner_join(kd_length)
        </pre>
        """
        self._check_join_params(other, by)
        new = pd.merge(self.df.copy(), other.df.copy(), how='inner', on=by)
        return Frame(new, self.groups[:])
|
{"hexsha": "4e95b2da6a9199a4084b0f14adee6cf8aba3d7c8", "size": 11827, "ext": "py", "lang": "Python", "max_stars_repo_path": "kadro/Frame.py", "max_stars_repo_name": "koaning/kadro", "max_stars_repo_head_hexsha": "cbf993e5142d1ade26ac5922d7d15784d56b3db6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-03-01T15:19:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T21:02:27.000Z", "max_issues_repo_path": "kadro/Frame.py", "max_issues_repo_name": "koaning/kadro", "max_issues_repo_head_hexsha": "cbf993e5142d1ade26ac5922d7d15784d56b3db6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-02-03T10:13:22.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-28T21:26:10.000Z", "max_forks_repo_path": "kadro/Frame.py", "max_forks_repo_name": "koaning/kadro", "max_forks_repo_head_hexsha": "cbf993e5142d1ade26ac5922d7d15784d56b3db6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-03-20T03:41:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-03T06:00:16.000Z", "avg_line_length": 29.5675, "max_line_length": 101, "alphanum_fraction": 0.5403737211, "include": true, "reason": "import numpy", "num_tokens": 2874}
|
import pandas as pd
from scipy.stats import beta, norm
from scattertext.termranking.OncePerDocFrequencyRanker import OncePerDocFrequencyRanker
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class BetaPosterior(CorpusBasedTermScorer):
    '''
    Beta Posterior Scoring. Code adapted from
    https://github.com/serinachang5/gender-associations/blob/master/score_words.py (Chang 2019).
    Serina Chang and Kathleen McKeown. Automatically Inferring Gender Associations from Language. To appear
    in Empirical Methods in Natural Language Processing (EMNLP) 2019 (Short Paper).
    Method was originally introduced in
    David Bamman, Jacob Eisenstein, and Tyler Schnoebelen. GENDER IDENTITY AND LEXICAL VARIATION IN SOCIAL MEDIA. 2014.
    Direct quote from Bamman (2014)
    Identifying gender markers. Our goal is to identify words that are used with
    unusual frequency by authors of a single gender. Assume that each term has an
    unknown likelihood fi, indicating the proportion of authors who use term i. For
    gender j, there are Nj authors, of whom kji use term i; the total count of the term i
    is ki. We ask whether the count kji is significantly larger than expected. Assuming
    a non-informative prior distribution on fi, the posterior distribution (conditioned on
    the observations ki and N) is Beta(ki, N-ki). The distribution of the gender-specific
    counts can be described by an integral over all possible fi. This integral defines the
    Beta-Binomial distribution (Gelman, Carlin, Stern, and Rubin 2004), and has a
    closed form solution. We mark a term as having a significant gender association if
    the cumulative distribution at the count kji is p < .05.
    ```
    >>> term_scorer = BetaPosterior(corpus).set_categories('Positive', ['Negative'], ['Plot']).get_score_df()
    ```
    '''

    def __init__(self, corpus, *args, **kwargs):
        CorpusBasedTermScorer.__init__(self, corpus, *args, **kwargs)
        # Count each term at most once per document.
        self.set_term_ranker(OncePerDocFrequencyRanker)

    def _set_scorer_args(self, **kwargs):
        # This scorer takes no extra arguments.
        pass

    def get_scores(self, *args):
        """Return only the 'score' column of the full score dataframe."""
        return self.get_score_df()['score']

    def get_score_df(self):
        '''
        Compute per-term counts, beta-posterior p-values, z-scores and the
        signed association score.

        :return: pd.DataFrame with columns cat, ncat, all, cat_pct, ncat_pct,
            cat_p, ncat_p, cat_z, ncat_z, score (and neut when neutral
            categories are configured).
        '''
        term_freq_df = self.term_ranker_.get_ranks('')
        cat_freq_df = pd.DataFrame({
            'cat': term_freq_df[self.category_name],
            'ncat': term_freq_df[self.not_category_names].sum(axis=1),
        })
        if self.neutral_category_names:
            cat_freq_df['neut'] = term_freq_df[self.neutral_category_names].sum(axis=1)
        cat_freq_df['all'] = cat_freq_df.sum(axis=1)
        N = cat_freq_df['all'].sum()
        catN = cat_freq_df['cat'].sum()
        ncatN = cat_freq_df['ncat'].sum()
        cat_freq_df['cat_pct'] = cat_freq_df['cat'] * 1. / catN
        cat_freq_df['ncat_pct'] = cat_freq_df['ncat'] * 1. / ncatN

        def row_beta_posterior(row):
            # Survival function of the Beta(k, N - k) posterior evaluated at
            # the category-specific usage proportion (Bamman 2014).
            return pd.Series({
                'cat_p': beta(row['all'], N - row['all']).sf(row['cat'] * 1. / catN),
                'ncat_p': beta(row['all'], N - row['all']).sf(row['ncat'] * 1. / ncatN),
            })

        p_val_df = cat_freq_df.apply(row_beta_posterior, axis=1)
        cat_freq_df['cat_p'] = p_val_df['cat_p']
        cat_freq_df['ncat_p'] = p_val_df['ncat_p']
        cat_freq_df['cat_z'] = norm.ppf(p_val_df['cat_p'])
        cat_freq_df['ncat_z'] = norm.ppf(p_val_df['ncat_p'])
        # Use .loc assignments instead of the original chained
        # ``df['score'][mask] = ...`` form, which raises
        # SettingWithCopyWarning and can silently fail on modern pandas.
        cat_freq_df['score'] = 0.
        lt = cat_freq_df['cat_pct'] < cat_freq_df['ncat_pct']
        gt = cat_freq_df['cat_pct'] > cat_freq_df['ncat_pct']
        cat_freq_df.loc[lt, 'score'] = cat_freq_df.loc[lt, 'ncat_z']
        cat_freq_df.loc[gt, 'score'] = -cat_freq_df.loc[gt, 'cat_z']
        return cat_freq_df

    def get_name(self):
        return "Beta Posterior"
|
{"hexsha": "a5f6a5ff15152f301187cd830a49c1a2de568e21", "size": 3888, "ext": "py", "lang": "Python", "max_stars_repo_path": "scattertext/termscoring/BetaPosterior.py", "max_stars_repo_name": "shettyprithvi/scattertext", "max_stars_repo_head_hexsha": "a15613b6feef3ddc56c03aadb8e1e629d28a427d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1823, "max_stars_repo_stars_event_min_datetime": "2016-07-28T00:25:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:33:57.000Z", "max_issues_repo_path": "scattertext/termscoring/BetaPosterior.py", "max_issues_repo_name": "shettyprithvi/scattertext", "max_issues_repo_head_hexsha": "a15613b6feef3ddc56c03aadb8e1e629d28a427d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2016-07-28T23:13:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-24T03:53:38.000Z", "max_forks_repo_path": "scattertext/termscoring/BetaPosterior.py", "max_forks_repo_name": "shettyprithvi/scattertext", "max_forks_repo_head_hexsha": "a15613b6feef3ddc56c03aadb8e1e629d28a427d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 271, "max_forks_repo_forks_event_min_datetime": "2016-12-26T12:56:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T19:35:13.000Z", "avg_line_length": 42.7252747253, "max_line_length": 120, "alphanum_fraction": 0.6761831276, "include": true, "reason": "from scipy", "num_tokens": 1023}
|
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets
from tqdm import tqdm
from pixelsnail import PixelSNAIL
def train(epoch, loader, model, optimizer, device):
    """Train ``model`` for one epoch over ``loader``.

    Uses per-pixel cross-entropy between the model's logits and the input
    image itself (autoregressive pixel modelling), and reports running loss
    and pixel accuracy on the tqdm progress bar.
    """
    loader = tqdm(loader)
    criterion = nn.CrossEntropyLoss()
    for i, (img, label) in enumerate(loader):
        model.zero_grad()
        img = img.to(device)
        # The model returns (logits, cache); only the logits are scored.
        out, _ = model(img)
        loss = criterion(out, img)
        loss.backward()
        optimizer.step()
        # Pixel-level accuracy of the argmax prediction against the input.
        _, pred = out.max(1)
        correct = (pred == img).float()
        accuracy = correct.sum() / img.numel()
        loader.set_description(
            (f'epoch: {epoch + 1}; loss: {loss.item():.5f}; ' f'acc: {accuracy:.5f}')
        )
class PixelTransform:
    """Dataset transform: convert a PIL image (or array-like) to a LongTensor."""

    def __init__(self):
        pass

    def __call__(self, input):
        # Copy the pixel data into a numpy array, then hand it to torch as
        # int64 — the dtype CrossEntropyLoss expects for class targets.
        arr = np.array(input)
        return torch.from_numpy(arr).long()
if __name__ == '__main__':
    device = 'cuda'
    epoch = 10  # NOTE(review): unused — the loop below hard-codes range(10)
    # MNIST digits as raw integer pixel grids (see PixelTransform above).
    dataset = datasets.MNIST('.', transform=PixelTransform(), download=True)
    loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
    # PixelSNAIL over 28x28 inputs with 256 output classes (pixel values).
    model = PixelSNAIL([28, 28], 256, 128, 5, 2, 4, 128)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    for i in range(10):
        train(i, loader, model, optimizer, device)
        # Checkpoint after every epoch; assumes ./checkpoint exists.
        torch.save(model.state_dict(), f'checkpoint/mnist_{str(i + 1).zfill(3)}.pt')
|
{"hexsha": "5d5af244159bf5a614ce9d0553ca94e37ce00f00", "size": 1605, "ext": "py", "lang": "Python", "max_stars_repo_path": "pixelsnail_cifar.py", "max_stars_repo_name": "eyalbetzalel/vq-vae-2-pytorch", "max_stars_repo_head_hexsha": "464c683e5049377cba7101156542168d068cf507", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pixelsnail_cifar.py", "max_issues_repo_name": "eyalbetzalel/vq-vae-2-pytorch", "max_issues_repo_head_hexsha": "464c683e5049377cba7101156542168d068cf507", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pixelsnail_cifar.py", "max_forks_repo_name": "eyalbetzalel/vq-vae-2-pytorch", "max_forks_repo_head_hexsha": "464c683e5049377cba7101156542168d068cf507", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3181818182, "max_line_length": 85, "alphanum_fraction": 0.6068535826, "include": true, "reason": "import numpy", "num_tokens": 419}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
@copyright 2016 J.T. Lapreste
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_IF_ZERO_ELSE_ONE_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_IF_ZERO_ELSE_ONE_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
  /*!
    @ingroup group-boolean
    Function object implementing if_zero_else_one capabilities
    If cond is @ref True returns @ref Zero else returns @ref One
    @par Semantic:
    For every parameter of type C:
    @code
    T r = if_zero_else_one(cond);
    @endcode
    is similar to:
    @code
    T r = cond ? Zero : One;
    @endcode
    @par Note:
    The return type is generally C except in the case where C is as_logical_t<T>, in which case
    the return type is T.
  **/
  // Documentation-only declaration: the callable is actually provided by the
  // scalar/simd implementation headers included below.
  const boost::dispatch::functor<tag::if_zero_else_one_> if_zero_else_one = {};
} }
#endif
#include <boost/simd/function/scalar/if_zero_else_one.hpp>
#include <boost/simd/function/simd/if_zero_else_one.hpp>
#endif
|
{"hexsha": "9f1c391754116fa0455ebe2b21ef550f6fe87d13", "size": 1312, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/function/if_zero_else_one.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/function/if_zero_else_one.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/function/if_zero_else_one.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2962962963, "max_line_length": 100, "alphanum_fraction": 0.6105182927, "num_tokens": 292}
|
"""
IndicatorSOS1Bridge{T,S<:MOI.AbstractScalarSet}
The `IndicatorSOS1Bridge` replaces an indicator constraint of the following
form:
``z \\in \\mathbb{B}, z == 1 \\implies f(x) \\in S`` with a SOS1 constraint:
``z \\in \\mathbb{B}, slack \\text{ free}, f(x) + slack \\in S, SOS1(slack, z)``.
"""
struct IndicatorSOS1Bridge{T,S<:MOI.AbstractScalarSet} <: AbstractBridge
slack::MOI.VariableIndex
z::MOI.VariableIndex
sos_index::MOI.ConstraintIndex{MOI.VectorOfVariables,MOI.SOS1{T}}
affine_index::MOI.ConstraintIndex{MOI.ScalarAffineFunction{T},S}
end
function bridge_constraint(
    ::Type{IndicatorSOS1Bridge{T,S}},
    model::MOI.ModelLike,
    f::MOI.VectorAffineFunction{T},
    s::MOI.Indicator{MOI.ACTIVATE_ON_ONE,S},
) where {T<:Real,S}
    # Row 1 of `f` is the indicator variable z; row 2 is the constrained
    # affine expression.
    f_scalars = MOI.Utilities.eachscalar(f)
    z = convert(MOI.VariableIndex, f_scalars[1])
    slack = MOI.add_variable(model)
    # SOS1 forces at most one of {slack, z} to be nonzero, so z == 1
    # implies slack == 0, hence f(x) itself must lie in `s.set`.
    sos_index = MOI.add_constraint(
        model,
        MOI.VectorOfVariables([slack, z]),
        MOI.SOS1{T}([0.4, 0.6]), # This weight vector is arbitrary!
    )
    new_f = MOI.Utilities.operate(+, T, f_scalars[2], slack)
    affine_index = MOI.add_constraint(model, new_f, s.set)
    return IndicatorSOS1Bridge{T,S}(slack, z, sos_index, affine_index)
end
# The bridge applies to any vector function with an ACTIVATE_ON_ONE
# indicator over a scalar set.
function MOI.supports_constraint(
    ::Type{<:IndicatorSOS1Bridge},
    ::Type{<:MOI.AbstractVectorFunction},
    ::Type{<:MOI.Indicator{MOI.ACTIVATE_ON_ONE,<:MOI.AbstractScalarSet}},
)
    return true
end
function MOI.get(
    model::MOI.ModelLike,
    attr::MOI.ConstraintSet,
    b::IndicatorSOS1Bridge,
)
    # Recover the original indicator set by re-wrapping the scalar set of
    # the bridged affine constraint.
    return MOI.Indicator{MOI.ACTIVATE_ON_ONE}(MOI.get(model, attr, b.affine_index))
end
function MOI.get(
    model::MOI.ModelLike,
    attr::MOI.ConstraintFunction,
    b::IndicatorSOS1Bridge{T},
) where {T}
    # Reconstruct the original vector function: drop the slack terms from
    # the bridged affine constraint (placed in output row 2) and re-insert
    # the indicator variable z as output row 1.
    f = MOI.get(model, attr, b.affine_index)
    terms = MOI.VectorAffineTerm{T}[
        MOI.VectorAffineTerm(2, t) for t in f.terms if t.variable != b.slack
    ]
    push!(terms, MOI.VectorAffineTerm(1, MOI.ScalarAffineTerm(one(T), b.z)))
    return MOI.VectorAffineFunction(terms, [zero(T), f.constant])
end
function MOI.delete(model::MOI.ModelLike, bridge::IndicatorSOS1Bridge)
    # Delete the constraints before the slack variable they reference.
    MOI.delete(model, bridge.sos_index)
    MOI.delete(model, bridge.affine_index)
    MOI.delete(model, bridge.slack)
    return
end
# The slack is added as a free (unconstrained) variable, so no constrained
# variable types are reported.
function MOI.Bridges.added_constrained_variable_types(
    ::Type{<:IndicatorSOS1Bridge},
)
    return Tuple{Type}[]
end

# Constraint types created by `bridge_constraint`: the SOS1 pairing and the
# relaxed affine constraint.
function MOI.Bridges.added_constraint_types(
    ::Type{<:IndicatorSOS1Bridge{T,S}},
) where {T,S}
    return Tuple{Type,Type}[
        (MOI.VectorOfVariables, MOI.SOS1{T}),
        (MOI.ScalarAffineFunction{T}, S),
    ]
end
# Resolve the concrete bridge type from the indicator's inner scalar set S.
function concrete_bridge_type(
    ::Type{<:IndicatorSOS1Bridge{T}},
    ::Type{<:MOI.AbstractVectorFunction},
    ::Type{MOI.Indicator{MOI.ACTIVATE_ON_ONE,S}},
) where {T,S}
    return IndicatorSOS1Bridge{T,S}
end
# Introspection accessors used by MOI.Bridges: the bridge owns exactly one
# variable (the slack), one SOS1 constraint, and one affine constraint.
MOI.get(::IndicatorSOS1Bridge, ::MOI.NumberOfVariables)::Int64 = 1

function MOI.get(b::IndicatorSOS1Bridge, ::MOI.ListOfVariableIndices)
    return [b.slack]
end

function MOI.get(
    ::IndicatorSOS1Bridge,
    ::MOI.NumberOfConstraints{MOI.VectorOfVariables,<:MOI.SOS1},
)::Int64
    return 1
end

function MOI.get(
    ::IndicatorSOS1Bridge{T,S},
    ::MOI.NumberOfConstraints{MOI.ScalarAffineFunction{T},S},
)::Int64 where {T,S}
    return 1
end

function MOI.get(
    b::IndicatorSOS1Bridge{T},
    ::MOI.ListOfConstraintIndices{MOI.VectorOfVariables,<:MOI.SOS1},
) where {T}
    return [b.sos_index]
end

function MOI.get(
    b::IndicatorSOS1Bridge{T,S},
    ::MOI.ListOfConstraintIndices{MOI.ScalarAffineFunction{T},S},
) where {T,S}
    return [b.affine_index]
end
function MOI.get(
    model::MOI.ModelLike,
    attr::MOI.ConstraintPrimal,
    bridge::IndicatorSOS1Bridge,
)
    # Primal of the original constraint is (z, f(x)); f(x) is recovered by
    # subtracting the slack's value from the bridged constraint's primal.
    z = MOI.get(model, MOI.VariablePrimal(attr.result_index), bridge.z)
    w = MOI.get(model, MOI.VariablePrimal(attr.result_index), bridge.slack)
    f = MOI.get(model, attr, bridge.affine_index)
    return [z, f - w]
end
# Primal starts are supported when the model accepts both variable primal
# starts and constraint primal starts on the bridged affine constraint.
function MOI.supports(
    model::MOI.ModelLike,
    attr::MOI.ConstraintPrimalStart,
    ::Type{IndicatorSOS1Bridge{T,S}},
) where {T,S}
    ci = MOI.ConstraintIndex{MOI.ScalarAffineFunction{T},S}
    return MOI.supports(model, MOI.VariablePrimalStart(), MOI.VariableIndex) &&
           MOI.supports(model, attr, ci)
end
function MOI.get(
    model::MOI.ModelLike,
    attr::MOI.ConstraintPrimalStart,
    bridge::IndicatorSOS1Bridge,
)
    # Mirror of the ConstraintPrimal getter, reading start values instead.
    z = MOI.get(model, MOI.VariablePrimalStart(), bridge.z)
    w = MOI.get(model, MOI.VariablePrimalStart(), bridge.slack)
    f = MOI.get(model, attr, bridge.affine_index)
    return [z, f - w]
end
function MOI.set(
    model::MOI.ModelLike,
    attr::MOI.ConstraintPrimalStart,
    bridge::IndicatorSOS1Bridge{T},
    value::AbstractVector,
) where {T}
    # `value` is (z_start, f_start) for the original indicator constraint.
    @assert length(value) == 2
    MOI.set(model, MOI.VariablePrimalStart(), bridge.z, value[1])
    # Offset by the slack's current start (0 if unset) so the bridged
    # constraint f(x) + slack matches the requested start.
    w = something(
        MOI.get(model, MOI.VariablePrimalStart(), bridge.slack),
        zero(T),
    )
    MOI.set(model, attr, bridge.affine_index, value[2] + w)
    return
end
|
{"hexsha": "0a067a7e00b453f9185a5dbc761c82c3285ae652", "size": 5067, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Bridges/Constraint/indicator_sos.jl", "max_stars_repo_name": "manuelbb-upb/MathOptInterface.jl", "max_stars_repo_head_hexsha": "54b6bcb723acb2b2d79584e2f27ea56fd4c7777c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 132, "max_stars_repo_stars_event_min_datetime": "2020-06-20T00:45:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:06:34.000Z", "max_issues_repo_path": "src/Bridges/Constraint/indicator_sos.jl", "max_issues_repo_name": "manuelbb-upb/MathOptInterface.jl", "max_issues_repo_head_hexsha": "54b6bcb723acb2b2d79584e2f27ea56fd4c7777c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 460, "max_issues_repo_issues_event_min_datetime": "2020-06-08T14:12:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T19:03:39.000Z", "max_forks_repo_path": "src/Bridges/Constraint/indicator_sos.jl", "max_forks_repo_name": "guilhermebodin/MathOptInterface.jl", "max_forks_repo_head_hexsha": "6b50fa0f9e31ef97a06facaad60af7d0b6ae3d2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2020-06-08T01:45:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T10:36:25.000Z", "avg_line_length": 28.7897727273, "max_line_length": 81, "alphanum_fraction": 0.6929149398, "num_tokens": 1553}
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import os
import math
def undesired_objects(image):
    """Keep only the largest connected component of a binary mask.

    Parameters
    ----------
    image : ndarray
        Binary mask (zero = background); cast to uint8 for OpenCV.

    Returns
    -------
    ndarray
        Float array of the same shape, 255 inside the largest foreground
        component and 0 elsewhere.
    """
    image = image.astype('uint8')
    nb_components, output, stats, centroids = cv.connectedComponentsWithStats(
        image, connectivity=4)
    img2 = np.zeros(output.shape)
    # Label 0 is the background.  The original code indexed sizes[1]
    # unconditionally and crashed when the mask had no foreground at all;
    # return an empty mask in that case instead.
    if nb_components < 2:
        return img2
    sizes = stats[:, -1]
    # argmax over the foreground labels replaces the manual max-tracking loop.
    max_label = 1 + int(np.argmax(sizes[1:]))
    img2[output == max_label] = 255
    return img2
# Batch-preview segmentation: for every image under `pathx`, binarise with
# Otsu, keep the largest connected component, and interactively display the
# crop of each sufficiently large contour.
pathx = 'X/XX/XXXX/XXX'  # placeholder -- point at the image directory
filename = os.listdir(pathx)
for name in filename:
    newpath= pathx+'/'+name
    orignal =cv.imread(newpath)
    gray = cv.cvtColor(orignal, cv.COLOR_BGR2GRAY)
    # Otsu threshold, then a light erosion to separate touching blobs.
    ret3, thresh2 = cv.threshold(gray, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)
    thresh2 = cv.erode(thresh2, np.ones(5))
    # Invert the largest-component mask so the foreground becomes 1.
    img2=1-(undesired_objects(thresh2)/255)
    img2=np.uint8(img2)
    # ret3, img2 = cv.threshold(img2, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)
    # print(img2[0][0])
    # NOTE(review): the 2-value unpack matches OpenCV 4.x findContours;
    # OpenCV 3.x returned three values -- confirm the target version.
    (components, _) = cv.findContours(img2, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for c in components:
        if cv.contourArea(c) < 4000:
            continue  # skip small contours (noise)
        currBox = cv.boundingRect(c) # returns (x, y, w, h)
        (x, y, w, h) = currBox
        currImg = orignal[y:y+h, x:x+w]
        cv.namedWindow('final Segemented word', 0)
        cv.imshow("final Segemented word",currImg)
        cv.imshow("img",thresh2)
        cv.waitKey(0)  # wait for a key press before the next contour
# skew correction
def deskew(image):
    """Rotate *image* about its centre to undo the skew estimated from the
    minimum-area rectangle enclosing all non-zero pixels."""
    coords = np.column_stack(np.where(image > 0))
    rect_angle = cv.minAreaRect(coords)[-1]
    # minAreaRect reports angles in (-90, 0]; fold into a correction angle.
    correction = -(90 + rect_angle) if rect_angle < -45 else -rect_angle
    h, w = image.shape[:2]
    centre = (w // 2, h // 2)
    rot_mat = cv.getRotationMatrix2D(centre, correction, 1.0)
    return cv.warpAffine(image, rot_mat, (w, h), flags=cv.INTER_CUBIC,
                         borderMode=cv.BORDER_REPLICATE)
|
{"hexsha": "dfa1e9010884230981ab22df339c7bef3f05a71e", "size": 1987, "ext": "py", "lang": "Python", "max_stars_repo_path": "Helpers/connectedcomponents.py", "max_stars_repo_name": "AsianZeus/Digitalizing-Prescription-Image", "max_stars_repo_head_hexsha": "3e102e5fd5a6e8427153ae57c94a698b0abc06a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Helpers/connectedcomponents.py", "max_issues_repo_name": "AsianZeus/Digitalizing-Prescription-Image", "max_issues_repo_head_hexsha": "3e102e5fd5a6e8427153ae57c94a698b0abc06a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Helpers/connectedcomponents.py", "max_forks_repo_name": "AsianZeus/Digitalizing-Prescription-Image", "max_forks_repo_head_hexsha": "3e102e5fd5a6e8427153ae57c94a698b0abc06a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5396825397, "max_line_length": 101, "alphanum_fraction": 0.6114745848, "include": true, "reason": "import numpy", "num_tokens": 584}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from ...utils.testing import requires_dependency
from ...utils.random import get_random_state
from ... import stats as gammapy_stats
# TODO; change to a test dataset that doesn't use random values (just list the numbers)
# and use `pytest.fixture` to use it in the tests.
def get_test_data():
    """Return (n_on, model, staterror, n_off) arrays from a fixed RNG seed."""
    random_state = get_random_state(3)
    # put factor to 100 to not run into special cases in WStat
    model = random_state.rand(10) * 100
    data = random_state.poisson(model)
    staterror = np.sqrt(data)
    # Off counts drawn at 0.7x the model rate.
    off_vec = random_state.poisson(0.7 * model)
    return data, model, staterror, off_vec
# TODO : Produce reference numbers outside of test (avoid sherpa dependency)
# Note: There is an independent implementation of the XSPEC wstat that can
# be used for debugging: gammapy/dev/sherpa/stats/xspec_stats.py
# Also there is the script dev/sherpa/stats/compare_stats.py that is very
# usefull for debugging
@requires_dependency('sherpa')
def test_cstat():
    """Check the summed gammapy cstat against Sherpa's CStat statistic."""
    import sherpa.stats as ss
    sherpa_stat = ss.CStat()
    data, model, staterror, off_vec = get_test_data()
    desired, fvec = sherpa_stat.calc_stat(data, model, staterror=staterror)
    statsvec = gammapy_stats.cstat(n_on=data, mu_on=model)
    actual = np.sum(statsvec)
    assert_allclose(actual, desired)
@requires_dependency('sherpa')
def test_cash():
    """Check the summed gammapy cash statistic against Sherpa's Cash."""
    import sherpa.stats as ss
    sherpa_stat = ss.Cash()
    data, model, staterror, off_vec = get_test_data()
    desired, fvec = sherpa_stat.calc_stat(data, model, staterror=staterror)
    statsvec = gammapy_stats.cash(n_on=data, mu_on=model)
    actual = np.sum(statsvec)
    assert_allclose(actual, desired)
@requires_dependency('sherpa')
def test_wstat():
    """Check the summed gammapy wstat against Sherpa's WStat statistic."""
    import sherpa.stats as ss
    sherpa_stat = ss.WStat()
    data, model, staterror, off_vec = get_test_data()
    alpha = np.ones(len(data)) * 0.2
    statsvec = gammapy_stats.wstat(n_on=data,
                                   mu_sig=model,
                                   n_off=off_vec,
                                   alpha=alpha,
                                   extra_terms=True)
    # This is how sherpa wants the background (found by trial and error)
    bkg = dict(bkg=off_vec,
               exposure_time=[1, 1],
               backscale_ratio=1. / alpha,
               data_size=len(data)
               )
    # Check for one bin first
    test_bin = 0
    bkg_testbin = dict(bkg=off_vec[test_bin],
                       exposure_time=[1, 1],
                       backscale_ratio=1. / alpha[test_bin],
                       data_size=1)
    desired_testbin, fvec = sherpa_stat.calc_stat(data[test_bin],
                                                  model[test_bin],
                                                  staterror=staterror[test_bin],
                                                  bkg=bkg_testbin)
    actual_testbin = statsvec[test_bin]
    # Per-bin comparison is currently disabled; only the summed statistic
    # below is validated against Sherpa.
    # assert_allclose(actual_testbin, desired_testbin)
    # Now check total stat for all bins
    desired, fvec = sherpa_stat.calc_stat(data, model, staterror=staterror,
                                          bkg=bkg)
    actual = np.sum(statsvec)
    # (removed a leftover debug `print(fvec)` from the test body)
    assert_allclose(actual, desired)
|
{"hexsha": "de9b6d05dd7f9cf02259b4fff3a7faeed38956c4", "size": 3448, "ext": "py", "lang": "Python", "max_stars_repo_path": "gammapy/stats/tests/test_fit_statistics.py", "max_stars_repo_name": "grburgess/gammapy", "max_stars_repo_head_hexsha": "609e460698caca7223afeef5e71826c7b32728d1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-01-28T12:21:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-10T19:58:07.000Z", "max_issues_repo_path": "gammapy/stats/tests/test_fit_statistics.py", "max_issues_repo_name": "grburgess/gammapy", "max_issues_repo_head_hexsha": "609e460698caca7223afeef5e71826c7b32728d1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gammapy/stats/tests/test_fit_statistics.py", "max_forks_repo_name": "grburgess/gammapy", "max_forks_repo_head_hexsha": "609e460698caca7223afeef5e71826c7b32728d1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2947368421, "max_line_length": 87, "alphanum_fraction": 0.6383410673, "include": true, "reason": "import numpy,from numpy,from astropy", "num_tokens": 807}
|
[STATEMENT]
lemma secureTT_iff_secure': "Orig.secureTT \<longleftrightarrow> Prime.secure"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Orig.secureTT = secure
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Orig.secureTT \<Longrightarrow> secure
2. secure \<Longrightarrow> Orig.secureTT
[PROOF STEP]
assume secure: "Orig.secureTT"
[PROOF STATE]
proof (state)
this:
Orig.secureTT
goal (2 subgoals):
1. Orig.secureTT \<Longrightarrow> secure
2. secure \<Longrightarrow> Orig.secureTT
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Orig.secureTT
[PROOF STEP]
show "Prime.secure"
[PROOF STATE]
proof (prove)
using this:
Orig.secureTT
goal (1 subgoal):
1. secure
[PROOF STEP]
proof (unfold Prime.secure_def, intro allI impI, elim conjE)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>Orig.secureTT; Orig.secureTT; Orig.secureTT; Orig.validFrom istate tr; never (\<lambda>a. False) tr; V tr = vl; B (these vl) (these vl1); never Option.is_none vl; never Option.is_none vl1\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
fix tr vl vl1
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>Orig.secureTT; Orig.secureTT; Orig.secureTT; Orig.validFrom istate tr; never (\<lambda>a. False) tr; V tr = vl; B (these vl) (these vl1); never Option.is_none vl; never Option.is_none vl1\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
assume tr: "Orig.validFrom istate tr" and V: "V tr = vl" and B: "B (these vl) (these vl1)"
and vl: "never Option.is_none vl" and vl1: "never Option.is_none vl1"
[PROOF STATE]
proof (state)
this:
Orig.validFrom istate tr
V tr = vl
B (these vl) (these vl1)
never Option.is_none vl
never Option.is_none vl1
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>Orig.secureTT; Orig.secureTT; Orig.secureTT; Orig.validFrom istate tr; never (\<lambda>a. False) tr; V tr = vl; B (these vl) (these vl1); never Option.is_none vl; never Option.is_none vl1\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
with secure
[PROOF STATE]
proof (chain)
picking this:
Orig.secureTT
Orig.validFrom istate tr
V tr = vl
B (these vl) (these vl1)
never Option.is_none vl
never Option.is_none vl1
[PROOF STEP]
obtain tr1 where "Orig.validFrom istate tr1" and "never T tr1"
and "Prime.O tr1 = Prime.O tr" and "Orig.V tr1 = these vl1"
[PROOF STATE]
proof (prove)
using this:
Orig.secureTT
Orig.validFrom istate tr
V tr = vl
B (these vl) (these vl1)
never Option.is_none vl
never Option.is_none vl1
goal (1 subgoal):
1. (\<And>tr1. \<lbrakk>Orig.validFrom istate tr1; never T tr1; O tr1 = O tr; Orig.V tr1 = these vl1\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (elim Orig.secureTT_E) (auto simp: V'_V)
[PROOF STATE]
proof (state)
this:
Orig.validFrom istate tr1
never T tr1
O tr1 = O tr
Orig.V tr1 = these vl1
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>Orig.secureTT; Orig.secureTT; Orig.secureTT; Orig.validFrom istate tr; never (\<lambda>a. False) tr; V tr = vl; B (these vl) (these vl1); never Option.is_none vl; never Option.is_none vl1\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Orig.validFrom istate tr1
never T tr1
O tr1 = O tr
Orig.V tr1 = these vl1
[PROOF STEP]
show "\<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1"
[PROOF STATE]
proof (prove)
using this:
Orig.validFrom istate tr1
never T tr1
O tr1 = O tr
Orig.V tr1 = these vl1
goal (1 subgoal):
1. \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
using vl1
[PROOF STATE]
proof (prove)
using this:
Orig.validFrom istate tr1
never T tr1
O tr1 = O tr
Orig.V tr1 = these vl1
never Option.is_none vl1
goal (1 subgoal):
1. \<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
[PROOF STEP]
by (intro exI[of _ tr1]) (auto simp: V'_V map_Some_these iff: list_all_iff)
[PROOF STATE]
proof (state)
this:
\<exists>tr1. Orig.validFrom istate tr1 \<and> O tr1 = O tr \<and> V tr1 = vl1
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
secure
goal (1 subgoal):
1. secure \<Longrightarrow> Orig.secureTT
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. secure \<Longrightarrow> Orig.secureTT
[PROOF STEP]
assume secure': "Prime.secure"
[PROOF STATE]
proof (state)
this:
secure
goal (1 subgoal):
1. secure \<Longrightarrow> Orig.secureTT
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
secure
[PROOF STEP]
show "Orig.secureTT"
[PROOF STATE]
proof (prove)
using this:
secure
goal (1 subgoal):
1. Orig.secureTT
[PROOF STEP]
proof (unfold Orig.secureTT_def, intro allI impI, elim conjE)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
fix tr vl vl1
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
assume "Orig.validFrom istate tr" and "never T tr" and "B vl vl1" and "Orig.V tr = vl"
[PROOF STATE]
proof (state)
this:
Orig.validFrom istate tr
never T tr
B vl vl1
Orig.V tr = vl
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
with secure'
[PROOF STATE]
proof (chain)
picking this:
secure
Orig.validFrom istate tr
never T tr
B vl vl1
Orig.V tr = vl
[PROOF STEP]
obtain tr1 where "Orig.validFrom istate tr1" and "Prime.O tr1 = Prime.O tr"
and V: "Prime.V tr1 = map Some vl1"
[PROOF STATE]
proof (prove)
using this:
secure
Orig.validFrom istate tr
never T tr
B vl vl1
Orig.V tr = vl
goal (1 subgoal):
1. (\<And>tr1. \<lbrakk>Orig.validFrom istate tr1; O tr1 = O tr; V tr1 = map Some vl1\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (elim Prime.secure_E) (auto iff: V'_V list_all_iff)
[PROOF STATE]
proof (state)
this:
Orig.validFrom istate tr1
O tr1 = O tr
V tr1 = map Some vl1
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Orig.validFrom istate tr1
O tr1 = O tr
V tr1 = map Some vl1
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
have "never T tr1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. never T tr1
[PROOF STEP]
using V
[PROOF STATE]
proof (prove)
using this:
V tr1 = map Some vl1
goal (1 subgoal):
1. never T tr1
[PROOF STEP]
by (intro V_Some_never_T)
[PROOF STATE]
proof (state)
this:
never T tr1
goal (1 subgoal):
1. \<And>tr vl vl1. \<lbrakk>secure; secure; secure; Orig.validFrom istate tr; never T tr; B vl vl1; Orig.V tr = vl\<rbrakk> \<Longrightarrow> \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Orig.validFrom istate tr1
O tr1 = O tr
V tr1 = map Some vl1
never T tr1
[PROOF STEP]
show "\<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1"
[PROOF STATE]
proof (prove)
using this:
Orig.validFrom istate tr1
O tr1 = O tr
V tr1 = map Some vl1
never T tr1
goal (1 subgoal):
1. \<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
[PROOF STEP]
by (intro exI[of _ tr1]) (auto simp: V'_V)
[PROOF STATE]
proof (state)
this:
\<exists>tr1. Orig.validFrom istate tr1 \<and> never T tr1 \<and> O tr1 = O tr \<and> Orig.V tr1 = vl1
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
Orig.secureTT
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3629, "file": "Bounded_Deducibility_Security_BD_Security_Triggers", "length": 34}
|
! { dg-do compile }
! { dg-options "-std=f2003" }
! Verify that invalid SOURCE= usage in ALLOCATE is diagnosed: a redundant
! SOURCE tag, SOURCE conflicting with a typespec, multiple allocate objects
! with one SOURCE (F2008 feature under -std=f2003), and type / rank / kind
! mismatches between SOURCE and the allocate object.
program a
implicit none
integer n, m(3,3)
integer(kind=8) k
integer, allocatable :: i(:), j(:)
real, allocatable :: x(:)
n = 42
m = n
k = 1_8
allocate(i(4), source=42, source=n) ! { dg-error "Redundant SOURCE tag found" }
allocate(integer(4) :: i(4), source=n) ! { dg-error "conflicts with the typespec" }
allocate(i(4), j(n), source=n) ! { dg-error "Fortran 2008: SOURCE tag at .1. with more than a single allocate object" }
allocate(x(4), source=n) ! { dg-error "type incompatible with" }
allocate(i(4), source=m) ! { dg-error "must be scalar or have the same rank" }
allocate(i(4), source=k) ! { dg-error "shall have the same kind type" }
end program a
|
{"hexsha": "ee6c3635912dbe8d2805cf2779f1c6f0ca98c3f0", "size": 744, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_alloc_opt_4.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_alloc_opt_4.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/allocate_alloc_opt_4.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 25.6551724138, "max_line_length": 121, "alphanum_fraction": 0.626344086, "num_tokens": 242}
|
import pyrender
import os
import trimesh
import numpy as np
import cv2
import os
import torch
from scipy.spatial.transform import Rotation as R
os.environ['PYOPENGL_PLATFORM'] = 'egl'
def to_homo(rotation, translation):
    """Build a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector."""
    rot = np.asarray(rotation, dtype=float).reshape(3, 3)
    trans = np.asarray(translation, dtype=float).reshape(3, 1)
    bottom = np.array([[0.0, 0.0, 0.0, 1.0]])
    return np.vstack([np.hstack([rot, trans]), bottom])
def get_corners(mesh, intrinsic, rotation, translation):
    """Project the corners of `mesh`'s bounding box into pixel coordinates.

    Applies the rigid transform (rotation, translation), then the pinhole
    `intrinsic` matrix and perspective division; returns an (N, 2) array.
    """
    corners = np.array(mesh.bounding_box.to_mesh().vertices)
    cam_pts = rotation @ corners.T + translation[:, np.newaxis]
    pix = intrinsic @ cam_pts
    return (pix[:2] / pix[2]).T
def add_pose_contour(mesh, intrinsic, rotation, translation, color, image, img_scaling=4, thickness=1):
    """Draw the projected 3D bounding box of `mesh` on a copy of `image`.

    The 8 projected corners are joined along the box edges; `img_scaling`
    divides the projected pixel coordinates (the display image is assumed
    downscaled by that factor).  Returns the annotated copy.
    """
    image = np.copy(image)
    height, width, _ = image.shape
    vs = get_corners(mesh, intrinsic, rotation, translation) / img_scaling
    ps = [(int(vs[i, 0]), int(vs[i, 1])) for i in range(vs.shape[0])]
    # z direction
    for i in range(4):
        cv2.line(image, ps[2 * i], ps[2 * i + 1], color, thickness=thickness)
    # y direction
    for j in range(2):
        for i in range(2):
            cv2.line(image, ps[i + 4 * j], ps[i + 2 + 4 * j], color, thickness=thickness)
    # x direction
    for i in range(4):
        cv2.line(image, ps[i], ps[i + 4], color, thickness=thickness)
    return image
def quaternion2rotation(quat):
    """Convert a scalar-first quaternion (w, x, y, z) to a 3x3 rotation matrix.

    Do not use the quat2dcm() function in the SPEED utils.py; it is not a
    rotation matrix.
    """
    assert (len(quat) == 4)
    # normalize first
    w, x, y, z = np.asarray(quat, dtype=float) / np.linalg.norm(quat)
    return np.array([
        [w * w + x * x - y * y - z * z, 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), w * w - x * x + y * y - z * z, 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), w * w - x * x - y * y + z * z],
    ])
class Renderer:
    """Offscreen pyrender wrapper for the SwissCube satellite model.

    Builds a scene (textured mesh + point light + pinhole camera) and
    exposes the small interface used by the pose-refinement code:
    ``set_pose``/``set_poses``, ``render``/``render_`` and the dataset
    iterator helper ``get_next``.

    Parameters
    ----------
    synthetic : bool
        If True, use the 1024x1024 synthetic-dataset intrinsics;
        otherwise the 2048x2048 real-camera intrinsics.
    """

    def __init__(self, synthetic=False):
        self.synthetic = synthetic
        os.environ['PYOPENGL_PLATFORM'] = 'egl'  # force headless EGL rendering
        tscene = trimesh.load('/cvlabdata2/cvlab/datasets_protopap/deepim/data/models/swisscube/swisscube.obj')
        mesh = pyrender.Mesh.from_trimesh(list(tscene.geometry.values()), smooth=False)
        if synthetic:
            width, height = 1024, 1024
        else:
            width, height = 2048, 2048
        self.renderer = pyrender.OffscreenRenderer(viewport_width=width, viewport_height=height, point_size=1.0)
        scene = pyrender.Scene(ambient_light=[0.02, 0.02, 0.02], bg_color=[0, 0, 0])
        light = pyrender.PointLight(color=[1.0, 1.0, 1.0], intensity=1000000.0)
        if synthetic:
            fx, fy, cx, cy = 607, 607, 512, 512
        else:
            fx, fy, cx, cy = 4000, 4000, 1024, 1024
        self.intrinsic = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape((3, 3))
        cam = pyrender.IntrinsicsCamera(fx, fy, cx, cy, zfar=2000)
        # Rotate the camera 180 deg about y so objects at positive z
        # (OpenCV convention) sit in front of pyrender's -z-looking camera.
        cam_rot = R.from_euler('y', 180, degrees=True).as_matrix()
        cam_matrix = to_homo(cam_rot, np.zeros((3,)))
        self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))
        nl = pyrender.Node(light=light, matrix=np.eye(4))
        nc = pyrender.Node(camera=cam, matrix=cam_matrix)
        scene.add_node(self.nm)
        scene.add_node(nl)
        scene.add_node(nc)
        self.scene = scene

    def set_light_pos(self, *args):
        """No-op; kept for interface compatibility with other renderers."""
        pass

    def set_light_color(self, *args):
        """No-op; kept for interface compatibility with other renderers."""
        pass

    def set_projection_matrix(self, width, height, fx, fy, px, py, znear, zfar):
        """No-op; intrinsics are fixed in __init__."""
        pass

    def set_poses(self, poses):
        """Apply the first pose of a pose list (single-object scene)."""
        self.set_pose(poses[0])

    def set_pose(self, pose):
        """Place the mesh at `pose` = [qa, qb, qc, qd, tx, ty, tz].

        A 9-element pose is assumed to carry two leading extra entries,
        which are dropped.  Real data uses the scalar-first helper
        `quaternion2rotation`; synthetic data uses scipy's scalar-last
        `from_quat` plus a 90 deg x-rotation model correction.
        """
        pose = np.array(pose)
        if pose.shape[0] == 9:
            pose = pose[2:]
        translation, rotation_quat = pose[4:], pose[:4]
        translation = np.array(translation)
        if not self.synthetic:
            rotation = quaternion2rotation(rotation_quat)
        else:
            rotation = R.from_quat(rotation_quat).as_matrix()
            rotation = rotation @ R.from_euler('x', 90, degrees=True).as_matrix()
        transform = to_homo(rotation, translation)
        self.scene.set_pose(self.nm, pose=transform)

    def render_(self):
        """Render and return (color, depth), each resized to 640x640,
        cropped to 640x480 and flipped along both axes."""
        color, depth = self.renderer.render(self.scene)
        color = cv2.resize(color, (640, 640), cv2.INTER_AREA)
        color = color[80:560]
        depth = cv2.resize(depth, (640, 640), cv2.INTER_AREA)
        depth = depth[80:560]
        return np.flip(color, (0, 1)).copy(), np.flip(depth, (0, 1)).copy()

    def render(self, image_tensor):
        """Render the scene and copy the CHW color image into `image_tensor`."""
        # BUG FIX: render_() returns (color, depth); the old code transposed
        # the tuple itself, which raised at runtime.  Use only the color map.
        rgb, _ = self.render_()
        tensor = torch.from_numpy(np.transpose(rgb, (2, 0, 1)))
        image_tensor.copy_(tensor)

    def get_next(self, iteritems):
        """Fetch the next (image, translation, rotation) sample.

        Real data: `iteritems` yields (path, pose-dict); rotation is the
        stored 'rotation_m2c' value (presumably a quaternion -- TODO
        confirm against the dataset).  Synthetic data: `iteritems` yields
        image paths; the ground-truth matrix is converted to a scalar-last
        quaternion so both branches can feed R.from_quat downstream.
        """
        if not self.synthetic:
            img, pose = next(iteritems)
            img = os.path.join(img.split('/')[0], 'Test', img.split('/')[1])
            img = cv2.imread(img)
            img = cv2.resize(img, (640, 640), cv2.INTER_AREA)
            img = img[80:560]
            rotation = pose['rotation_m2c']
            translation = pose['translation_m2c']
        else:
            img_path = next(iteritems).strip()
            full_path = os.path.join('/cvlabdata2/home/yhu/data/SwissCube_1.0', img_path)
            num = str(int(os.path.splitext(os.path.basename(full_path))[0]))
            img = cv2.imread(full_path)
            img = cv2.resize(img, (640, 640), cv2.INTER_AREA)
            img = img[80:560]
            seq_name = os.path.dirname(os.path.dirname(full_path))
            poses_name = os.path.join(seq_name, 'scene_gt.json')
            # NOTE(review): `json` is imported only in the __main__ block;
            # this method relies on that import having happened.
            with open(poses_name, 'r') as j:
                poses = json.load(j)
            pose = poses[num][0]
            translation = np.array(pose['cam_t_m2c'])
            rotation = np.array(pose['cam_R_m2c']).reshape((3, 3))
            # BUG FIX: scipy's Rotation has no `from_mat` (the old line
            # raised AttributeError).  Convert matrix -> quaternion so the
            # caller's R.from_quat(rotation) works for this branch too.
            rotation = R.from_matrix(rotation).as_quat()
        return img, translation, rotation
if __name__ == '__main__':
    # Interactive viewer: step through dataset samples and nudge the pose
    # with the keyboard, comparing the rendered model against the image.
    r = Renderer(True)
    import os
    import json
    if r.synthetic:
        os.chdir('/cvlabdata2/home/yhu/data/SwissCube_1.0')
        with open('training.txt', 'r') as f:
            images = f.readlines()
        iteritems = iter(images)
    else:
        os.chdir('/cvlabdata2/cvlab/datasets_protopap/SwissCubeReal')
        with open('data.json', 'r') as f:
            poses = json.load(f)
        iteritems = iter(poses.items())
    img, translation, rotation = r.get_next(iteritems)
    translation = np.array(translation)
    cv2.imshow('image', img)
    x, y, z = translation
    dx, dy, dz = 0, 0, 0
    while True:
        # NOTE(review): the dx/dy/dz deltas are re-applied to `rotation`
        # on every loop pass, so a nonzero delta keeps compounding until
        # reset -- confirm this is the intended interaction.
        rotation = R.from_quat(rotation).as_matrix() @ R.from_euler('xyz', [dx, dy, dz], degrees=True).as_matrix()
        rotation = R.from_matrix(rotation).as_quat()
        a, b, c, d = rotation
        pose = [a, b, c, d, x, y, z]
        r.set_pose(pose)
        color, depth = r.render_()
        print(depth.max())
        cv2.imshow('render', color)
        cv2.imshow('depth', depth)
        key = cv2.waitKey(0)
        if key == 27:  # Esc: quit
            break
        elif key == 13:  # Enter: next sample
            img, translation, rotation = r.get_next(iteritems)
            x, y, z = translation
            a, b, c, d = rotation
            cv2.imshow('image', img)
        elif key == 119:  # 'w': translate +y
            y += 10
        elif key == 115:  # 's': translate -y
            y -= 10
        elif key == 97:  # 'a': translate -x
            x -= 10
        elif key == 100:  # 'd': translate +x
            x += 10
        elif key == 101:  # 'e': translate +z
            z += 10
        elif key == 113:  # 'q': translate -z
            z -= 10
        elif key == 81:  # left arrow: rotate -y
            dy -= 15
        elif key == 83:  # right arrow: rotate +y
            dy += 15
        elif key == 82:  # up arrow: rotate -x
            dx -= 15
        elif key == 84:  # down arrow: rotate +x
            dx += 15
        elif key == 85:  # page up: rotate +z
            dz += 15
        elif key == 86:  # page down: rotate -z
            dz -= 15
|
{"hexsha": "3a4dc6e029a35e3d0c046bf603883d099d674367", "size": 8167, "ext": "py", "lang": "Python", "max_stars_repo_path": "render_swisscube.py", "max_stars_repo_name": "Komod0D/eccv18-rgb_pose_refinement", "max_stars_repo_head_hexsha": "1662e044e806b5d7d8fc0d09fd6f0da333dba663", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "render_swisscube.py", "max_issues_repo_name": "Komod0D/eccv18-rgb_pose_refinement", "max_issues_repo_head_hexsha": "1662e044e806b5d7d8fc0d09fd6f0da333dba663", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "render_swisscube.py", "max_forks_repo_name": "Komod0D/eccv18-rgb_pose_refinement", "max_forks_repo_head_hexsha": "1662e044e806b5d7d8fc0d09fd6f0da333dba663", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9157509158, "max_line_length": 114, "alphanum_fraction": 0.5398555161, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2332}
|
[STATEMENT]
lemma sources_sinks_aux:
"sources_aux I D U xs = sinks_aux (I\<inverse>) D U (rev xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sources_aux I D U xs = sinks_aux (I\<inverse>) D U (rev xs)
[PROOF STEP]
by (induction xs, simp_all)
|
{"llama_tokens": 107, "file": "Noninterference_Ipurge_Unwinding_IpurgeUnwinding", "length": 1}
|
// -*- mode: c++; indent-tabs-mode: nil; -*-
//
// Copyright (c) 2009-2013 Illumina, Inc.
//
// This software is provided under the terms and conditions of the
// Illumina Open Source Software License 1.
//
// You should have received a copy of the Illumina Open Source
// Software License 1 along with this program. If not, see
// <https://github.com/sequencing/licenses/>
//
/// \file
/// \author Chris Saunders
///
#ifndef __POSITION_SOMATIC_SNV_HH
#define __POSITION_SOMATIC_SNV_HH
#include "blt_common/snp_pos_info.hh"
#include "blt_common/position_snp_call_pprob_digt.hh"
#include "strelka/strelka_shared.hh"
#include "blt_util/qscore.hh"
#include <boost/utility.hpp>
#include <iosfwd>
//#define SOMATIC_DEBUG
// DDIGT: the joint diploid-genotype state space for the paired
// (normal, tumor) samples. One joint state packs one genotype per sample
// into a single index in [0, DIGT::SIZE^2).
namespace DDIGT {

enum index_t { SIZE = DIGT::SIZE*DIGT::SIZE };

/// Pack a (normal, tumor) genotype pair into a single joint-state index.
/// Layout is normal-major; inverse of get_digt_states().
inline
unsigned
get_state(const unsigned normal_gt,
          const unsigned tumor_gt) {
    return normal_gt+DIGT::SIZE*tumor_gt;
}

/// Unpack a joint-state index produced by get_state() back into its
/// normal-sample and tumor-sample genotype components.
inline
void
get_digt_states(const unsigned dgt,
                unsigned& normal_gt,
                unsigned& tumor_gt) {
    normal_gt = (dgt%DIGT::SIZE);
    tumor_gt = (dgt/DIGT::SIZE);
}

}
/// Stream a joint-genotype state index (debug/reporting aid).
std::ostream& operator<<(std::ostream& os,const DDIGT::index_t dgt);
/// Somatic SNV call result at one position, evaluated under two prior
/// models ("genome" and "poly"). Noncopyable: instances are filled in
/// place by the caller.
struct somatic_snv_genotype : private boost::noncopyable {

    somatic_snv_genotype()
        : is_snv(false), tier(0), ref_gt(0) {}

    /// Posterior summary for one prior model.
    struct result_set {

        /// Initialize every qphred field to the score implied by a flat
        /// posterior over all DDIGT joint states.
        result_set()
            : max_gt(0), pprob(DDIGT::SIZE)
        {
            // p is the probability of any single joint state under a flat
            // posterior; qp is the corresponding phred-scaled error prob.
            // static: computed once, shared by all result_set instances.
            static const blt_float_t p(1./static_cast<blt_float_t>(DDIGT::SIZE));
            static const int qp(error_prob_to_qphred((1.-p)));
            snv_qphred=qp;
            snv_from_ref_qphred=qp;
            snv_from_het_qphred=qp;
            snv_from_het_loh_qphred=qp;
            snv_from_het_nonloh_qphred=qp;
            snv_from_hom_qphred=qp;
            snv_from_anyhom_qphred=qp;
            max_gt_qphred=qp;
            for (unsigned i(0); i<DDIGT::SIZE; ++i) {
                pprob[i] = p;
            }
        }

        // TODO: add marginal normal/tumor genotypes

        // most probable joint (normal, tumor) genotype index
        unsigned max_gt;
        // phred-scaled scores for the SNV hypothesis and its sub-hypotheses
        // (relative to various normal-genotype assumptions)
        int snv_qphred;
        int snv_from_ref_qphred;
        int snv_from_het_qphred;
        int snv_from_het_loh_qphred;
        int snv_from_het_nonloh_qphred;
        int snv_from_hom_qphred;
        int snv_from_anyhom_qphred;
        int max_gt_qphred;
        // posterior probability per joint genotype state (size DDIGT::SIZE)
        std::vector<blt_float_t> pprob;
    };

    // true when a somatic SNV is called at this position
    bool is_snv;
    // tier of evidence used for the call (see caller for tier semantics)
    unsigned tier;
    // reference-base genotype index
    unsigned ref_gt;
    // results under the "genome" and "polymorphic" prior models
    result_set genome;
    result_set poly;
};
// snv call output:
//
// Write one somatic SNV call to 'os', annotated with the per-sample
// pileups (normal_pi / tumor_pi) at the called position.
//
void
write_somatic_snv_genotype(const strelka_options& opt,
                           const somatic_snv_genotype& sgt,
                           const snp_pos_info& normal_pi,
                           const snp_pos_info& tumor_pi,
                           std::ostream& os);
// object used to pre-compute priors:
//
// Log-priors over joint (normal, tumor) genotype states are computed once
// per reference base at construction and reused for every position.
struct somatic_snv_caller {

    somatic_snv_caller(const strelka_options& opt,
                       const pprob_digt_caller& pd_caller);

    // Evaluate the somatic SNV test at one position, filling 'sgt'.
    // The *_t2_ptr arguments carry the tier-2 versions of the pileups;
    // NOTE(review): they are pointers, presumably so tier-2 data can be
    // absent (null) — confirm against the implementation.
    void
    position_somatic_snv_call(const extended_pos_info& normal_epi,
                              const extended_pos_info& tumor_epi,
                              const extended_pos_info* normal_epi_t2_ptr,
                              const extended_pos_info* tumor_epi_t2_ptr,
                              somatic_snv_genotype& sgt) const;

private:
    // log-prior under the "genome" model, indexed by reference base
    const blt_float_t*
    lnprior_genomic(const unsigned ref_id) const {
        return _lnprior[ref_id].genome;
    }

    // log-prior under the "polymorphic" model, indexed by reference base
    const blt_float_t*
    lnprior_polymorphic(const unsigned ref_id) const {
        return _lnprior[ref_id].poly;
    }

    // one log-prior vector per joint genotype state, for each model
    struct prior_set {
        blt_float_t genome[DDIGT::SIZE];
        blt_float_t poly[DDIGT::SIZE];
    };

    const strelka_options& _opt;
    // one prior_set per reference base, plus one extra slot
    // (NOTE(review): presumably for the ambiguous 'N' base — confirm)
    prior_set _lnprior[N_BASE+1];
};
#endif
|
{"hexsha": "5ac574749208f6dad00a719f278868733c239f44", "size": 3756, "ext": "hh", "lang": "C++", "max_stars_repo_path": "isaac_variant_caller/src/lib/strelka/position_somatic_snv.hh", "max_stars_repo_name": "sequencing/isaac_variant_caller", "max_stars_repo_head_hexsha": "ed24e20b097ee04629f61014d3b81a6ea902c66b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 21.0, "max_stars_repo_stars_event_min_datetime": "2015-01-09T01:11:28.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-04T03:48:21.000Z", "max_issues_repo_path": "isaac_variant_caller/src/lib/strelka/position_somatic_snv.hh", "max_issues_repo_name": "sequencing/isaac_variant_caller", "max_issues_repo_head_hexsha": "ed24e20b097ee04629f61014d3b81a6ea902c66b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2015-07-23T09:38:39.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-01T05:37:26.000Z", "max_forks_repo_path": "isaac_variant_caller/src/lib/strelka/position_somatic_snv.hh", "max_forks_repo_name": "sequencing/isaac_variant_caller", "max_forks_repo_head_hexsha": "ed24e20b097ee04629f61014d3b81a6ea902c66b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:41:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-25T02:42:32.000Z", "avg_line_length": 24.5490196078, "max_line_length": 81, "alphanum_fraction": 0.6256656017, "num_tokens": 995}
|
/-
Copyright (c) 2020 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import measure_theory.measure.measure_space
import measure_theory.measure.regular
import topology.opens
import topology.compacts
/-!
# Contents
In this file we work with *contents*. A content `λ` is a function from a certain class of subsets
(such as the compact subsets) to `ℝ≥0` that is
* additive: If `K₁` and `K₂` are disjoint sets in the domain of `λ`,
then `λ(K₁ ∪ K₂) = λ(K₁) + λ(K₂)`;
* subadditive: If `K₁` and `K₂` are in the domain of `λ`, then `λ(K₁ ∪ K₂) ≤ λ(K₁) + λ(K₂)`;
* monotone: If `K₁ ⊆ K₂` are in the domain of `λ`, then `λ(K₁) ≤ λ(K₂)`.
We show that:
* Given a content `λ` on compact sets, let us define a function `λ*` on open sets, by letting
`λ* U` be the supremum of `λ K` for `K` included in `U`. This is a countably subadditive map that
vanishes at `∅`. In Halmos (1950) this is called the *inner content* `λ*` of `λ`, and formalized
as `inner_content`.
* Given an inner content, we define an outer measure `μ*`, by letting `μ* E` be the infimum of
`λ* U` over the open sets `U` containing `E`. This is indeed an outer measure. It is formalized
as `outer_measure`.
* Restricting this outer measure to Borel sets gives a regular measure `μ`.
We define bundled contents as `content`.
In this file we only work on contents on compact sets, and inner contents on open sets, and both
contents and inner contents map into the extended nonnegative reals. However, in other applications
other choices can be made, and it is not a priori clear what the best interface should be.
## Main definitions
For `μ : content G`, we define
* `μ.inner_content` : the inner content associated to `μ`.
* `μ.outer_measure` : the outer measure associated to `μ`.
* `μ.measure` : the Borel measure associated to `μ`.
We prove that, on a locally compact space, the measure `μ.measure` is regular.
## References
* Paul Halmos (1950), Measure Theory, §53
* <https://en.wikipedia.org/wiki/Content_(measure_theory)>
-/
universes u v w
noncomputable theory
open set topological_space
open_locale nnreal ennreal
namespace measure_theory
variables {G : Type w} [topological_space G]
/-- A content is an additive function on compact sets taking values in `ℝ≥0`. It is a device
from which one can define a measure. -/
structure content (G : Type w) [topological_space G] :=
(to_fun : compacts G → ℝ≥0)
(mono' : ∀ (K₁ K₂ : compacts G), K₁.1 ⊆ K₂.1 → to_fun K₁ ≤ to_fun K₂)
(sup_disjoint' : ∀ (K₁ K₂ : compacts G), disjoint K₁.1 K₂.1 →
  to_fun (K₁ ⊔ K₂) = to_fun K₁ + to_fun K₂)
(sup_le' : ∀ (K₁ K₂ : compacts G), to_fun (K₁ ⊔ K₂) ≤ to_fun K₁ + to_fun K₂)
-- The zero function is a content, so the type is inhabited.
instance : inhabited (content G) :=
⟨{ to_fun := λ K, 0,
  mono' := by simp,
  sup_disjoint' := by simp,
  sup_le' := by simp }⟩
/-- Although the `to_fun` field of a content takes values in `ℝ≥0`, we register a coercion to
functions taking values in `ℝ≥0∞` as most constructions below rely on taking suprs and infs, which
is more convenient in a complete lattice, and aim at constructing a measure. -/
instance : has_coe_to_fun (content G) (λ _, compacts G → ℝ≥0∞) := ⟨λ μ s, μ.to_fun s⟩
namespace content
variable (μ : content G)
-- Restate the structure fields in terms of the `ℝ≥0∞`-valued coercion `⇑μ`.
lemma apply_eq_coe_to_fun (K : compacts G) : μ K = μ.to_fun K := rfl
lemma mono (K₁ K₂ : compacts G) (h : K₁.1 ⊆ K₂.1) : μ K₁ ≤ μ K₂ :=
by simp [apply_eq_coe_to_fun, μ.mono' _ _ h]
lemma sup_disjoint (K₁ K₂ : compacts G) (h : disjoint K₁.1 K₂.1) : μ (K₁ ⊔ K₂) = μ K₁ + μ K₂ :=
by simp [apply_eq_coe_to_fun, μ.sup_disjoint' _ _ h]
lemma sup_le (K₁ K₂ : compacts G) : μ (K₁ ⊔ K₂) ≤ μ K₁ + μ K₂ :=
by { simp only [apply_eq_coe_to_fun], norm_cast, exact μ.sup_le' _ _ }
-- Finiteness is automatic: the content is `ℝ≥0`-valued before coercion.
lemma lt_top (K : compacts G) : μ K < ∞ :=
ennreal.coe_lt_top
-- `μ ⊥ = 0`: apply additivity to the disjoint pair `(⊥, ⊥)`.
lemma empty : μ ⊥ = 0 :=
begin
  have := μ.sup_disjoint' ⊥ ⊥,
  simpa [apply_eq_coe_to_fun] using this,
end
/-- Constructing the inner content of a content. From a content defined on the compact sets, we
obtain a function defined on all open sets, by taking the supremum of the content of all compact
subsets. -/
def inner_content (U : opens G) : ℝ≥0∞ :=
⨆ (K : compacts G) (h : K.1 ⊆ U), μ K
lemma le_inner_content (K : compacts G) (U : opens G)
  (h2 : K.1 ⊆ U) : μ K ≤ μ.inner_content U :=
le_supr_of_le K $ le_supr _ h2
lemma inner_content_le (U : opens G) (K : compacts G) (h2 : (U : set G) ⊆ K.1) :
  μ.inner_content U ≤ μ K :=
bsupr_le $ λ K' hK', μ.mono _ _ (subset.trans hK' h2)
-- On a set that is simultaneously compact and open, inner content and content agree.
lemma inner_content_of_is_compact {K : set G} (h1K : is_compact K) (h2K : is_open K) :
  μ.inner_content ⟨K, h2K⟩ = μ ⟨K, h1K⟩ :=
le_antisymm (bsupr_le $ λ K' hK', μ.mono _ ⟨K, h1K⟩ hK')
  (μ.le_inner_content _ _ subset.rfl)
lemma inner_content_empty :
  μ.inner_content ∅ = 0 :=
begin
  refine le_antisymm _ (zero_le _), rw ←μ.empty,
  refine bsupr_le (λ K hK, _),
  have : K = ⊥, { ext1, rw [subset_empty_iff.mp hK, compacts.bot_val] }, rw this, refl'
end
/-- This is "unbundled", because that is required for the API of `induced_outer_measure`. -/
lemma inner_content_mono ⦃U V : set G⦄ (hU : is_open U) (hV : is_open V)
  (h2 : U ⊆ V) : μ.inner_content ⟨U, hU⟩ ≤ μ.inner_content ⟨V, hV⟩ :=
supr_le_supr $ λ K, supr_le_supr_const $ λ hK, subset.trans hK h2
-- Below a finite inner content one can find a compact witness up to any `ε ≠ 0`.
lemma inner_content_exists_compact {U : opens G}
  (hU : μ.inner_content U ≠ ∞) {ε : ℝ≥0} (hε : ε ≠ 0) :
  ∃ K : compacts G, K.1 ⊆ U ∧ μ.inner_content U ≤ μ K + ε :=
begin
  have h'ε := ennreal.coe_ne_zero.2 hε,
  cases le_or_lt (μ.inner_content U) ε,
  { exact ⟨⊥, empty_subset _, le_add_left h⟩ },
  have := ennreal.sub_lt_self hU h.ne_bot h'ε,
  conv at this {to_rhs, rw inner_content }, simp only [lt_supr_iff] at this,
  rcases this with ⟨U, h1U, h2U⟩, refine ⟨U, h1U, _⟩,
  rw [← tsub_le_iff_right], exact le_of_lt h2U
end
/-- The inner content of a supremum of opens is at most the sum of the individual inner
contents. -/
lemma inner_content_Sup_nat [t2_space G] (U : ℕ → opens G) :
  μ.inner_content (⨆ (i : ℕ), U i) ≤ ∑' (i : ℕ), μ.inner_content (U i) :=
begin
  have h3 : ∀ (t : finset ℕ) (K : ℕ → compacts G), μ (t.sup K) ≤ t.sum (λ i, μ (K i)),
  { intros t K, refine finset.induction_on t _ _,
    { simp only [μ.empty, nonpos_iff_eq_zero, finset.sum_empty, finset.sup_empty], },
    { intros n s hn ih, rw [finset.sup_insert, finset.sum_insert hn],
      exact le_trans (μ.sup_le _ _) (add_le_add_left ih _) }},
  refine bsupr_le (λ K hK, _),
  rcases is_compact.elim_finite_subcover K.2 _ (λ i, (U i).prop) _ with ⟨t, ht⟩, swap,
  { convert hK, rw [opens.supr_def, subtype.coe_mk] },
  rcases K.2.finite_compact_cover t (coe ∘ U) (λ i _, (U _).prop) (by simp only [ht])
    with ⟨K', h1K', h2K', h3K'⟩,
  let L : ℕ → compacts G := λ n, ⟨K' n, h1K' n⟩,
  convert le_trans (h3 t L) _,
  { ext1, simp only [h3K', compacts.finset_sup_val, finset.sup_eq_supr, set.supr_eq_Union] },
  refine le_trans (finset.sum_le_sum _) (ennreal.sum_le_tsum t),
  intros i hi, refine le_trans _ (le_supr _ (L i)),
  refine le_trans _ (le_supr _ (h2K' i)), refl'
end
/-- The inner content of a union of sets is at most the sum of the individual inner contents.
This is the "unbundled" version of `inner_content_Sup_nat`.
It is required for the API of `induced_outer_measure`. -/
lemma inner_content_Union_nat [t2_space G] ⦃U : ℕ → set G⦄ (hU : ∀ (i : ℕ), is_open (U i)) :
  μ.inner_content ⟨⋃ (i : ℕ), U i, is_open_Union hU⟩ ≤ ∑' (i : ℕ), μ.inner_content ⟨U i, hU i⟩ :=
by { have := μ.inner_content_Sup_nat (λ i, ⟨U i, hU i⟩), rwa [opens.supr_def] at this }
-- Inner content is invariant under a homeomorphism that leaves `μ` invariant.
lemma inner_content_comap (f : G ≃ₜ G)
  (h : ∀ ⦃K : compacts G⦄, μ (K.map f f.continuous) = μ K) (U : opens G) :
  μ.inner_content (opens.comap f.to_continuous_map U) = μ.inner_content U :=
begin
  refine supr_congr _ ((compacts.equiv f).surjective) _,
  intro K, refine supr_congr_Prop image_subset_iff _,
  intro hK, simp only [equiv.coe_fn_mk, subtype.mk_eq_mk, ennreal.coe_eq_coe, compacts.equiv],
  apply h,
end
@[to_additive]
lemma is_mul_left_invariant_inner_content [group G] [topological_group G]
  (h : ∀ (g : G) {K : compacts G}, μ (K.map _ $ continuous_mul_left g) = μ K) (g : G)
  (U : opens G) :
  μ.inner_content (opens.comap (homeomorph.mul_left g).to_continuous_map U) = μ.inner_content U :=
by convert μ.inner_content_comap (homeomorph.mul_left g) (λ K, h g) U
@[to_additive]
lemma inner_content_pos_of_is_mul_left_invariant [t2_space G] [group G] [topological_group G]
  (h3 : ∀ (g : G) {K : compacts G}, μ (K.map _ $ continuous_mul_left g) = μ K)
  (K : compacts G) (hK : μ K ≠ 0) (U : opens G) (hU : (U : set G).nonempty) :
  0 < μ.inner_content U :=
begin
  have : (interior (U : set G)).nonempty, rwa [U.prop.interior_eq],
  rcases compact_covered_by_mul_left_translates K.2 this with ⟨s, hs⟩,
  suffices : μ K ≤ s.card * μ.inner_content U,
  { exact (ennreal.mul_pos_iff.mp $ hK.bot_lt.trans_le this).2 },
  have : K.1 ⊆ ↑⨆ (g ∈ s), opens.comap (homeomorph.mul_left g).to_continuous_map U,
  { simpa only [opens.supr_def, opens.coe_comap, subtype.coe_mk] },
  refine (μ.le_inner_content _ _ this).trans _,
  refine (rel_supr_sum (μ.inner_content) (μ.inner_content_empty) (≤)
    (μ.inner_content_Sup_nat) _ _).trans _,
  simp only [μ.is_mul_left_invariant_inner_content h3, finset.sum_const, nsmul_eq_mul, le_refl]
end
lemma inner_content_mono' ⦃U V : set G⦄
  (hU : is_open U) (hV : is_open V) (h2 : U ⊆ V) :
  μ.inner_content ⟨U, hU⟩ ≤ μ.inner_content ⟨V, hV⟩ :=
supr_le_supr $ λ K, supr_le_supr_const $ λ hK, subset.trans hK h2
/-- Extending a content on compact sets to an outer measure on all sets. -/
protected def outer_measure : outer_measure G :=
induced_outer_measure (λ U hU, μ.inner_content ⟨U, hU⟩) is_open_empty μ.inner_content_empty
-- From here on we need a Hausdorff space, so that compact sets are closed.
variables [t2_space G]
lemma outer_measure_opens (U : opens G) : μ.outer_measure U = μ.inner_content U :=
induced_outer_measure_eq' (λ _, is_open_Union) μ.inner_content_Union_nat μ.inner_content_mono U.2
lemma outer_measure_of_is_open (U : set G) (hU : is_open U) :
  μ.outer_measure U = μ.inner_content ⟨U, hU⟩ :=
μ.outer_measure_opens ⟨U, hU⟩
lemma outer_measure_le
  (U : opens G) (K : compacts G) (hUK : (U : set G) ⊆ K.1) : μ.outer_measure U ≤ μ K :=
(μ.outer_measure_opens U).le.trans $ μ.inner_content_le U K hUK
lemma le_outer_measure_compacts (K : compacts G) : μ K ≤ μ.outer_measure K.1 :=
begin
  rw [content.outer_measure, induced_outer_measure_eq_infi],
  { exact le_infi (λ U, le_infi $ λ hU, le_infi $ μ.le_inner_content K ⟨U, hU⟩) },
  { exact μ.inner_content_Union_nat },
  { exact μ.inner_content_mono }
end
lemma outer_measure_eq_infi (A : set G) :
  μ.outer_measure A = ⨅ (U : set G) (hU : is_open U) (h : A ⊆ U), μ.inner_content ⟨U, hU⟩ :=
induced_outer_measure_eq_infi _ μ.inner_content_Union_nat μ.inner_content_mono A
lemma outer_measure_interior_compacts (K : compacts G) : μ.outer_measure (interior K.1) ≤ μ K :=
le_trans (le_of_eq $ μ.outer_measure_opens (opens.interior K.1))
         (μ.inner_content_le _ _ interior_subset)
lemma outer_measure_exists_compact {U : opens G} (hU : μ.outer_measure U ≠ ∞) {ε : ℝ≥0}
  (hε : ε ≠ 0) : ∃ K : compacts G, K.1 ⊆ U ∧ μ.outer_measure U ≤ μ.outer_measure K.1 + ε :=
begin
  rw [μ.outer_measure_opens] at hU ⊢,
  rcases μ.inner_content_exists_compact hU hε with ⟨K, h1K, h2K⟩,
  exact ⟨K, h1K, le_trans h2K $ add_le_add_right (μ.le_outer_measure_compacts K) _⟩,
end
lemma outer_measure_exists_open {A : set G} (hA : μ.outer_measure A ≠ ∞) {ε : ℝ≥0} (hε : ε ≠ 0) :
  ∃ U : opens G, A ⊆ U ∧ μ.outer_measure U ≤ μ.outer_measure A + ε :=
begin
  rcases induced_outer_measure_exists_set _ _ μ.inner_content_mono hA (ennreal.coe_ne_zero.2 hε)
    with ⟨U, hU, h2U, h3U⟩,
  exact ⟨⟨U, hU⟩, h2U, h3U⟩, swap, exact μ.inner_content_Union_nat
end
lemma outer_measure_preimage (f : G ≃ₜ G) (h : ∀ ⦃K : compacts G⦄, μ (K.map f f.continuous) = μ K)
  (A : set G) : μ.outer_measure (f ⁻¹' A) = μ.outer_measure A :=
begin
  refine induced_outer_measure_preimage _ μ.inner_content_Union_nat μ.inner_content_mono _
    (λ s, f.is_open_preimage) _,
  intros s hs, convert μ.inner_content_comap f h ⟨s, hs⟩
end
lemma outer_measure_lt_top_of_is_compact [locally_compact_space G]
  {K : set G} (hK : is_compact K) : μ.outer_measure K < ∞ :=
begin
  rcases exists_compact_superset hK with ⟨F, h1F, h2F⟩,
  calc
  μ.outer_measure K ≤ μ.outer_measure (interior F) : outer_measure.mono' _ h2F
  ... ≤ μ ⟨F, h1F⟩ :
    by apply μ.outer_measure_le ⟨interior F, is_open_interior⟩ ⟨F, h1F⟩ interior_subset
  ... < ⊤ : μ.lt_top _
end
@[to_additive]
lemma is_mul_left_invariant_outer_measure [group G] [topological_group G]
  (h : ∀ (g : G) {K : compacts G}, μ (K.map _ $ continuous_mul_left g) = μ K) (g : G)
  (A : set G) : μ.outer_measure ((λ h, g * h) ⁻¹' A) = μ.outer_measure A :=
by convert μ.outer_measure_preimage (homeomorph.mul_left g) (λ K, h g) A
lemma outer_measure_caratheodory (A : set G) :
  μ.outer_measure.caratheodory.measurable_set' A ↔ ∀ (U : opens G),
  μ.outer_measure (U ∩ A) + μ.outer_measure (U \ A) ≤ μ.outer_measure U :=
begin
  dsimp [opens], rw subtype.forall,
  apply induced_outer_measure_caratheodory,
  apply inner_content_Union_nat,
  apply inner_content_mono'
end
@[to_additive]
lemma outer_measure_pos_of_is_mul_left_invariant [group G] [topological_group G]
  (h3 : ∀ (g : G) {K : compacts G}, μ (K.map _ $ continuous_mul_left g) = μ K)
  (K : compacts G) (hK : μ K ≠ 0) {U : set G} (h1U : is_open U) (h2U : U.nonempty) :
  0 < μ.outer_measure U :=
by { convert μ.inner_content_pos_of_is_mul_left_invariant h3 K hK ⟨U, h1U⟩ h2U,
  exact μ.outer_measure_opens ⟨U, h1U⟩ }
-- Now bring in a Borel measurable structure, to turn the outer measure into a measure.
variables [S : measurable_space G] [borel_space G]
include S
/-- For the outer measure coming from a content, all Borel sets are measurable. -/
lemma borel_le_caratheodory : S ≤ μ.outer_measure.caratheodory :=
begin
  rw [@borel_space.measurable_eq G _ _],
  refine measurable_space.generate_from_le _,
  intros U hU,
  rw μ.outer_measure_caratheodory,
  intro U',
  rw μ.outer_measure_of_is_open ((U' : set G) ∩ U) (is_open.inter U'.prop hU),
  simp only [inner_content, supr_subtype'], rw [opens.coe_mk],
  haveI : nonempty {L : compacts G // L.1 ⊆ U' ∩ U} := ⟨⟨⊥, empty_subset _⟩⟩,
  rw [ennreal.supr_add],
  refine supr_le _, rintro ⟨L, hL⟩, simp only [subset_inter_iff] at hL,
  have : ↑U' \ U ⊆ U' \ L.1 := diff_subset_diff_right hL.2,
  refine le_trans (add_le_add_left (μ.outer_measure.mono' this) _) _,
  rw μ.outer_measure_of_is_open (↑U' \ L.1) (is_open.sdiff U'.2 L.2.is_closed),
  simp only [inner_content, supr_subtype'], rw [opens.coe_mk],
  haveI : nonempty {M : compacts G // M.1 ⊆ ↑U' \ L.1} := ⟨⟨⊥, empty_subset _⟩⟩,
  rw [ennreal.add_supr], refine supr_le _, rintro ⟨M, hM⟩, simp only [subset_diff] at hM,
  have : (L ⊔ M).1 ⊆ U',
  { simp only [union_subset_iff, compacts.sup_val, hM, hL, and_self] },
  rw μ.outer_measure_of_is_open ↑U' U'.2,
  refine le_trans (ge_of_eq _) (μ.le_inner_content _ _ this),
  exact μ.sup_disjoint _ _ hM.2.symm,
end
/-- The measure induced by the outer measure coming from a content, on the Borel sigma-algebra. -/
protected def measure : measure G := μ.outer_measure.to_measure μ.borel_le_caratheodory
lemma measure_apply {s : set G} (hs : measurable_set s) : μ.measure s = μ.outer_measure s :=
to_measure_apply _ _ hs
/-- In a locally compact space, any measure constructed from a content is regular. -/
instance regular [locally_compact_space G] : μ.measure.regular :=
begin
  haveI : μ.measure.outer_regular,
  { refine ⟨λ A hA r (hr : _ < _), _⟩,
    rw [μ.measure_apply hA, outer_measure_eq_infi] at hr,
    simp only [infi_lt_iff] at hr,
    rcases hr with ⟨U, hUo, hAU, hr⟩,
    rw [← μ.outer_measure_of_is_open U hUo, ← μ.measure_apply hUo.measurable_set] at hr,
    exact ⟨U, hAU, hUo, hr⟩ },
  split,
  { intros K hK,
    rw [measure_apply _ hK.measurable_set],
    exact μ.outer_measure_lt_top_of_is_compact hK },
  { intros U hU r hr,
    rw [measure_apply _ hU.measurable_set, μ.outer_measure_of_is_open U hU] at hr,
    simp only [inner_content, lt_supr_iff] at hr,
    rcases hr with ⟨K, hKU, hr⟩,
    refine ⟨K.1, hKU, K.2, hr.trans_le _⟩,
    exact (μ.le_outer_measure_compacts K).trans (le_to_measure_apply _ _ _) },
end
end content
end measure_theory
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/measure_theory/measure/content.lean"}
|
from numpy import array
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import random, math, copy, statistics
from IPython.display import clear_output
from time import sleep
import matplotlib.pyplot as plt
# Fix the stdlib RNG seed for reproducibility. Note: numpy's RNG (used for
# particle positions) is NOT seeded here, so positions still vary per run.
random.seed(1)
def F6(x, y):
    """Schaffer F6 function evaluated at (x, y).

    Has its global maximum of 1.0 at the origin; values tend toward 0.5
    far from the origin.
    """
    r2 = x ** 2 + y ** 2
    return 0.5 - ((math.sin(math.sqrt(r2)) ** 2 - 0.5) / (1 + 0.001 * r2) ** 2)


def calcFitness(individuo):
    """Fitness of an even-length individual: sum of F6 over its (x, y) pairs.

    For the default 10-dimensional individual the maximum is 5.0 (all five
    pairs at the origin). Generalized from five hard-coded F6 calls to any
    even number of coordinates; the dead local `erro = 5 - F6_mod` from the
    original (computed, never used) has been removed.
    """
    return sum(F6(individuo[i], individuo[i + 1])
               for i in range(0, len(individuo) - 1, 2))
def particlesInit(tPart=100, d=10):
    """Build the initial swarm: `tPart` particles of dimension `d`.

    Each particle is a 4-item list [fitness, position, velocity, best_position],
    with positions drawn uniformly from [-100, 100) and a scalar velocity in
    [0, 1). The personal best starts as (an alias of) the initial position.
    """
    swarm = []
    for _ in range(tPart):
        posicao = np.random.uniform(-100, 100, size=d)
        aptidao = calcFitness(posicao)
        swarm.append([aptidao, posicao, random.uniform(0, 1), posicao])
    return swarm
def particlesCopy(particles):
    """Return a per-field shallow copy of every particle in `particles`.

    Each of the four fields (fitness, position, velocity, best position) is
    copied with `copy.copy`, so mutating a field of the returned particles
    does not affect the originals.
    """
    return [
        [copy.copy(fit), copy.copy(pos), copy.copy(vel), copy.copy(best)]
        for fit, pos, vel, best in particles
    ]
def calculaMediaExecucoes(melhoresIndExec, popFitExec):
    """Average per-iteration curves over a set of executions.

    Args:
        melhoresIndExec: list (one entry per execution) of per-iteration
            best-fitness curves; all curves must have the same length.
        popFitExec: list (one entry per execution) of per-iteration mean
            population-fitness curves, same length as above.

    Returns:
        Tuple (mean best-fitness curve, mean population-fitness curve),
        each with one value per iteration.
    """
    # Size the outputs from the data instead of the original hard-coded
    # `[0]*500`, so the helper works for any iteration count (for 500
    # iterations the result is unchanged; shorter inputs no longer return
    # zero-padded tails).
    n = len(melhoresIndExec[0])
    mediaMeIndExec = [statistics.mean(ex[i] for ex in melhoresIndExec)
                      for i in range(n)]
    mediaPopFitExec = [statistics.mean(ex[i] for ex in popFitExec)
                       for i in range(n)]
    return mediaMeIndExec, mediaPopFitExec
def calculaMediaPopulacoes(popsMeExec, popsFitPopExec):
    """Average per-iteration curves over a set of initial populations.

    Args:
        popsMeExec: list (one entry per initial population) of per-iteration
            best-fitness curves; all curves must have the same length.
        popsFitPopExec: list (one entry per initial population) of
            per-iteration mean population-fitness curves, same length.

    Returns:
        Tuple (mean best-fitness curve, mean population-fitness curve),
        each with one value per iteration.
    """
    # As in calculaMediaExecucoes: derive the length from the input rather
    # than the original hard-coded `[0]*500` preallocation.
    n = len(popsMeExec[0])
    mediaPopsMeInd = [statistics.mean(ex[i] for ex in popsMeExec)
                      for i in range(n)]
    mediaPopsFitPop = [statistics.mean(ex[i] for ex in popsFitPopExec)
                       for i in range(n)]
    return mediaPopsMeInd, mediaPopsFitPop
# --- PSO hyperparameters ---
iteracoes = 500       # iterations per execution
nParticulas = 100     # swarm size
dimensoes = 10        # dimensions per particle (5 (x, y) pairs for F6)
c1 = 2                # cognitive coefficient
c2 = 2                # social coefficient
err_crit = 0.001      # early-stop threshold (the check below is commented out)
w = 0.6               # inertia weight
nPopInicial = 5       # number of distinct initial populations
nExecucoes = 30       # executions per initial population
# NOTE(review): list-multiplication aliases the inner lists, but each slot is
# fully reassigned per population index below, so the aliasing is harmless.
partsMeExec = [ [0 for i in range(iteracoes)] ]*nPopInicial
partsFitPopExec = [ [0 for i in range(iteracoes)] ]*nPopInicial
for populacaoInicial in range(nPopInicial):
    # One shared initial swarm per population; each execution restarts from a
    # per-field copy of it so executions are independent.
    particles = particlesInit(nParticulas, dimensoes)
    melhoresIndExec = [0]*nExecucoes
    partFitExec = [0]*nExecucoes
    for ex in range(nExecucoes):
        particulas = particlesCopy(particles)
        melhoresIteracao = [0] * iteracoes
        mediaPartFit = [0] * iteracoes
        # Global best starts as a random particle.
        # NOTE(review): `gbest = p` below stores a *reference* to the particle,
        # so the "global best" tracks that particle's later mutations rather
        # than freezing its best state — confirm whether this is intended.
        gbest = random.choice(particulas)
        err = 9999  # unused while the err_crit early-stop stays commented out
        # lista_particulas = []
        for i in range(iteracoes):
            fPart = []
            for p in particulas:
                # Particle layout: p = [fitness, position, velocity, best_pos].
                fitness = calcFitness(p[1])
                fPart.append(fitness)
                if fitness > p[0]:
                    p[0] = fitness
                    p[3] = p[1]
                if fitness > gbest[0]:
                    gbest = p
                # Standard PSO velocity update (inertia + cognitive + social).
                # NOTE(review): `v` is never written back to p[2], so the
                # stored velocity stays at its initial scalar — confirm.
                v = w*p[2] + c1*random.uniform(0,1)*(p[3] - p[1]) + c2*random.uniform(0,1)*(gbest[3] - p[1])
                p[1] = p[1] + v
            mediaPartFit[i] = statistics.mean(fPart)
            melhoresIteracao[i] = gbest[0]
            # lista_particulas.append(gbest[0])
            # if err < err_crit:
            #     break
            # if i % (iteracoes/10) == 10:
            #     print('.')
        print(f'Execução: {ex+1}')
        print(f'População: {populacaoInicial+1}')
        clear_output(wait=True)
        melhoresIndExec[ex] = melhoresIteracao
        partFitExec[ex] = mediaPartFit
    partsMeExec[populacaoInicial], partsFitPopExec[populacaoInicial] = calculaMediaExecucoes(melhoresIndExec, partFitExec)
# Average over all initial populations.
[bestInd,fitnessPopulation] = calculaMediaPopulacoes(partsMeExec, partsFitPopExec)
# NOTE(review): the plot below uses `melhoresIteracao`/`mediaPartFit` (last
# execution only), not the averaged `bestInd`/`fitnessPopulation` — confirm.
# NOTE(review): the next two statements are duplicated verbatim; the second
# pair simply recomputes the same `y` and `df`.
y = {'Época': [i for i in range(500)], 'Melhores Indivíduos': melhoresIteracao, 'Média Fitness da População': mediaPartFit}
df = pd.DataFrame(data=y, columns=['Época','Melhores Indivíduos', 'Média Fitness da População'])
y = {'Época': [i for i in range(500)], 'Melhores Indivíduos': melhoresIteracao, 'Média Fitness da População': mediaPartFit}
df = pd.DataFrame(data=y, columns=['Época','Melhores Indivíduos', 'Média Fitness da População'])
grafico1 = px.scatter(df, x='Época', y=['Melhores Indivíduos','Média Fitness da População'],
                      range_x=[-50,550],range_y=[2,5], title='F6',)
# grafico2 = px.scatter(x = [i+1 for i in range(epocas)], y = fitnessPopulation, range_y=[0,5])
# grafico3 = go.Figure(data = grafico1.data + grafico2.data)
grafico1.update_layout(title={
        'text': "PSO F6 Modificada",
        'y':0.9,
        'x':0.5,
        'xanchor': 'center',
        'yanchor': 'top'},
    xaxis_title="Iteração",
    yaxis_title="Fitness",
    legend_title="Legenda",
    font=dict(
        family="Courier New, monospace",
        size=18,
    ))
grafico1.show()
|
{"hexsha": "617339213e7168f5a457fcfee6fd167dcf081071", "size": 5692, "ext": "py", "lang": "Python", "max_stars_repo_path": "pso_f6_mod.py", "max_stars_repo_name": "CaioMM/f6_mod", "max_stars_repo_head_hexsha": "34f5a0fbc8f24d62a6b593939fa1b7296a8384dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pso_f6_mod.py", "max_issues_repo_name": "CaioMM/f6_mod", "max_issues_repo_head_hexsha": "34f5a0fbc8f24d62a6b593939fa1b7296a8384dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pso_f6_mod.py", "max_forks_repo_name": "CaioMM/f6_mod", "max_forks_repo_head_hexsha": "34f5a0fbc8f24d62a6b593939fa1b7296a8384dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9202453988, "max_line_length": 173, "alphanum_fraction": 0.569395643, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1552}
|
"""Plot survey and synthetic matrices for France, Japan, and Shanghai, China as shown in figure 3."""
import numpy as np
import pandas as pd
import matplotlib as mplt
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.font_manager as font_manager
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
import matplotlib.ticker as ticker
from matplotlib.collections import PatchCollection
from matplotlib.colors import LinearSegmentedColormap, LogNorm
from matplotlib.ticker import LogLocator, LogFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.backends.backend_pdf
import cmocean
import cmasher
import seaborn
import copy
import os
# set the font family style
mplt.rcParams['font.family'] = 'Myriad Pro' # change to a font you have installed on your computer - checkout Google Fonts for free fonts available for download
# set some initial paths
# path to the directory where this script lives
thisdir = os.path.abspath('')
# path to the main directory of the repository (two levels up from this script)
maindir = os.path.split(os.path.split(thisdir)[0])[0]
# path to the analysis_results subdirectory (one level up from this script)
analysisdir = os.path.split(thisdir)[0]
# path to the data subdirectory at the repository root
datadir = os.path.join(os.path.split(os.path.split(thisdir)[0])[0], 'data')
# path to the figures subdirectory within analysis_results
figdir = os.path.join(analysisdir, 'figures')
def read_contact_matrix(location, country, level, setting, num_agebrackets=85):
    """
    Read in the contact matrix for one setting.

    Args:
        location (str)        : name of the location
        country (str)         : name of the country
        level (str)           : name of level (country or subnational)
        setting (str)         : name of the contact setting
        num_agebrackets (int) : the number of age brackets for the matrix

    Returns:
        np.ndarray: A numpy matrix of contact.
    """
    # 'overall' matrices use the M/contact_matrix naming scheme; all other
    # settings use F/setting.
    if setting == 'overall':
        setting_type, setting_suffix = 'M', 'contact_matrix'
    else:
        setting_type, setting_suffix = 'F', 'setting'

    # Assemble the underscore-joined file name piece by piece.
    parts = [country, level]
    if level == 'country':
        parts.append('level')
    else:
        parts.append(location)
    parts += [setting_type, setting, setting_suffix, '%i' % num_agebrackets]
    file_name = '_'.join(parts) + '.csv'

    file_path = os.path.join(datadir, 'contact_matrices', file_name)
    return np.loadtxt(file_path, delimiter=',')
def read_validation_contact_matrix(location, matrix_type):
    """
    Read in either the survey or synthetic contact matrix used in the validation analysis of figure 3b.

    Args:
        location (str)    : name of the location
        matrix_type (str) : 'Model' for the overall synthetic contact matrix, or 'Survey' for the empirical survey matrix.

    Returns:
        np.ndarray: A numpy matrix of contact.
    """
    # Validation matrices live under analysis_results and are space-delimited.
    file_name = '{}_{}.csv'.format(matrix_type, location)
    file_path = os.path.join(analysisdir, 'survey_model_matrix_comparisons', file_name)
    return np.loadtxt(file_path, delimiter=' ')
def plot_matrix(location, matrix_type, cmap=cmocean.cm.deep_r, save=False, show=False):
    """
    Plot the matrices from figure 3b.

    Args:
        location (str)    : name of the location
        matrix_type (str) : 'Model' for the overall synthetic contact matrix, or 'Survey' for the empirical survey matrix.
        cmap (str or matplotlib.colors.LinearSegmentedColormap) : name of the colormap to use
        save (bool)       : If True, save the figure
        show (bool)       : If True, show the figure

    Returns:
        Matplotlib figure.

    Raises:
        ValueError: if the matrix does not have 15 or 17 age brackets (the
            only layouts with defined tick labels).
    """
    if isinstance(cmap, str):
        cmap = mplt.cm.get_cmap(cmap)

    fontsizes = {'colorbar': 30, 'colorbarlabels': 22, 'title': 44, 'ylabel': 28, 'xlabel': 28, 'xticks': 24, 'yticks': 24}

    # defaults without colorbar label
    left = 0.155
    right = 0.935
    top = 0.90
    bottom = 0.12

    # move margins in a little
    left -= 0.00
    right -= 0.07
    top -= 0.03
    bottom += 0.03

    fig = plt.figure(figsize=(8, 8))
    fig.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
    ax = fig.add_subplot(111)

    matrix = read_validation_contact_matrix(location, matrix_type)
    num_agebrackets = len(matrix)

    # Row-normalize, then normalize the whole matrix, so entries are
    # frequencies summing to 1.
    for a in range(num_agebrackets):
        matrix[a, :] = matrix[a, :] / np.sum(matrix[a, :])
    matrix = matrix / np.sum(matrix)

    # Bug fix: the original set min_CM = 1e-1 and max_CM = 1e-4, giving
    # LogNorm a vmin greater than its vmax, which matplotlib rejects.
    # Swap the values so the log color scale runs from 1e-4 up to 1e-1.
    min_CM = 1e-4
    max_CM = 1e-1
    im = ax.imshow(matrix.T, origin='lower', interpolation='nearest', cmap=cmap, norm=LogNorm(vmin=min_CM, vmax=max_CM))

    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='4%', pad=0.15)
    cbar = fig.colorbar(im, cax=cax)
    cbar.ax.tick_params(labelsize=fontsizes['colorbarlabels'])
    cbar.ax.set_ylabel('Frequency of contacts', fontsize=fontsizes['ylabel'])

    title_dic = {'Model': 'Synthetic', 'Survey': 'Survey'}
    ax.set_title(title_dic[matrix_type] + ' Matrix', fontsize=fontsizes['title'])
    ax.set_ylabel('Age of contact', fontsize=fontsizes['ylabel'])
    ax.set_xlabel('Age', fontsize=fontsizes['xlabel'])

    # Tick labels are defined only for the two bracket layouts used in the
    # paper (15 brackets in 5y bins, or 17 brackets with a split under-10).
    if num_agebrackets == 15:
        age_brackets = [str(5*i) + '-' + str(5 * (i+1)-1) for i in range(14)] + ['70+']
    elif num_agebrackets == 17:
        age_brackets = ['0-2', '3-6', '7-9'] + [str(5*i) + '-' + str(5 * (i+1) - 1) for i in range(2, 15)] + ['75+']
    else:
        # Previously an unexpected bracket count crashed later with a
        # NameError; fail early with a clear message instead.
        raise ValueError('unsupported number of age brackets: %i' % num_agebrackets)

    ax.set_xticks(np.arange(len(age_brackets)))
    ax.set_xticklabels(age_brackets, rotation=60)
    ax.set_yticks(np.arange(len(age_brackets)))
    ax.set_yticklabels(age_brackets)
    ax.tick_params(labelsize=fontsizes['xticks'])

    if show:
        plt.show()
    if save:
        file_name = 'fig_3b_' + matrix_type + '_' + location + '.pdf'
        fig_path = os.path.join(figdir, file_name)
        fig.savefig(fig_path, format='pdf')
    # The docstring promised a figure; actually return it so callers can
    # inspect or close it.
    return fig
if __name__ == '__main__':
    # One location and one matrix type per run; uncomment lines to switch.
    # location = 'France'
    # location = 'Japan'
    location = 'Shanghai'
    # matrix_type = 'Model'
    matrix_type = 'Survey'
    save = True
    show = False
    plot_matrix(location, matrix_type, save=save, show=show)
|
{"hexsha": "9cdc6ec6e0758085fbf33008de8e3b7e53b43251", "size": 6496, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_results/scripts/fig_3b.py", "max_stars_repo_name": "DongxiaW/mixing-patterns", "max_stars_repo_head_hexsha": "e841a934b826ecc98bf443026c32e7c6b7aa75bc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 30, "max_stars_repo_stars_event_min_datetime": "2020-03-03T07:05:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T22:40:49.000Z", "max_issues_repo_path": "analysis_results/scripts/fig_3b.py", "max_issues_repo_name": "DongxiaW/mixing-patterns", "max_issues_repo_head_hexsha": "e841a934b826ecc98bf443026c32e7c6b7aa75bc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-03T07:22:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:51:47.000Z", "max_forks_repo_path": "analysis_results/scripts/fig_3b.py", "max_forks_repo_name": "DongxiaW/mixing-patterns", "max_forks_repo_head_hexsha": "e841a934b826ecc98bf443026c32e7c6b7aa75bc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-12-10T08:39:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T00:12:54.000Z", "avg_line_length": 34.5531914894, "max_line_length": 215, "alphanum_fraction": 0.6651785714, "include": true, "reason": "import numpy", "num_tokens": 1654}
|
#!/usr/bin/env python
"""
DataProcParams class for importing, working with, and storing data processing
parameters (e.g., PINGU's V5 processing).
"""
from __future__ import absolute_import, division
from collections import Mapping, OrderedDict, Sequence
from copy import deepcopy
from itertools import izip
import os
import re
import h5py
# Note that the form of the numpy import is intentional, so that cuts -- which
# are exectuted with `eval` -- will have access to numpy's namespace without
# explicit reference to numpy. It's a hack, but it works well.
from numpy import * # pylint: disable=wildcard-import, unused-wildcard-import, redefined-builtin
import numpy # pylint: disable=unused-import
import numpy as np # pylint: disable=reimported
from pisa.utils import jsons
from pisa.utils.flavInt import NuFlav, IntType, FlavIntData
from pisa.utils.log import logging
from pisa.utils import resources
__all__ = ['MULTI_PART_FIELDS', 'NU_PDG_CODES', 'DataProcParams']
__author__ = 'J.L. Lanfranchi'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
# HDF5 fields whose nodes may contain several rows (particles) per event;
# see the special-case handling of 'I3MCTree' in DataProcParams.get_data,
# which reduces such fields to one entry per event.
MULTI_PART_FIELDS = [
    'I3MCTree',
]
# PDG Monte Carlo particle codes for nue/nuebar, numu/numubar, nutau/nutaubar
NU_PDG_CODES = [-12, 12, -14, 14, -16, 16]
class DataProcParams(dict):
    """Class for importing, working with, and storing data processing
    parameters.

    Implements cutting and particle identification (PID) functionality that can
    be applied to MC/data that have the specified version of processing applied
    to it.

    Parameters
    ----------
    data_proc_params : string or dict
        If string: looks for the corresponding JSON resource (file) and loads
        the contents as a data_proc_params dict
        If dict: taken to be data_proc_params dict
        The data_proc_params dict must follow the format described below.
    detector : string
        Converted to lower-case string which must be a detector key in
        data_proc_params dict
    proc_ver
        Converted to lower-case string which must be a proc_ver key in
        data_proc_params dict

    Notes
    -----
    All information describing the processing version is loaded from a JSON
    file with the following defined format:

    Note that the following common cuts are defined in this class and so
    needn't be defined in the JSON file:
                      '1' : Select particles
                     '-1' : Select anti-particles
                     'cc' : Select charged-current (CC) interactions
                     'nc' : Select neutral-current (NC) interactions
       'true_upgoing_zen' : Select true-upgoing events by zenith angle
    'true_upgoing_coszen' : Select true-upgoing events by cos(zenith) angle

    data_proc_params dictionary format (and same for corresponding JSON file):
    {
      # Specify the detector name, lower case
      "<lower-case detector name>": {
        # Specify the processing version, lower case
        # Examples in PINGU include "4", "5", and "5.1"
        "<lower-case processing version>": {
          # Mapping from standard names to path where these can be found in
          # source HDF5 file; separate HDF5 path nodes with a forward-slash.
          #
          # Fields that cuts or particles in the "cuts"/"pid" sections below
          # require (e.g., "cuts_step_1" for PINGU v5 processing), must be
          # added here so the code knows how to extract the info from the
          # source HDF5 file.
          #
          # Outputting a PID field to the destination PISA HDF5 file will *not*
          # be done if the "pid" field is omitted below.
          #
          # In addition to the below-named fields, "true_coszen" and
          # "reco_coszen" are generated from the data from the "true_zenith"
          # and "reco_zenith" fields, respectively. So any of those fields can
          # be used via the aforementioned names.
          "field_map": {
            "run": "<HDF5 path to corresponding node>",
            "nu_code": "<HDF5 path to corresponding node>",
            "true_energy": "<HDF5 path to corresponding node>",
            "true_zenith": "<HDF5 path to corresponding node>",
            "reco_energy": "<HDF5 path to corresponding node>",
            "reco_zenith": "<HDF5 path to corresponding node>",
            "one_weight": "<HDF5 path to corresponding node>",
            "generator_volume": "<HDF5 path to corresponding node>",
            "generator_radius": "<HDF5 path to corresponding node>",
            "detection_length": "<HDF5 path to corresponding node>",
            "interaction_type": "<HDF5 path to corresponding node>",
            "azimuth_min": "<HDF5 path to corresponding node>",
            "azimuth_max": "<HDF5 path to corresponding node>",
            "zenith_min": "<HDF5 path to corresponding node>",
            "zenith_max": "<HDF5 path to corresponding node>",
            "energy_log_min": "<HDF5 path to corresponding node>",
            "energy_log_max": "<HDF5 path to corresponding node>",
            "num_events_per_file": "<HDF5 path to corresponding node>",
            "sim_spectral_index": "<HDF5 path to corresponding node>",
            "pid": "<HDF5 path to corresponding node>",
          },

          # Mapping from file's nu code to PDG nu codes (only necessary if
          # nu_code values are not the PDG codes listed below)
          "nu_code_to_pdg_map": {
            "<source nue code>":        12,
            "<source nue_bar code>":   -12,
            "<source numu code>":       14,
            "<source numu_bar code>":  -14,
            "<source nutau code>":      16,
            "<source nutau_bar code>": -16
          },

          # Specify standard cuts
          "cuts": {
            # Cut name; "bjrej" and "analysis" listed here are just example
            # names for cuts one might specify
            "bgrej": {
              # Which of the fields in the field_map (and the derived fields
              # such as true_coszen and reco_coszen) are required for this cut?
              "fields": ["<field1>", "<field2>", ... ],
              # Expression for an event to pass the cut (not get thrown away);
              # see below for details on specifying an expression
              "pass_if": "<expression>"
            },
            "analysis": {
              "fields": ["<field1>", "<field2>", ... ],
              "pass_if": "<expression>"
            },
            "<cut name>": {
              "fields": ["<field1>", "<field2>", ... ],
              "pass_if": "<expression>"
            }
          },

          # Particle identification section
          "pid": {
            # Name of the particle (case-insensitive); e.g., in PINGU this
            # would be "trck" or "cscd"
            "<particle name 1>": {
              # Which of the fields in the field_map (and the derived fields
              # such as true_coszen and reco_coszen) are required for this cut?
              "field": [<field1>, <field2>, ...],
              # Expression for an event to be classified as this type of
              # particle; # see below for details on specifying an expression
              "criteria": "<expression>"
            }
            "<particle name 2>": {
              "field": [<field1>, <field2>, ...],
              "criteria": "<expression>"
            }
          }
        }
      }
    }

    Note that cuts "pass_if" and pid "criteria" expressions can make use of the
    numpy namespace and have access to any columns extracted from the source
    HDF5 file, by the standardized names given in the "field_map". For example,
    if the following "fields" are specified for a cut in the data_proc_params
    dict:
        ["cuts_step_1", "cuts_step_2"]
    then the following is a valid "pass_if" expression:
        "(reco_zenith > pi/2) & (cuts_step_1 == 1) & (cuts_step_2 == 1)"
    """
    def __init__(self, detector, proc_ver, data_proc_params=None):
        super(DataProcParams, self).__init__()
        if data_proc_params is None:
            data_proc_params = 'events/data_proc_params.json'
        if isinstance(data_proc_params, basestring):
            ps = jsons.from_json(resources.find_resource(data_proc_params))
        elif isinstance(data_proc_params, dict):
            ps = data_proc_params
        else:
            # BUGFIX: previously concatenated a str with a `type` object,
            # which itself raises an (uninformative) TypeError; wrap in str()
            raise TypeError('Unhandled data_proc_params type passed in arg: '
                            + str(type(data_proc_params)))
        self.detector = detector
        self.proc_ver = str(proc_ver)
        # Case-insensitive lookup of the detector key
        self.det_key = [k for k in ps.keys()
                        if k.lower() == self.detector.lower()][0]
        for key in ps[self.det_key].keys():
            lk = key.lower()
            lpv = self.proc_ver.lower()
            if lk == lpv or ('v'+lk == lpv) or (lk == 'v'+lpv):
                self.procver_key = key
            # This works for PINGU
            elif ('msu_'+lk == lpv) or (lk == 'msu_'+lpv):
                self.procver_key = key
            # Generalising for DeepCore and different selections
            elif ('nbi_'+lk == lpv) or (lk == 'nbi_'+lpv):
                self.procver_key = key
        # NOTE(review): if no key matched above, `self.procver_key` is unset
        # and the following lookup raises AttributeError; a ValueError naming
        # the unrecognized proc_ver would be clearer -- left as-is to avoid
        # changing the exception type callers may rely on.
        ps = ps[self.det_key][self.procver_key]
        self.update(ps)

        self.trans_nu_code = False
        if 'nu_code_to_pdg_map' in self:
            self.trans_nu_code = True
            try:
                # JSON keys are strings; convert to int where possible
                self.nu_code_to_pdg_map = {
                    int(code): pdg
                    for code, pdg in self['nu_code_to_pdg_map'].items()
                }
            except (ValueError, TypeError):
                # Codes aren't integer-like; use the mapping as-is
                self.nu_code_to_pdg_map = self['nu_code_to_pdg_map']

        # NOTE: the keys are strings so the particular string formatting is
        # important for indexing into the dict!

        # Add generic cuts
        self['cuts'].update({
            # Cut for particles only (no anti-particles)
            str(NuFlav(12).bar_code).lower():
                {'fields': ['nu_code'], 'pass_if': 'nu_code > 0'},
            # Cut for anti-particles only (no particles)
            str(NuFlav(-12).bar_code).lower():
                {'fields': ['nu_code'], 'pass_if': 'nu_code < 0'},
            # Cut for charged-current interactions only
            str(IntType('cc')).lower():
                {'fields': ['interaction_type'],
                 'pass_if': 'interaction_type == 1'},
            # Cut for neutral-current interactions only
            str(IntType('nc')).lower():
                {'fields': ['interaction_type'],
                 'pass_if': 'interaction_type == 2'},
            # True-upgoing cut using the zenith field
            'true_upgoing_zen':
                {'fields': ['true_zenith'], 'pass_if': 'true_zenith > pi/2'},
            # True-upgoing cut using the cosine-zenith field
            'true_upgoing_coszen':
                {'fields': ['true_coszen'], 'pass_if': 'true_coszen < 0'},
        })

        # Enforce rules on cuts:
        self.validate_cut_spec(self['cuts'])

    @staticmethod
    def validate_cut_spec(cuts):
        """Validate a cut specification dictionary"""
        for cutname, cutspec in cuts.iteritems():
            # Cut names are lower-case strings with no surrounding whitespace
            assert isinstance(cutname, basestring)
            assert cutname == cutname.lower()
            assert cutname == cutname.strip()
            # Has appropriate keys (and no extra)
            assert len(cutspec) == 2
            # `in` replaces dict.has_key, which is absent in Python 3
            assert 'fields' in cutspec
            assert 'pass_if' in cutspec
            assert not isinstance(cutspec['fields'], basestring)
            # 'fields' contains a sequence
            assert hasattr(cutspec['fields'], '__iter__') and \
                    not isinstance(cutspec['fields'], basestring)
            # 'pass_if' contains a string
            assert isinstance(cutspec['pass_if'], basestring)

    @staticmethod
    def validate_pid_spec(pids):
        """Validate a PID specification dictionary"""
        for particle_name, pidspec in pids.iteritems():
            # Particle names are lower-case strings with no surrounding
            # whitespace
            assert isinstance(particle_name, basestring)
            assert particle_name == particle_name.lower()
            assert particle_name == particle_name.strip()
            # Has appropriate keys (and no extra)
            assert len(pidspec) == 2
            assert 'fields' in pidspec
            assert 'criteria' in pidspec
            assert not isinstance(pidspec['fields'], basestring)
            # 'fields' contains a sequence
            assert hasattr(pidspec['fields'], '__iter__') and \
                    not isinstance(pidspec['fields'], basestring)
            # 'criteria' contains a string
            assert isinstance(pidspec['criteria'], basestring)

    # TODO: prefix the field names with e.g. "$" such that anything that is
    # _not_ prefixed by this is not replaced. This allows for richer
    # expressions (but also dangerous things...).
    @staticmethod
    def retrieve_expression(h5group, expression):
        """Retrieve data from an HDF5 group `h5group` according to
        `expression`. This can apply expressions with simple mathematical
        operators and numpy functions to multiple fields within the HDF5 file
        to derive the output. Python keywords are _not_ allowed, since they
        may alias with a name.

        Refer to any numpy functions by prefixing with either "np.<func>" or
        "numpy.<func>". In order to specify division, spaces must surround the
        forward slash, such that it isn't interpreted as a path.

        Nodes in the HDF5 hierarchy are separated by forward slashes ("/") in a
        path spec. We restrict valid HDF5 node names to contain the characters
        a-z, A-Z, 0-9, periods ("."), and underscores ("_"). with the
        additional restriction that the node name must not start with a period
        or a number, and a path cannot start with a slash.

        Parameters
        ----------
        h5group : h5py Group
        expression : string
            Expression to evaluate.

        Returns
        -------
        result : result of evaluating `expression`

        Examples
        --------
        >>> retrieve_expression('np.sqrt(MCneutrino/x**2 + MCneutrino/y**2)')

        Indexing into the data arrays can also be performed, and numpy masks
        used as usual:

        >>> expr = 'I3MCTree/energy[I3MCTree/event == I3EventHeader[0]
        """
        h5path_re = re.compile(
            r'''
            ([a-z_]          # First character must be letter or underscore
             [a-z0-9_.]*     # 0 or more legal chars: letters, numbers, _, .
             (?:             # (Do not return the following group separately)
                [/]{0,1}     # Next character CAN be no or 1 front-slash
                [a-z0-9_.]+  # But a slash *must* be followed by legal chars
             )*              # Slash+chars pattern might not occur, or repeat
            )''', re.VERBOSE | re.IGNORECASE
        )
        numpy_re = re.compile(r'^(np|numpy)\.[a-z_.]+', re.IGNORECASE)

        eval_str = expression
        intermediate_data = {}
        for h5path in h5path_re.findall(expression):
            if numpy_re.match(h5path):
                # numpy references are left verbatim in the eval string
                continue
            intermediate_data[h5path] = DataProcParams.retrieve_node_data(
                h5group, h5path
            )
            eval_str = eval_str.replace(h5path,
                                        "intermediate_data['%s']"%h5path)

        try:
            result = eval(eval_str)  # pylint: disable=eval-used
        except:
            logging.error('`expression` "%s" was translated into `eval_str`'
                          ' "%s" and failed to evaluate.',
                          expression, eval_str)
            raise
        return result

    @staticmethod
    def retrieve_node_data(h5group, address, allow_missing=False):
        """Retrieve data from an HDF5 group `group` at address `address`.
        Levels of hierarchy are separated by forward-slashes ('/').

        See h5py for further details on specifying a valid `address`.
        """
        subgroup = h5group
        for sub_addy in address.split('/'):
            try:
                subgroup = subgroup[sub_addy]
            except KeyError:
                if allow_missing:
                    return None
                raise
        return subgroup

    @staticmethod
    def populate_global_namespace(h5group, field_map, allow_missing=False):
        """Populate the Python global namespace with variables named as the
        keys in `field_map` and values loaded from the `h5group` at addresses
        specified by the corresponding values in `field_map`.
        """
        for var, h5path in field_map.items():
            try:
                # NOTE(review): allow_missing is deliberately NOT forwarded
                # here; a missing node always raises and the except clause
                # below implements the allow_missing behavior (returning early
                # with any previously-set globals left in place).
                value = DataProcParams.retrieve_node_data(
                    h5group, h5path, allow_missing=False
                )
            except KeyError:
                if allow_missing:
                    return None
                raise
            globals()[var] = value

    # TODO: make the following behave like `retrieve_expression` method which
    # does not rely on populating globals (just a dict, the name of which gets
    # substituted in where appropriate to the expression) to work.
    @staticmethod
    def cut_bool_idx(h5group, cut_fields, keep_criteria):
        """Return numpy boolean indexing for data in `h5group` given a cut
        specified using `cut_fields` in the `h5group` and evaluation criteria
        `keep_criteria`

        Parameters
        ----------
        h5group : h5py node/entity
        cut_fields : field_map dict
        keep_criteria : string

        Returns
        -------
        bool_idx : numpy array (1=keep, 0=reject)
        """
        DataProcParams.populate_global_namespace(h5group, cut_fields)
        bool_idx = eval(keep_criteria)  # pylint: disable=eval-used
        return bool_idx

    def get_data(self, h5, run_settings=None, flav=None, file_type='mc'):
        """Get data attached to an HDF5 node, returned as a dictionary.

        The returned dictionary's keys match those in the field_map and the
        dict's values are the data from the HDF5's nodes found at the addresses
        specified as values in the field_map

        Parameters
        ----------
        h5 : h5py File/Group or string
            If a string, interpreted as a path to an HDF5 file which is opened
            (and closed) here.
        run_settings : optional
            Currently unused; consistency checks are TODO.
        flav : optional
            Currently unused; flavor filtering is TODO.
        file_type : string, one of {'mc', 'data'}
            For 'data' files, MC-only fields (see `not_fields_in_data`) are
            skipped.
        """
        # Fields that exist only in MC files, not in real-data files
        not_fields_in_data = ['I3MCWeightDict', 'PrimaryNu', 'trueNeutrino']
        myfile = False
        try:
            if isinstance(h5, basestring):
                myfile = True
                h5 = h5py.File(os.path.expandvars(os.path.expanduser(h5)),
                               mode='r')
            data = OrderedDict()
            for name, path in self['field_map'].items():
                path_parts = path.split('/')
                # BUGFIX: `file_type` was previously undefined in this method
                # (NameError), and the condition's operator precedence
                # (`A and B or C or D`) skipped MC-only fields regardless of
                # file type; `not_fields_in_data` was defined but unused.
                # Skip MC-only fields for real-data files only, and do so
                # before the (potentially expensive) retrieval.
                if (file_type == 'data'
                        and any(f in path_parts for f in not_fields_in_data)):
                    continue
                datum = self.retrieve_expression(h5, path)
                if path_parts[0] == 'I3MCTree' and path_parts[-1] != 'Event':
                    evts = self.retrieve_node_data(
                        h5, '/'.join(path_parts[:-1] + ['Event'])
                    )
                    pdgs = self.retrieve_node_data(
                        h5, '/'.join(path_parts[:-1] + ['pdg_encoding'])
                    )
                    energies = self.retrieve_node_data(
                        h5, '/'.join(path_parts[:-1] + ['energy'])
                    )
                    # Looping here is ugly and slow, but people don't make the
                    # Event field unique, so the only thing you can count on is
                    # that if the event number changes in sequence, you're in a
                    # different Event (for now, I think). The actual Event
                    # number can be repeated elsewhere, though.
                    #
                    # This makes for wonderfully reproducible results.
                    # </sardonic laughter>

                    # Keep, per event, only the highest-energy neutrino's value
                    new_datum = []
                    this_evt = np.nan
                    this_d = None
                    for d, evt, pdg, egy in izip(datum, evts, pdgs, energies):
                        if evt != this_evt:
                            if this_d is not None:
                                new_datum.append(this_d)
                            this_egy = -np.inf
                            this_d = None
                            this_evt = evt
                        if egy > this_egy and pdg in NU_PDG_CODES:
                            this_egy = egy
                            this_d = d
                    if this_d is not None:
                        new_datum.append(this_d)
                    datum = new_datum

                data[name] = np.array(datum)
        finally:
            # Only close the file if we opened it ourselves
            if myfile and isinstance(h5, h5py.File):
                try:
                    h5.close()
                except:  # TODO: specify exception type(s)!
                    pass

        self.interpret_data(data)
        # TODO: enable consistency checks here & implement in run_settings
        #if run_settings is not None:
        #    run_settings.consistency_checks(data, flav=flav)

        # TODO: implement flav filtering (or not? or more advanced filtering?)
        return data

    def interpret_data(self, data):
        """Perform mappings from non-standard to standard values (such as
        translating non-PDG neutrino flavor codes to PDG codes) and add
        fields expected to be useful (such as coszen, derived from zen fields).

        Attach / reattach the translated/new fields to the `data` object passed
        into this method.
        """
        for k, v in data.items():
            # NOTE(review): this replaces a whole list-like value by its first
            # element -- presumably unwrapping singly-nested containers from
            # the HDF5 read. Looks suspicious for multi-event lists; confirm
            # against the callers before changing.
            if isinstance(v, Sequence):
                data[k] = v[0]
        if self.trans_nu_code:
            data['nu_code'] = [
                self.nu_code_to_pdg_map[code] for code in data['nu_code']
            ]
        if 'true_zenith' in data:
            data['true_coszen'] = np.cos(data['true_zenith'])
        if 'reco_zenith' in data:
            data['reco_coszen'] = np.cos(data['reco_zenith'])
        return data

    @staticmethod
    def subselect(data, fields, indices=None):
        """Return a copy of `data` reduced to `fields`, optionally indexed by
        `indices`.

        Parameters
        ----------
        data : FlavIntData or Mapping
        fields : sequence of field names to keep
        indices : optional indexing object applied to each kept field's values
        """
        if isinstance(data, FlavIntData):
            outdata = FlavIntData()
            for flavint in data.flavints:
                outdata[flavint] = DataProcParams.subselect(data[flavint],
                                                            fields=fields,
                                                            indices=indices)
            # BUGFIX: `outdata` was built but never returned (the method fell
            # through and implicitly returned None)
            return outdata
        if isinstance(data, Mapping):
            if indices is None:
                return {k:v for k, v in data.items() if k in fields}
            return {k:v[indices] for k, v in data.items() if k in fields}

    def apply_cuts(self, data, cuts, boolean_op='&', return_fields=None):
        """Perform `cuts` on `data` and return a dict containing
        `return_fields` from events that pass the cuts.

        Parameters
        ----------
        data : single-level dict or FlavIntData object
        cuts : string or dict, or sequence thereof
        boolean_op : string
            Operator used to combine multiple cut criteria (e.g. '&', '|')
        return_fields : string or sequence thereof
        """
        if isinstance(data, FlavIntData):
            outdata = FlavIntData()
            for flavint in data.flavints:
                outdata[flavint] = self.apply_cuts(
                    data[flavint], cuts=cuts, boolean_op=boolean_op,
                    return_fields=return_fields
                )
            return outdata

        if isinstance(cuts, (basestring, dict)):
            cuts = [cuts]

        # Default is to return all fields
        if return_fields is None:
            return_fields = data.keys()

        # If no cuts specified, return all data from specified fields
        if not cuts:
            return self.subselect(data, return_fields)

        cut_strings = set()
        cut_fields = set()
        for cut in cuts:
            if isinstance(cut, dict):
                self.validate_cut_spec(cut)
            elif cut.lower() in self['cuts']:
                cut = self['cuts'][cut.lower()]
            else:
                # ValueError (a subclass of Exception, so backward compatible)
                # instead of the over-broad bare Exception raised previously
                raise ValueError('Unrecognized or invalid cut: "'+str(cut)+'"')
            cut_strings.add(cut['pass_if'])
            cut_fields.update(cut['fields'])

        # Combine cut criteria strings together with boolean operation
        cut_string = boolean_op.join(['('+cs+')' for cs in cut_strings])

        # Load the fields necessary for the cut into the global namespace
        for field in set(cut_fields):
            globals()[field] = data[field]

        # Evaluate cuts, returning a boolean array
        try:
            bool_idx = eval(cut_string)  # pylint: disable=eval-used
        except:
            logging.error('Failed to evaluate `cut_string` "%s"', cut_string)
            raise

        # Return specified (or all) fields, indexed by boolean array
        return {f:np.array(data[f])[bool_idx] for f in return_fields}
|
{"hexsha": "786f7e771f19ff4a5844458dcc442f8c09f38215", "size": 25724, "ext": "py", "lang": "Python", "max_stars_repo_path": "pisa/utils/data_proc_params.py", "max_stars_repo_name": "torkjellsdatter/pisa", "max_stars_repo_head_hexsha": "7b26b0ac40c873a87786286acfd1c96abf724a99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pisa/utils/data_proc_params.py", "max_issues_repo_name": "torkjellsdatter/pisa", "max_issues_repo_head_hexsha": "7b26b0ac40c873a87786286acfd1c96abf724a99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pisa/utils/data_proc_params.py", "max_forks_repo_name": "torkjellsdatter/pisa", "max_forks_repo_head_hexsha": "7b26b0ac40c873a87786286acfd1c96abf724a99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4539877301, "max_line_length": 96, "alphanum_fraction": 0.5739387343, "include": true, "reason": "import numpy,from numpy", "num_tokens": 5683}
|
[STATEMENT]
lemma "implc_get_offending_flows [ACL_not_with]
\<lparr> nodesL = [''A'', ''B'', ''C''], edgesL = [(''B'', ''A''), (''B'', ''C''), (''A'', ''B'')] \<rparr> =
[[(''B'', ''C'')], [(''A'', ''B'')]]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. implc_get_offending_flows [ACL_not_with] \<lparr>nodesL = [''A'', ''B'', ''C''], edgesL = [(''B'', ''A''), (''B'', ''C''), (''A'', ''B'')]\<rparr> = [[(''B'', ''C'')], [(''A'', ''B'')]]
[PROOF STEP]
by eval
|
{"llama_tokens": 246, "file": "Network_Security_Policy_Verification_Examples_Impl_List_Playground_ChairNetwork_statefulpolicy_example", "length": 1}
|
#!/usr/bin/env python3
"""Print the current UTC and local sidereal time (LST) for an observatory,
and the current elevation of a named source as seen from there."""
import argparse

import astropy.time
import astropy.coordinates

# Station positions: UK608 from geodetic coordinates, IE613 from ITRF
# geocentric coordinates (both LOFAR LBA stations).
locUK608 = astropy.coordinates.EarthLocation.from_geodetic(lat=51.143833512, lon=-1.433500703, height=176.028) # UK608 LBA
locIE613 = astropy.coordinates.EarthLocation.from_geocentric(3801633.528060000, -529021.899396000, 5076997.185, unit='m') # IE613 LBA

parser = argparse.ArgumentParser(
    description='''Print the UTC and LST for an observatory''')
parser.add_argument('-o', '--observatory', help='Observatory name, default: UK608', default='UK608')
# BUGFIX: help text previously said "B051+21", which did not match the actual
# default value of 'PSR B0531+21' (the Crab pulsar)
parser.add_argument('-s', '--source', help='source name, default PSR B0531+21', default='PSR B0531+21')
args = parser.parse_args()

t = astropy.time.Time.now()
if args.observatory.startswith('UK608'): t.location = locUK608
elif args.observatory.startswith('IE613'): t.location = locIE613
# NOTE(review): an unrecognized observatory name leaves t.location unset and
# sidereal_time() below then raises; an explicit error message would be kinder.
lst = t.sidereal_time('mean')
print('Observatory:', args.observatory, t.location.to_geodetic())
print('UTC: ', t)
print('LST: ', lst)

# Resolve the source name via online catalogs (requires network access) and
# report its current altitude at the chosen observatory.
source = astropy.coordinates.SkyCoord.from_name(args.source)
sourcealtaz = source.transform_to(astropy.coordinates.AltAz(obstime=t,location=t.location))
print("Elevation = {0.alt:.2}".format(sourcealtaz))
|
{"hexsha": "4f8f16615caad5c78c00a0a90264992bc9606b86", "size": 1170, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/lst.py", "max_stars_repo_name": "griffinfoster/artemis", "max_stars_repo_head_hexsha": "bf445b7d2dc9127676503a4efd633d3492e6139a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/lst.py", "max_issues_repo_name": "griffinfoster/artemis", "max_issues_repo_head_hexsha": "bf445b7d2dc9127676503a4efd633d3492e6139a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/lst.py", "max_forks_repo_name": "griffinfoster/artemis", "max_forks_repo_head_hexsha": "bf445b7d2dc9127676503a4efd633d3492e6139a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3448275862, "max_line_length": 133, "alphanum_fraction": 0.7581196581, "include": true, "reason": "import astropy", "num_tokens": 326}
|
# -*- coding: utf-8 -*-
"""
somasnakes
===========
Original package is adjusted for soma detection by donghaozhang and siqiliu.
This soma submodule can be used for soma detection only, but this submodule is
currently embedded in rivuletpy. The soma mask can be generate by setting
its corresponding argument. Soma detection requires an initial soma centroid,
estimated somatic radius and grayscale neuron image. Soma growth is based on
the Morphological Active Contours without Edges algorithm. The original paper
is named as A morphological approach to curvature-based
evolution of curves and surfaces.The following papers are Rivulet papers.
The soma growth algorithm can converge by applying the sliding window.
Journal Rivulet Paper : Rivulet: 3D Neuron Morphology Tracing
with Iterative Back-Tracking Conference Rivulet Paper : Reconstruction
of 3D neuron morphology using Rivulet back-tracking
soma is a submodule of rivuletpy
"""
__author__ = "Donghao Zhang <zdhpeter1991@gmail.com>, Siqi Liu <lsqshr@gmail.com>"
from itertools import cycle
import math
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.morphology import generate_binary_structure
from rivuletpy.utils.io import writetiff3d
import skfmm
class Soma(object):
    """Container for a detected soma and the detection algorithms.

    Attributes
    ----------
    centroid : sequence of int or None
        Voxel coordinates of the soma centre.
    radius : float
        Estimated somatic radius (maximum of the boundary distance transform).
    mask : ndarray or None
        Binary volume flagging soma voxels.
    somapos : unused after __init__; kept for backward compatibility.
    """
    def __init__(self):
        self.centroid = None
        self.radius = 0
        self.mask = None
        self.somapos = None

    def simple_mask(self, bimg):
        '''
        Make soma binary mask with the original
        binary image and its radius and position
        '''
        # Make a ball like mask with 2 X somaradius
        ballvolume = np.zeros(bimg.shape)
        ballvolume[self.centroid[0], self.centroid[1], self.centroid[2]] = 1
        stt = generate_binary_structure(3, 1)
        for i in range(math.ceil(self.radius * 2.5)):
            ballvolume = binary_dilation(ballvolume, structure=stt)

        # Make the soma mask with the intersection
        # between the ball area and the original binary
        self.mask = np.logical_and(ballvolume, bimg)

    # Shift the centroid according to the cropped region
    def crop_centroid(self, crop_region):
        """Translate `centroid` into the coordinate frame of a crop whose
        per-axis [start, end] bounds are given in `crop_region` (3x2)."""
        self.centroid[0] = self.centroid[0] - crop_region[0, 0]
        self.centroid[1] = self.centroid[1] - crop_region[1, 0]
        self.centroid[2] = self.centroid[2] - crop_region[2, 0]

    def detect(self, bimg, simple=False, silent=False, somapos=None):
        """
        Automatic detection of soma volume unless the iterations are given.

        Parameters
        ----------
        bimg : ndarray
            Binary (segmented) neuron image.
        simple : bool
            If True, build the mask with `simple_mask` instead of running the
            morphological active-contour (MACWE) growth.
        silent : bool
            Suppress progress printing.
        somapos : string or None
            Comma-separated "x,y,z" soma centroid; if None, the centroid is
            taken at the maximum of the distance transform.
        """
        # Smooth iterations
        smoothing = 1
        # A float number controls the weight of internal energy
        lambda1 = 1
        # A float number controls the weight of external energy
        lambda2 = 1.5
        # Manually set the number of iterations required for the soma
        # The type of iterations is int (-1 means automatic convergence)
        iterations = -1
        bimg = bimg.astype('int')  # Segment
        dt = skfmm.distance(bimg, dx=1.1)  # Boundary DT
        # somaradius : the approximate value of
        # soma radius estimated from distance transform
        # the type of somaradius is float64
        somaradius = dt.max()

        # BUGFIX: identity test `is None` replaces `== None`, which is
        # non-idiomatic and breaks (elementwise comparison) if an array is
        # ever passed in.
        if somapos is None:
            # somapos : the coordinate of estimated soma centroid
            # the type of somapos is int64, shape (3,), array-like
            somapos = np.asarray(np.unravel_index(dt.argmax(), dt.shape))
        else:
            somapos = np.array(somapos.split(',')).astype(int)

        # Soma detection is required
        if not simple:
            if not silent:
                print('Reconstructing Soma with SRET')
            # Scale the initial level-set radius by the image anisotropy
            ratioxz = bimg.shape[0] / bimg.shape[2]
            ratioyz = bimg.shape[1] / bimg.shape[2]
            sqrval = (somaradius**0.5 * max(ratioxz, ratioyz))
            sqrval = np.floor(min(max(sqrval, 3), (somaradius**0.5) * 6))
            startpt = somapos - 3 * sqrval
            endpt = somapos + 3 * sqrval

            # To constrain the soma growth region inside the cubic region
            # Python index start from 0
            # NOTE(review): clamped to shape-1 here but to shape inside the
            # extension loop below -- confirm which is intended.
            startpt[0] = min(max(0, startpt[0]), bimg.shape[0] - 1)
            startpt[1] = min(max(0, startpt[1]), bimg.shape[1] - 1)
            startpt[2] = min(max(0, startpt[2]), bimg.shape[2] - 1)
            endpt[0] = min(max(0, endpt[0]), bimg.shape[0] - 1)
            endpt[1] = min(max(0, endpt[1]), bimg.shape[1] - 1)
            endpt[2] = min(max(0, endpt[2]), bimg.shape[2] - 1)
            startpt = startpt.astype(int)  # Convert type to int for indexing
            endpt = endpt.astype(int)

            # Extract soma region for fast soma detection
            somaimg = bimg[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:
                           endpt[2]]
            centerpt = np.zeros(3)
            centerpt[0] = somaimg.shape[0] / 2
            centerpt[1] = somaimg.shape[1] / 2
            centerpt[2] = somaimg.shape[2] / 2
            centerpt = np.floor(centerpt)

            # Morphological ACWE. Initialization of the level-set.
            macwe = MorphACWE(somaimg, startpt, endpt,
                              smoothing, lambda1, lambda2)
            macwe.levelset = circle_levelset(somaimg.shape,
                                             np.floor(centerpt), sqrval)
            # -1 means the automatic detection
            # Positive integers means the number of iterations
            if iterations == -1:
                macwe.autoconvg()  # automatic soma detection
            else:
                # Input the iteration number manually
                for i in range(iterations):
                    macwe.step()

            # The following achieves the automatic somatic box extension
            # The maximum somatic region extension iteration
            # It is set to 10 avoid infinite loops
            for i in range(1, 11):
                # macwe.enlrspt is set only when the contour touched the box
                # boundary and the region must be enlarged; None means done.
                if macwe.enlrspt is None:
                    break
                # Copy the values to new variables for the safe purpose
                startpt = macwe.enlrspt.copy()
                endpt = macwe.enlrept.copy()
                startpt[0] = min(max(0, startpt[0]), bimg.shape[0])
                startpt[1] = min(max(0, startpt[1]), bimg.shape[1])
                startpt[2] = min(max(0, startpt[2]), bimg.shape[2])
                endpt[0] = min(max(0, endpt[0]), bimg.shape[0])
                endpt[1] = min(max(0, endpt[1]), bimg.shape[1])
                endpt[2] = min(max(0, endpt[2]), bimg.shape[2])
                somaimg = bimg[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:
                               endpt[2]]
                full_soma_mask = np.zeros(
                    (bimg.shape[0], bimg.shape[1], bimg.shape[2]))
                # Put the detected somas into the whole image
                # It is either true or false
                full_soma_mask[macwe.startpoint[0]:macwe.endpoint[
                    0], macwe.startpoint[1]:macwe.endpoint[1], macwe.startpoint[2]:
                               macwe.endpoint[2]] = macwe._u
                # The newlevelset is the initial soma volume from previous
                # iteration (the automatic converge operation)
                newlevelset = full_soma_mask[startpt[0]:endpt[0], startpt[1]:endpt[1],
                                             startpt[2]:endpt[2]]
                # The previous macwe class is released
                # To avoid the conflicts with the new initialisation of the
                # macwe class
                del macwe
                # Initialisation for the new class
                macwe = MorphACWE(somaimg, startpt, endpt, smoothing, lambda1,
                                  lambda2)
                del somaimg, full_soma_mask, startpt, endpt
                # Reuse the soma volume from previous iteration
                macwe.set_levelset(newlevelset)
                # Release memory to avoid conflicts with previous newlevelset
                del newlevelset
                macwe.autoconvg()

            # The automatic smoothing operation to remove the interferes with
            # dendrites
            macwe.autosmooth()

            # Initialise soma mask image
            full_soma_mask = np.zeros(
                (bimg.shape[0], bimg.shape[1], bimg.shape[2]))
            # There are two possible scenarios
            # The first scenario is that the automatic box extension is not
            # necessary
            if macwe.enlrspt is None:
                startpt = macwe.startpoint.copy()
                endpt = macwe.endpoint.copy()
            # The second scenario is that the automatic box extension
            # operations has been performed
            else:
                startpt = macwe.enlrspt.copy()
                endpt = macwe.enlrept.copy()
            startpt[0] = min(max(0, startpt[0]), bimg.shape[0])
            startpt[1] = min(max(0, startpt[1]), bimg.shape[1])
            startpt[2] = min(max(0, startpt[2]), bimg.shape[2])
            endpt[0] = min(max(0, endpt[0]), bimg.shape[0])
            endpt[1] = min(max(0, endpt[1]), bimg.shape[1])
            endpt[2] = min(max(0, endpt[2]), bimg.shape[2])
            # The soma mask image contains only two possible values: each
            # element is either 0 or 1 (the comparison below yields booleans;
            # an earlier comment claiming the value 40 was out of date).
            full_soma_mask[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:endpt[
                2]] = macwe._u > 0

            # Calculate the new centroid using the soma volume
            newsomapos = center_of_mass(full_soma_mask)
            # Round the float coordinates into integers
            newsomapos = [math.floor(p) for p in newsomapos]
            self.centroid = newsomapos
            self.radius = somaradius
            self.mask = full_soma_mask
        else:
            if not silent:
                print('Reconstructing Soma with Simple Mask')
            self.centroid = somapos
            self.radius = somaradius
            self.simple_mask(bimg)

    def pad(self, crop_region, original_shape):
        """Zero-pad `mask` back to `original_shape`, undoing the crop
        described by `crop_region` (3x2 per-axis [start, end] bounds)."""
        xmin = crop_region[0, 0]
        ymin = crop_region[1, 0]
        zmin = crop_region[2, 0]
        xmax = crop_region[0, 1]
        ymax = crop_region[1, 1]
        zmax = crop_region[2, 1]
        self.mask = np.pad(self.mask, ((xmin, original_shape[0] - xmax),
                                       (ymin, original_shape[1] - ymax),
                                       (zmin, original_shape[2] - zmax)),
                           mode='constant',
                           constant_values=0)

    def save(self, fname):
        """Write the soma mask to `fname` as a 3D TIFF (0/255 values)."""
        writetiff3d(fname, self.mask * 255)
class Fcycle(object):
    """Callable that dispatches to a sequence of functions round-robin."""
    def __init__(self, iterable):
        """Keep an endless iterator cycling over *iterable*'s callables."""
        self.funcs = cycle(iterable)
    def __call__(self, *args, **kwargs):
        # Each invocation hands the arguments to the next callable in turn.
        func = next(self.funcs)
        return func(*args, **kwargs)
# SI and IS operators for 2D and 3D.
# _P2: four 3x3 structuring elements (horizontal, vertical and the two
# diagonal line segments through the centre) used by the 2-D SI/IS operators.
_P2 = [
    np.eye(3), np.array([[0, 1, 0]] * 3), np.flipud(np.eye(3)),
    np.rot90([[0, 1, 0]] * 3)
]
# _P3: nine 3x3x3 structuring elements for the 3-D operators — the three
# axis-aligned planes followed by six diagonal planes through the centre.
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
# _aux: shared scratch buffer reused by SI/IS; reallocated whenever the
# level-set shape changes (see the shape check inside SI/IS).
_aux = np.zeros((0))
def SI(u):
    """SI operator: pointwise max of erosions by every line element."""
    global _aux
    if np.ndim(u) == 2:
        selems = _P2
    elif np.ndim(u) == 3:
        selems = _P3
    else:
        raise ValueError(
            "u has an invalid number of dimensions (should be 2 or 3)")
    # Reallocate the shared scratch buffer when the input shape changes.
    if u.shape != _aux.shape[1:]:
        _aux = np.zeros((len(selems), ) + u.shape)
    for idx, selem in enumerate(selems):
        _aux[idx] = binary_erosion(u, selem)
    return _aux.max(0)
def circle_levelset(shape, center, sqradius, scalerow=1.0):
    """Build a binary function with a circle as the 0.5-levelset.

    Parameters
    ----------
    shape : tuple of int
        Shape of the output array.
    center : sequence of float
        Coordinates of the circle/sphere centre.
    sqradius : float
        Radius of the zero level-set (despite the name it is compared
        against the Euclidean distance, not its square).
    scalerow : float, optional
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    ndarray of float
        1.0 inside the circle, 0.0 outside.
    """
    grid = np.mgrid[list(map(slice, shape))].T - center
    phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))
    # np.float_ was removed in NumPy 2.0; cast explicitly instead.
    u = (phi > 0).astype(float)
    return u
def IS(u):
    """IS operator: pointwise min of dilations by every line element."""
    global _aux
    if np.ndim(u) == 2:
        selems = _P2
    elif np.ndim(u) == 3:
        selems = _P3
    else:
        raise ValueError(
            "u has an invalid number of dimensions (should be 2 or 3)")
    # Reallocate the shared scratch buffer when the input shape changes.
    if u.shape != _aux.shape[1:]:
        _aux = np.zeros((len(selems), ) + u.shape)
    for idx, selem in enumerate(selems):
        _aux[idx] = binary_dilation(u, selem)
    return _aux.min(0)
# SIoIS operator.
# Named functions instead of lambda assignments (PEP 8 E731); behavior is
# unchanged.
def SIoIS(u):
    """Compose IS then SI (opening-like smoothing of the level set)."""
    return SI(IS(u))


def ISoSI(u):
    """Compose SI then IS (closing-like smoothing of the level set)."""
    return IS(SI(u))


# Alternate the two compositions so repeated smoothing is unbiased.
curvop = Fcycle([SIoIS, ISoSI])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion for image borders.

    Returns a factor in (0, 1] that approaches zero near strong edges.
    """
    # Gradient magnitude of the Gaussian-smoothed image.
    edge_strength = gaussian_gradient_magnitude(img, sigma, mode='constant')
    return 1.0 / np.sqrt(1.0 + alpha * edge_strength)
def glines(img, sigma=1.0):
    """Stopping criterion for image black lines.

    Simply a Gaussian-blurred copy of *img*.
    """
    smoothed = gaussian_filter(img, sigma)
    return smoothed
class MorphACWE(object):
    """Morphological ACWE based on the Chan-Vese energy functional."""
    def __init__(self,
                 data,
                 startpoint,
                 endpoint,
                 imgshape,
                 smoothing=1,
                 lambda1=1,
                 lambda2=1.5):
        """Create a Morphological ACWE solver.
        Parameters
        ----------
        data : ndarray
            The image data.
        startpoint, endpoint : numpy int array
            startpoint is the initial starting point of the somatic region;
            endpoint is the initial ending point of the somatic region.
        imgshape : tuple
            Shape of the full image. Stored on the instance but not read by
            any method of this class.
        smoothing : scalar
            The number of repetitions of the smoothing step (the
            curv operator) in each iteration. In other terms,
            this is the strength of the smoothing. This is the
            parameter µ.
        lambda1, lambda2 : scalars
            Relative importance of the inside pixels (lambda1)
            against the outside pixels (lambda2).
        """
        self._u = None
        self.smoothing = smoothing
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.imgshape = imgshape
        self.data = data
        self.startpoint = startpoint
        self.endpoint = endpoint
        # Enlarged bounding box, set by autoconvg when a wall extension is
        # required; None means no extension was performed.
        self.enlrspt = None
        self.enlrept = None
    def set_levelset(self, u):
        # Binarise u into a double-precision 0/1 level-set embedding.
        self._u = np.double(u)
        self._u[u > 0] = 1
        self._u[u <= 0] = 0
    levelset = property(
        lambda self: self._u,
        set_levelset,
        doc="The level set embedding function (u).")
    def step(self):
        """Perform a single step of the morphological Chan-Vese evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u
        if u is None:
            raise ValueError(
                "the levelset function is not set (use set_levelset)")
        data = self.data
        # Determine c0 and c1.
        # c0/c1 are the mean intensities outside/inside the contour.
        inside = u > 0
        outside = u <= 0
        c0 = data[outside].sum() / float(outside.sum())
        c1 = data[inside].sum() / float(inside.sum())
        # Image attachment.
        dres = np.array(np.gradient(u))
        abs_dres = np.abs(dres).sum(0)
        #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
        aux = abs_dres * (self.lambda1 * (data - c1)**2 - self.lambda2 *
                          (data - c0)**2)
        res = np.copy(u)
        res[aux < 0] = 1
        res[aux > 0] = 0
        res = IS(res)
        # Smoothing.
        # curvop alternates SIoIS/ISoSI between calls (module-level cycle).
        for i in range(self.smoothing):
            res = curvop(res)
        self._u = res
    def step_sm(self):
        """A smoothing step of the morphological Chan-Vese evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u
        if u is None:
            raise ValueError(
                "the levelset function is not set (use set_levelset)")
        res = np.copy(u)
        # Smoothing.
        res = curvop(res)
        self._u = res
    def run(self, iterations):
        """Run several iterations of the morphological Chan-Vese method."""
        for i in range(iterations):
            self.step()
    def autoconvg(self):
        """Soma detection converges by itself."""
        # Autoconvg is the abbreviation of automatic convergence
        iterations = 200
        # The following vector is the number of foreground voxels
        foreground_num = np.zeros(iterations)
        # The following vector is initialised for storing forward difference
        forward_diff_store = np.zeros(iterations)
        # This is the initialisation of the automatic convergence loop
        for i in range(iterations):
            self.step()
            u = self._u
            volu = sum(u[u > 0])
            foreground_num[i] = volu
            if i > 0:
                # The variable diff_step is the current first order difference
                diff_step = foreground_num[i] - foreground_num[i - 1]
                forward_diff_store[i - 1] = diff_step
            if i > 6:
                # The variable cur_slider_diff is the sum of sliding window
                # The size of sliding window is 6
                cur_slider_diff = np.sum(forward_diff_store[i - 6:i - 1])
                volu_thres = 0.05 * foreground_num[i]
                # Converged when the windowed volume change is small either
                # in absolute terms or relative to the current volume.
                convg_one = np.absolute(cur_slider_diff) < 20
                convg_two = np.absolute(cur_slider_diff) < volu_thres
                convg_criteria = np.logical_or(convg_one, convg_two)
                if convg_criteria:
                    break
        # After convergence: count foreground voxels on each of the six
        # bounding-box faces to decide whether the box must be enlarged.
        A = self._u > 0.5
        slicevalarray = np.zeros(6)
        # Front face along dimension 1
        somaslice = A[0, :, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[0] = sliceval
        # Back face along dimension 1
        somaslice = A[A.shape[0] - 1, :, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[1] = sliceval
        # Front face along dimension 2
        somaslice = A[:, 0, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[2] = sliceval
        # Back face along dimension 2
        somaslice = A[:, A.shape[1] - 1, :]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[3] = sliceval
        # Front face along dimension 3
        somaslice = A[:, :, 0]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[4] = sliceval
        # Back face along dimension 3
        somaslice = A[:, :, A.shape[2] - 1]
        slicearray = np.sum(somaslice, axis=0)
        sliceval = np.sum(slicearray, axis=0)
        slicevalarray[5] = sliceval
        # The maxval is used to compare the threshold(100 mentioned later)
        maxval = slicevalarray.max()
        # The maxind is the index of slicevalarray.
        # In addition, it determines which wall will be extended
        maxind = slicevalarray.argmax()
        # The size of binary data image
        sz1 = self.data.shape[0]
        # sz2 = self.data.shape[1]
        # sz3 = self.data.shape[2]
        # extend = enlrspt have value, not extend = (enlrspt=None)
        # 100 : A threshold of the total number of somatic voxels on each wall
        if (maxval > 100):
            self.enlrspt = self.startpoint.copy()
            self.enlrept = self.endpoint.copy()
            # The following code determines the most possible wall(face)
            # which requires the extension
            # NOTE(review): sz1 (the axis-0 size) is used as the extension
            # amount for every axis; sz2/sz3 are left commented out above —
            # confirm this is intentional.
            if (maxind == 0):
                self.enlrspt[0] = self.enlrspt[0] - (sz1 / 4)
            elif (maxind == 1):
                self.enlrept[0] = self.enlrept[0] + (sz1 / 4)
            elif (maxind == 2):
                self.enlrspt[1] = self.enlrspt[1] - (sz1 / 4)
            elif (maxind == 3):
                self.enlrept[1] = self.enlrept[1] + (sz1 / 4)
            elif (maxind == 4):
                self.enlrspt[2] = self.enlrspt[2] - (sz1 / 4)
            elif (maxind == 5):
                self.enlrept[2] = self.enlrept[2] + (sz1 / 4)
            # To constrain new bounding box inside the image size
        else:
            self.enlrspt = None
            self.enlrept = None
    def autosmooth(self):
        """The automatic smoothing of soma volume to remove dendrites"""
        # The autosmooth is the abbreviation of automatic smoothing
        iterations = 20
        # Calculate the initial volume
        u = self._u
        ini_vol = sum(u[u > 0])
        # Smooth repeatedly; stop as soon as the volume drifts too far
        # from the initial one.
        for i in range(iterations):
            self.step_sm()
            u = self._u
            volu = sum(u[u > 0])
            vol_pct = volu / ini_vol
            # The criteria of the termination of soma growth
            # The somatic volume underwent dramatic change
            judge_one = vol_pct < 0.75
            judge_two = vol_pct > 1.15
            judge_criteria = np.logical_or(judge_one, judge_two)
            if judge_criteria:
                break
def evolve_visual(msnake, levelset=None, num_iters=20, background=None):
    """
    Visual evolution of a morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    background : array-like, optional
        If given, background will be shown behind the contours instead of
        msnake.data.
    Returns
    -------
    The levelset of msnake after the last iteration.
    """
    from matplotlib import pyplot as ppl
    if levelset is not None:
        msnake.levelset = levelset
    # Prepare the visual environment.
    fig = ppl.gcf()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    if background is None:
        ax1.imshow(msnake.data, cmap=ppl.cm.gray)
    else:
        ax1.imshow(background, cmap=ppl.cm.gray)
    ax1.contour(msnake.levelset, [0.5], colors='r')
    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(msnake.levelset)
    ppl.pause(0.001)
    # Iterate.
    for i in range(num_iters):
        # Evolve.
        msnake.step()
        # Update figure: drop the previous contour, then redraw.
        # `del ax1.collections[0]` fails on Matplotlib >= 3.7, where
        # Axes.collections is an immutable view; remove the artists instead.
        for coll in list(ax1.collections):
            coll.remove()
        ax1.contour(msnake.levelset, [0.5], colors='r')
        ax_u.set_data(msnake.levelset)
        fig.canvas.draw()
        # ppl.pause(0.001)
    # Return the last levelset.
    return msnake.levelset
def evolve_visual3d(msnake, levelset=None, num_iters=20):
    """
    Visual evolution of a three-dimensional morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    Returns
    -------
    The levelset of msnake after the last iteration.
    """
    from mayavi import mlab
    # import matplotlib.pyplot as ppl
    if levelset is not None:
        msnake.levelset = levelset
    # Scene setup: a grey image plane through the data plus the 0.5-contour
    # of the level set.
    fig = mlab.gcf()
    mlab.clf()
    src = mlab.pipeline.scalar_field(msnake.data)
    mlab.pipeline.image_plane_widget(
        src, plane_orientation='x_axes', colormap='gray')
    cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
    @mlab.animate(ui=True)
    def anim():
        # Generator driven by mayavi's animation loop: one solver step per
        # frame, then refresh the displayed contour.
        for i in range(num_iters):
            msnake.step()
            cnt.mlab_source.scalars = msnake.levelset
            yield
    anim()
    mlab.show()
    # Return the last levelset.
    return msnake.levelset
|
{"hexsha": "6a6240e4845befc3c4d0d115b447c4d47fcf80b2", "size": 24264, "ext": "py", "lang": "Python", "max_stars_repo_path": "rivuletpy/soma.py", "max_stars_repo_name": "holiday01/rivuletpy", "max_stars_repo_head_hexsha": "4adf06cbe5a16d34eb1d27df80ff658073484457", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rivuletpy/soma.py", "max_issues_repo_name": "holiday01/rivuletpy", "max_issues_repo_head_hexsha": "4adf06cbe5a16d34eb1d27df80ff658073484457", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rivuletpy/soma.py", "max_forks_repo_name": "holiday01/rivuletpy", "max_forks_repo_head_hexsha": "4adf06cbe5a16d34eb1d27df80ff658073484457", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1143270622, "max_line_length": 86, "alphanum_fraction": 0.5703511375, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6366}
|
/-
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Jeremy Avigad, Minchao Wu, Mario Carneiro
! This file was ported from Lean 3 source module data.finset.basic
! leanprover-community/mathlib commit 68cc421841f2ebb8ad2b5a35a853895feb4b850a
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Multiset.FinsetOps
import Mathbin.Tactic.Apply
import Mathbin.Tactic.NthRewrite.Default
import Mathbin.Tactic.Monotonicity.Default
/-!
# Finite sets
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
Terms of type `finset α` are one way of talking about finite subsets of `α` in mathlib.
Below, `finset α` is defined as a structure with 2 fields:
1. `val` is a `multiset α` of elements;
2. `nodup` is a proof that `val` has no duplicates.
Finsets in Lean are constructive in that they have an underlying `list` that enumerates their
elements. In particular, any function that uses the data of the underlying list cannot depend on its
ordering. This is handled on the `multiset` level by multiset API, so in most cases one needn't
worry about it explicitly.
Finsets give a basic foundation for defining finite sums and products over types:
1. `∑ i in (s : finset α), f i`;
2. `∏ i in (s : finset α), f i`.
Lean refers to these operations as `big_operator`s.
More information can be found in `algebra.big_operators.basic`.
Finsets are directly used to define fintypes in Lean.
A `fintype α` instance for a type `α` consists of
a universal `finset α` containing every term of `α`, called `univ`. See `data.fintype.basic`.
There is also `univ'`, the noncomputable partner to `univ`,
which is defined to be `α` as a finset if `α` is finite,
and the empty finset otherwise. See `data.fintype.basic`.
`finset.card`, the size of a finset is defined in `data.finset.card`. This is then used to define
`fintype.card`, the size of a type.
## Main declarations
### Main definitions
* `finset`: Defines a type for the finite subsets of `α`.
Constructing a `finset` requires two pieces of data: `val`, a `multiset α` of elements,
and `nodup`, a proof that `val` has no duplicates.
* `finset.has_mem`: Defines membership `a ∈ (s : finset α)`.
* `finset.has_coe`: Provides a coercion `s : finset α` to `s : set α`.
* `finset.has_coe_to_sort`: Coerce `s : finset α` to the type of all `x ∈ s`.
* `finset.induction_on`: Induction on finsets. To prove a proposition about an arbitrary `finset α`,
it suffices to prove it for the empty finset, and to show that if it holds for some `finset α`,
then it holds for the finset obtained by inserting a new element.
* `finset.choose`: Given a proof `h` of existence and uniqueness of a certain element
satisfying a predicate, `choose s h` returns the element of `s` satisfying that predicate.
### Finset constructions
* `singleton`: Denoted by `{a}`; the finset consisting of one element.
* `finset.empty`: Denoted by `∅`. The finset associated to any type consisting of no elements.
* `finset.range`: For any `n : ℕ`, `range n` is equal to `{0, 1, ... , n - 1} ⊆ ℕ`.
This convention is consistent with other languages and normalizes `card (range n) = n`.
Beware, `n` is not in `range n`.
* `finset.attach`: Given `s : finset α`, `attach s` forms a finset of elements of the subtype
`{a // a ∈ s}`; in other words, it attaches elements to a proof of membership in the set.
### Finsets from functions
* `finset.filter`: Given a predicate `p : α → Prop`, `s.filter p` is
the finset consisting of those elements in `s` satisfying the predicate `p`.
### The lattice structure on subsets of finsets
There is a natural lattice structure on the subsets of a set.
In Lean, we use lattice notation to talk about things involving unions and intersections. See
`order.lattice`. For the lattice structure on finsets, `⊥` is called `bot` with `⊥ = ∅` and `⊤` is
called `top` with `⊤ = univ`.
* `finset.has_subset`: Lots of API about lattices, otherwise behaves exactly as one would expect.
* `finset.has_union`: Defines `s ∪ t` (or `s ⊔ t`) as the union of `s` and `t`.
See `finset.sup`/`finset.bUnion` for finite unions.
* `finset.has_inter`: Defines `s ∩ t` (or `s ⊓ t`) as the intersection of `s` and `t`.
See `finset.inf` for finite intersections.
* `finset.disj_union`: Given a hypothesis `h` which states that finsets `s` and `t` are disjoint,
`s.disj_union t h` is the set such that `a ∈ disj_union s t h` iff `a ∈ s` or `a ∈ t`; this does
not require decidable equality on the type `α`.
### Operations on two or more finsets
* `insert` and `finset.cons`: For any `a : α`, `insert s a` returns `s ∪ {a}`. `cons s a h`
returns the same except that it requires a hypothesis stating that `a` is not already in `s`.
This does not require decidable equality on the type `α`.
* `finset.has_union`: see "The lattice structure on subsets of finsets"
* `finset.has_inter`: see "The lattice structure on subsets of finsets"
* `finset.erase`: For any `a : α`, `erase s a` returns `s` with the element `a` removed.
* `finset.has_sdiff`: Defines the set difference `s \ t` for finsets `s` and `t`.
* `finset.product`: Given finsets of `α` and `β`, defines finsets of `α × β`.
For arbitrary dependent products, see `data.finset.pi`.
* `finset.bUnion`: Finite unions of finsets; given an indexing function `f : α → finset β` and a
`s : finset α`, `s.bUnion f` is the union of all finsets of the form `f a` for `a ∈ s`.
* `finset.bInter`: TODO: Implement finite intersections.
### Maps constructed using finsets
* `finset.piecewise`: Given two functions `f`, `g`, `s.piecewise f g` is a function which is equal
to `f` on `s` and `g` on the complement.
### Predicates on finsets
* `disjoint`: defined via the lattice structure on finsets; two sets are disjoint if their
intersection is empty.
* `finset.nonempty`: A finset is nonempty if it has elements.
This is equivalent to saying `s ≠ ∅`. TODO: Decide on the simp normal form.
### Equivalences between finsets
* The `data.equiv` files describe a general type of equivalence, so look in there for any lemmas.
There is some API for rewriting sums and products from `s` to `t` given that `s ≃ t`.
TODO: examples
## Tags
finite sets, finset
-/
-- Open common namespaces and fix the universe and type variables used
-- throughout this file.
open Multiset Subtype Nat Function
universe u
variable {α : Type _} {β : Type _} {γ : Type _}
-- `finset α`: a multiset together with a proof that it has no duplicates.
#print Finset /-
/-- `finset α` is the type of finite sets of elements of `α`. It is implemented
as a multiset (a list up to permutation) which has no duplicate elements. -/
structure Finset (α : Type _) where
  val : Multiset α
  Nodup : Nodup val
#align finset Finset
-/
-- A duplicate-free multiset can be lifted to a finset.
#print Multiset.canLiftFinset /-
instance Multiset.canLiftFinset {α} : CanLift (Multiset α) (Finset α) Finset.val Multiset.Nodup :=
  ⟨fun m hm => ⟨⟨m, hm⟩, rfl⟩⟩
#align multiset.can_lift_finset Multiset.canLiftFinset
-/
namespace Finset
-- Lemmas relating a finset to its underlying multiset `val`, followed by
-- the membership instance and its basic simp lemmas.
#print Finset.eq_of_veq /-
theorem eq_of_veq : ∀ {s t : Finset α}, s.1 = t.1 → s = t
  | ⟨s, _⟩, ⟨t, _⟩, rfl => rfl
#align finset.eq_of_veq Finset.eq_of_veq
-/
#print Finset.val_injective /-
theorem val_injective : Injective (val : Finset α → Multiset α) := fun _ _ => eq_of_veq
#align finset.val_injective Finset.val_injective
-/
#print Finset.val_inj /-
@[simp]
theorem val_inj {s t : Finset α} : s.1 = t.1 ↔ s = t :=
  val_injective.eq_iff
#align finset.val_inj Finset.val_inj
-/
#print Finset.dedup_eq_self /-
@[simp]
theorem dedup_eq_self [DecidableEq α] (s : Finset α) : dedup s.1 = s.1 :=
  s.2.dedup
#align finset.dedup_eq_self Finset.dedup_eq_self
-/
#print Finset.decidableEq /-
instance decidableEq [DecidableEq α] : DecidableEq (Finset α)
  | s₁, s₂ => decidable_of_iff _ val_inj
#align finset.has_decidable_eq Finset.decidableEq
-/
/-! ### membership -/
instance : Membership α (Finset α) :=
  ⟨fun a s => a ∈ s.1⟩
#print Finset.mem_def /-
theorem mem_def {a : α} {s : Finset α} : a ∈ s ↔ a ∈ s.1 :=
  Iff.rfl
#align finset.mem_def Finset.mem_def
-/
#print Finset.mem_val /-
@[simp]
theorem mem_val {a : α} {s : Finset α} : a ∈ s.1 ↔ a ∈ s :=
  Iff.rfl
#align finset.mem_val Finset.mem_val
-/
#print Finset.mem_mk /-
@[simp]
theorem mem_mk {a : α} {s nd} : a ∈ @Finset.mk α s nd ↔ a ∈ s :=
  Iff.rfl
#align finset.mem_mk Finset.mem_mk
-/
#print Finset.decidableMem /-
instance decidableMem [h : DecidableEq α] (a : α) (s : Finset α) : Decidable (a ∈ s) :=
  Multiset.decidableMem _ _
#align finset.decidable_mem Finset.decidableMem
-/
/-! ### set coercion -/
-- Coercion from `finset α` to `set α`, its simp lemmas, and the
-- extensionality principle for finsets.
/-- Convert a finset to a set in the natural way. -/
instance : CoeTC (Finset α) (Set α) :=
  ⟨fun s => { x | x ∈ s }⟩
#print Finset.mem_coe /-
@[simp, norm_cast]
theorem mem_coe {a : α} {s : Finset α} : a ∈ (s : Set α) ↔ a ∈ s :=
  Iff.rfl
#align finset.mem_coe Finset.mem_coe
-/
#print Finset.setOf_mem /-
@[simp]
theorem setOf_mem {α} {s : Finset α} : { a | a ∈ s } = s :=
  rfl
#align finset.set_of_mem Finset.setOf_mem
-/
#print Finset.coe_mem /-
@[simp]
theorem coe_mem {s : Finset α} (x : (s : Set α)) : ↑x ∈ s :=
  x.2
#align finset.coe_mem Finset.coe_mem
-/
#print Finset.mk_coe /-
@[simp]
theorem mk_coe {s : Finset α} (x : (s : Set α)) {h} : (⟨x, h⟩ : (s : Set α)) = x :=
  Subtype.coe_eta _ _
#align finset.mk_coe Finset.mk_coe
-/
#print Finset.decidableMem' /-
instance decidableMem' [DecidableEq α] (a : α) (s : Finset α) : Decidable (a ∈ (s : Set α)) :=
  s.decidableMem _
#align finset.decidable_mem' Finset.decidableMem'
-/
/-! ### extensionality -/
#print Finset.ext_iff /-
theorem ext_iff {s₁ s₂ : Finset α} : s₁ = s₂ ↔ ∀ a, a ∈ s₁ ↔ a ∈ s₂ :=
  val_inj.symm.trans <| s₁.Nodup.ext s₂.Nodup
#align finset.ext_iff Finset.ext_iff
-/
#print Finset.ext /-
@[ext]
theorem ext {s₁ s₂ : Finset α} : (∀ a, a ∈ s₁ ↔ a ∈ s₂) → s₁ = s₂ :=
  ext_iff.2
#align finset.ext Finset.ext
-/
#print Finset.coe_inj /-
@[simp, norm_cast]
theorem coe_inj {s₁ s₂ : Finset α} : (s₁ : Set α) = s₂ ↔ s₁ = s₂ :=
  Set.ext_iff.trans ext_iff.symm
#align finset.coe_inj Finset.coe_inj
-/
#print Finset.coe_injective /-
theorem coe_injective {α} : Injective (coe : Finset α → Set α) := fun s t => coe_inj.1
#align finset.coe_injective Finset.coe_injective
-/
/-! ### type coercion -/
-- Coerce `s : finset α` to the subtype `{x // x ∈ s}`, plus `can_lift`
-- instances for (pi types over) that subtype.
/-- Coercion from a finset to the corresponding subtype. -/
instance {α : Type u} : CoeSort (Finset α) (Type u) :=
  ⟨fun s => { x // x ∈ s }⟩
#print Finset.forall_coe /-
@[simp]
protected theorem forall_coe {α : Type _} (s : Finset α) (p : s → Prop) :
    (∀ x : s, p x) ↔ ∀ (x : α) (h : x ∈ s), p ⟨x, h⟩ :=
  Subtype.forall
#align finset.forall_coe Finset.forall_coe
-/
#print Finset.exists_coe /-
@[simp]
protected theorem exists_coe {α : Type _} (s : Finset α) (p : s → Prop) :
    (∃ x : s, p x) ↔ ∃ (x : α)(h : x ∈ s), p ⟨x, h⟩ :=
  Subtype.exists
#align finset.exists_coe Finset.exists_coe
-/
#print Finset.PiFinsetCoe.canLift /-
instance PiFinsetCoe.canLift (ι : Type _) (α : ∀ i : ι, Type _) [ne : ∀ i, Nonempty (α i)]
    (s : Finset ι) : CanLift (∀ i : s, α i) (∀ i, α i) (fun f i => f i) fun _ => True :=
  PiSubtype.canLift ι α (· ∈ s)
#align finset.pi_finset_coe.can_lift Finset.PiFinsetCoe.canLift
-/
#print Finset.PiFinsetCoe.canLift' /-
instance PiFinsetCoe.canLift' (ι α : Type _) [ne : Nonempty α] (s : Finset ι) :
    CanLift (s → α) (ι → α) (fun f i => f i) fun _ => True :=
  PiFinsetCoe.canLift ι (fun _ => α) s
#align finset.pi_finset_coe.can_lift' Finset.PiFinsetCoe.canLift'
-/
#print Finset.FinsetCoe.canLift /-
instance FinsetCoe.canLift (s : Finset α) : CanLift α s coe fun a => a ∈ s
    where prf a ha := ⟨⟨a, ha⟩, rfl⟩
#align finset.finset_coe.can_lift Finset.FinsetCoe.canLift
-/
#print Finset.coe_sort_coe /-
@[simp, norm_cast]
theorem coe_sort_coe (s : Finset α) : ((s : Set α) : Sort _) = s :=
  rfl
#align finset.coe_sort_coe Finset.coe_sort_coe
-/
/-! ### Subset and strict subset relations -/
-- `⊆`/`⊂` on finsets, the induced partial order, and basic lemmas.
section Subset
variable {s t : Finset α}
instance : HasSubset (Finset α) :=
  ⟨fun s t => ∀ ⦃a⦄, a ∈ s → a ∈ t⟩
instance : HasSSubset (Finset α) :=
  ⟨fun s t => s ⊆ t ∧ ¬t ⊆ s⟩
instance : PartialOrder (Finset α) where
  le := (· ⊆ ·)
  lt := (· ⊂ ·)
  le_refl s a := id
  le_trans s t u hst htu a ha := htu <| hst ha
  le_antisymm s t hst hts := ext fun a => ⟨@hst _, @hts _⟩
instance : IsRefl (Finset α) (· ⊆ ·) :=
  LE.le.isRefl
instance : IsTrans (Finset α) (· ⊆ ·) :=
  LE.le.isTrans
instance : IsAntisymm (Finset α) (· ⊆ ·) :=
  LE.le.isAntisymm
instance : IsIrrefl (Finset α) (· ⊂ ·) :=
  LT.lt.isIrrefl
instance : IsTrans (Finset α) (· ⊂ ·) :=
  LT.lt.isTrans
instance : IsAsymm (Finset α) (· ⊂ ·) :=
  LT.lt.isAsymm
instance : IsNonstrictStrictOrder (Finset α) (· ⊆ ·) (· ⊂ ·) :=
  ⟨fun _ _ => Iff.rfl⟩
#print Finset.subset_def /-
theorem subset_def : s ⊆ t ↔ s.1 ⊆ t.1 :=
  Iff.rfl
#align finset.subset_def Finset.subset_def
-/
#print Finset.ssubset_def /-
theorem ssubset_def : s ⊂ t ↔ s ⊆ t ∧ ¬t ⊆ s :=
  Iff.rfl
#align finset.ssubset_def Finset.ssubset_def
-/
#print Finset.Subset.refl /-
@[simp]
theorem Subset.refl (s : Finset α) : s ⊆ s :=
  Subset.refl _
#align finset.subset.refl Finset.Subset.refl
-/
#print Finset.Subset.rfl /-
protected theorem Subset.rfl {s : Finset α} : s ⊆ s :=
  Subset.refl _
#align finset.subset.rfl Finset.Subset.rfl
-/
#print Finset.subset_of_eq /-
protected theorem subset_of_eq {s t : Finset α} (h : s = t) : s ⊆ t :=
  h ▸ Subset.refl _
#align finset.subset_of_eq Finset.subset_of_eq
-/
#print Finset.Subset.trans /-
theorem Subset.trans {s₁ s₂ s₃ : Finset α} : s₁ ⊆ s₂ → s₂ ⊆ s₃ → s₁ ⊆ s₃ :=
  Subset.trans
#align finset.subset.trans Finset.Subset.trans
-/
#print Finset.Superset.trans /-
theorem Superset.trans {s₁ s₂ s₃ : Finset α} : s₁ ⊇ s₂ → s₂ ⊇ s₃ → s₁ ⊇ s₃ := fun h' h =>
  Subset.trans h h'
#align finset.superset.trans Finset.Superset.trans
-/
#print Finset.mem_of_subset /-
theorem mem_of_subset {s₁ s₂ : Finset α} {a : α} : s₁ ⊆ s₂ → a ∈ s₁ → a ∈ s₂ :=
  mem_of_subset
#align finset.mem_of_subset Finset.mem_of_subset
-/
#print Finset.not_mem_mono /-
theorem not_mem_mono {s t : Finset α} (h : s ⊆ t) {a : α} : a ∉ t → a ∉ s :=
  mt <| @h _
#align finset.not_mem_mono Finset.not_mem_mono
-/
#print Finset.Subset.antisymm /-
theorem Subset.antisymm {s₁ s₂ : Finset α} (H₁ : s₁ ⊆ s₂) (H₂ : s₂ ⊆ s₁) : s₁ = s₂ :=
  ext fun a => ⟨@H₁ a, @H₂ a⟩
#align finset.subset.antisymm Finset.Subset.antisymm
-/
#print Finset.subset_iff /-
theorem subset_iff {s₁ s₂ : Finset α} : s₁ ⊆ s₂ ↔ ∀ ⦃x⦄, x ∈ s₁ → x ∈ s₂ :=
  Iff.rfl
#align finset.subset_iff Finset.subset_iff
-/
#print Finset.coe_subset /-
@[simp, norm_cast]
theorem coe_subset {s₁ s₂ : Finset α} : (s₁ : Set α) ⊆ s₂ ↔ s₁ ⊆ s₂ :=
  Iff.rfl
#align finset.coe_subset Finset.coe_subset
-/
/- warning: finset.val_le_iff -> Finset.val_le_iff is a dubious translation:
lean 3 declaration is
  forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.partialOrder.{u1} α))) (Finset.val.{u1} α s₁) (Finset.val.{u1} α s₂)) (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s₁ s₂)
but is expected to have type
  forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.instPartialOrderMultiset.{u1} α))) (Finset.val.{u1} α s₁) (Finset.val.{u1} α s₂)) (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s₁ s₂)
Case conversion may be inaccurate. Consider using '#align finset.val_le_iff Finset.val_le_iffₓ'. -/
@[simp]
theorem val_le_iff {s₁ s₂ : Finset α} : s₁.1 ≤ s₂.1 ↔ s₁ ⊆ s₂ :=
  le_iff_subset s₁.2
#align finset.val_le_iff Finset.val_le_iff
#print Finset.Subset.antisymm_iff /-
theorem Subset.antisymm_iff {s₁ s₂ : Finset α} : s₁ = s₂ ↔ s₁ ⊆ s₂ ∧ s₂ ⊆ s₁ :=
  le_antisymm_iff
#align finset.subset.antisymm_iff Finset.Subset.antisymm_iff
-/
/- warning: finset.not_subset -> Finset.not_subset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s t)) (Exists.{succ u1} α (fun (x : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) => Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x t))))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s t)) (Exists.{succ u1} α (fun (x : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x t))))
Case conversion may be inaccurate. Consider using '#align finset.not_subset Finset.not_subsetₓ'. -/
theorem not_subset : ¬s ⊆ t ↔ ∃ x ∈ s, x ∉ t := by simp only [← coe_subset, Set.not_subset, mem_coe]
#align finset.not_subset Finset.not_subset
#print Finset.le_eq_subset /-
@[simp]
theorem le_eq_subset : ((· ≤ ·) : Finset α → Finset α → Prop) = (· ⊆ ·) :=
rfl
#align finset.le_eq_subset Finset.le_eq_subset
-/
#print Finset.lt_eq_subset /-
@[simp]
theorem lt_eq_subset : ((· < ·) : Finset α → Finset α → Prop) = (· ⊂ ·) :=
rfl
#align finset.lt_eq_subset Finset.lt_eq_subset
-/
#print Finset.le_iff_subset /-
theorem le_iff_subset {s₁ s₂ : Finset α} : s₁ ≤ s₂ ↔ s₁ ⊆ s₂ :=
Iff.rfl
#align finset.le_iff_subset Finset.le_iff_subset
-/
#print Finset.lt_iff_ssubset /-
theorem lt_iff_ssubset {s₁ s₂ : Finset α} : s₁ < s₂ ↔ s₁ ⊂ s₂ :=
Iff.rfl
#align finset.lt_iff_ssubset Finset.lt_iff_ssubset
-/
/- warning: finset.coe_ssubset -> Finset.coe_ssubset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Set.{u1} α) (Set.hasSsubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₁) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₂)) (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) s₁ s₂)
but is expected to have type
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Set.{u1} α) (Set.instHasSSubsetSet.{u1} α) (Finset.toSet.{u1} α s₁) (Finset.toSet.{u1} α s₂)) (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) s₁ s₂)
Case conversion may be inaccurate. Consider using '#align finset.coe_ssubset Finset.coe_ssubsetₓ'. -/
@[simp, norm_cast]
theorem coe_ssubset {s₁ s₂ : Finset α} : (s₁ : Set α) ⊂ s₂ ↔ s₁ ⊂ s₂ :=
show (s₁ : Set α) ⊂ s₂ ↔ s₁ ⊆ s₂ ∧ ¬s₂ ⊆ s₁ by simp only [Set.ssubset_def, Finset.coe_subset]
#align finset.coe_ssubset Finset.coe_ssubset
/- warning: finset.val_lt_iff -> Finset.val_lt_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (LT.lt.{u1} (Multiset.{u1} α) (Preorder.toLT.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.partialOrder.{u1} α))) (Finset.val.{u1} α s₁) (Finset.val.{u1} α s₂)) (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) s₁ s₂)
but is expected to have type
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, Iff (LT.lt.{u1} (Multiset.{u1} α) (Preorder.toLT.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.instPartialOrderMultiset.{u1} α))) (Finset.val.{u1} α s₁) (Finset.val.{u1} α s₂)) (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) s₁ s₂)
Case conversion may be inaccurate. Consider using '#align finset.val_lt_iff Finset.val_lt_iffₓ'. -/
/-- Strict inclusion of finsets corresponds to strict order `<` on their underlying
multisets (`finset.val`). -/
@[simp]
theorem val_lt_iff {s₁ s₂ : Finset α} : s₁.1 < s₂.1 ↔ s₁ ⊂ s₂ :=
-- `<` is `≤ ∧ ¬≥` on both sides; transfer each `≤` component with `val_le_iff`.
and_congr val_le_iff <| not_congr val_le_iff
#align finset.val_lt_iff Finset.val_lt_iff
#print Finset.ssubset_iff_subset_ne /-
theorem ssubset_iff_subset_ne {s t : Finset α} : s ⊂ t ↔ s ⊆ t ∧ s ≠ t :=
@lt_iff_le_and_ne _ _ s t
#align finset.ssubset_iff_subset_ne Finset.ssubset_iff_subset_ne
-/
/- warning: finset.ssubset_iff_of_subset -> Finset.ssubset_iff_of_subset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s₁ s₂) -> (Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) s₁ s₂) (Exists.{succ u1} α (fun (x : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₂) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₂) => Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₁)))))
but is expected to have type
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s₁ s₂) -> (Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) s₁ s₂) (Exists.{succ u1} α (fun (x : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s₂) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s₁)))))
Case conversion may be inaccurate. Consider using '#align finset.ssubset_iff_of_subset Finset.ssubset_iff_of_subsetₓ'. -/
/-- Given an inclusion `s₁ ⊆ s₂`, the inclusion is strict iff `s₂` contains an element
that is not in `s₁`. -/
theorem ssubset_iff_of_subset {s₁ s₂ : Finset α} (h : s₁ ⊆ s₂) : s₁ ⊂ s₂ ↔ ∃ x ∈ s₂, x ∉ s₁ :=
-- Delegate to the set-level lemma `Set.ssubset_iff_of_subset`.
Set.ssubset_iff_of_subset h
#align finset.ssubset_iff_of_subset Finset.ssubset_iff_of_subset
#print Finset.ssubset_of_ssubset_of_subset /-
theorem ssubset_of_ssubset_of_subset {s₁ s₂ s₃ : Finset α} (hs₁s₂ : s₁ ⊂ s₂) (hs₂s₃ : s₂ ⊆ s₃) :
s₁ ⊂ s₃ :=
Set.ssubset_of_ssubset_of_subset hs₁s₂ hs₂s₃
#align finset.ssubset_of_ssubset_of_subset Finset.ssubset_of_ssubset_of_subset
-/
#print Finset.ssubset_of_subset_of_ssubset /-
theorem ssubset_of_subset_of_ssubset {s₁ s₂ s₃ : Finset α} (hs₁s₂ : s₁ ⊆ s₂) (hs₂s₃ : s₂ ⊂ s₃) :
s₁ ⊂ s₃ :=
Set.ssubset_of_subset_of_ssubset hs₁s₂ hs₂s₃
#align finset.ssubset_of_subset_of_ssubset Finset.ssubset_of_subset_of_ssubset
-/
/- warning: finset.exists_of_ssubset -> Finset.exists_of_ssubset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) s₁ s₂) -> (Exists.{succ u1} α (fun (x : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₂) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₂) => Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s₁))))
but is expected to have type
forall {α : Type.{u1}} {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α}, (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) s₁ s₂) -> (Exists.{succ u1} α (fun (x : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s₂) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s₁))))
Case conversion may be inaccurate. Consider using '#align finset.exists_of_ssubset Finset.exists_of_ssubsetₓ'. -/
/-- A strict inclusion `s₁ ⊂ s₂` yields a witness: an element of `s₂` missing from `s₁`. -/
theorem exists_of_ssubset {s₁ s₂ : Finset α} (h : s₁ ⊂ s₂) : ∃ x ∈ s₂, x ∉ s₁ :=
-- Delegate to the set-level lemma.
Set.exists_of_ssubset h
#align finset.exists_of_ssubset Finset.exists_of_ssubset
#print Finset.isWellFounded_ssubset /-
instance isWellFounded_ssubset : IsWellFounded (Finset α) (· ⊂ ·) :=
Subrelation.isWellFounded (InvImage _ _) fun _ _ => val_lt_iff.2
#align finset.is_well_founded_ssubset Finset.isWellFounded_ssubset
-/
#print Finset.wellFoundedLT /-
instance wellFoundedLT : WellFoundedLT (Finset α) :=
Finset.isWellFounded_ssubset
#align finset.is_well_founded_lt Finset.wellFoundedLT
-/
end Subset
-- TODO: these should be global attributes, but this will require fixing other files
-- Enable `trans`-style chaining of `⊆`/`⊇` proofs, locally to this file only.
attribute [local trans] subset.trans superset.trans
/-! ### Order embedding from `finset α` to `set α` -/
#print Finset.coeEmb /-
/-- Coercion to `set α` as an `order_embedding`. -/
def coeEmb : Finset α ↪o Set α :=
⟨⟨coe, coe_injective⟩, fun s t => coe_subset⟩
#align finset.coe_emb Finset.coeEmb
-/
/- warning: finset.coe_coe_emb -> Finset.coe_coeEmb is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}}, Eq.{succ u1} ((Finset.{u1} α) -> (Set.{u1} α)) (coeFn.{succ u1, succ u1} (OrderEmbedding.{u1, u1} (Finset.{u1} α) (Set.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α))) (Set.hasLe.{u1} α)) (fun (_x : RelEmbedding.{u1, u1} (Finset.{u1} α) (Set.{u1} α) (LE.le.{u1} (Finset.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α)))) (LE.le.{u1} (Set.{u1} α) (Set.hasLe.{u1} α))) => (Finset.{u1} α) -> (Set.{u1} α)) (RelEmbedding.hasCoeToFun.{u1, u1} (Finset.{u1} α) (Set.{u1} α) (LE.le.{u1} (Finset.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α)))) (LE.le.{u1} (Set.{u1} α) (Set.hasLe.{u1} α))) (Finset.coeEmb.{u1} α)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))))
but is expected to have type
forall {α : Type.{u1}}, Eq.{succ u1} (forall (ᾰ : Finset.{u1} α), (fun (x._@.Mathlib.Data.FunLike.Embedding._hyg.19 : Finset.{u1} α) => Set.{u1} α) ᾰ) (FunLike.coe.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α)) (Finset.{u1} α) (fun (_x : Finset.{u1} α) => (fun (x._@.Mathlib.Data.FunLike.Embedding._hyg.19 : Finset.{u1} α) => Set.{u1} α) _x) (EmbeddingLike.toFunLike.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α)) (Finset.{u1} α) (Set.{u1} α) (Function.instEmbeddingLikeEmbedding.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α))) (RelEmbedding.toEmbedding.{u1, u1} (Finset.{u1} α) (Set.{u1} α) (fun (x._@.Mathlib.Order.Hom.Basic._hyg.680 : Finset.{u1} α) (x._@.Mathlib.Order.Hom.Basic._hyg.682 : Finset.{u1} α) => LE.le.{u1} (Finset.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α))) x._@.Mathlib.Order.Hom.Basic._hyg.680 x._@.Mathlib.Order.Hom.Basic._hyg.682) (fun (x._@.Mathlib.Order.Hom.Basic._hyg.695 : Set.{u1} α) (x._@.Mathlib.Order.Hom.Basic._hyg.697 : Set.{u1} α) => LE.le.{u1} (Set.{u1} α) (Set.instLESet.{u1} α) x._@.Mathlib.Order.Hom.Basic._hyg.695 x._@.Mathlib.Order.Hom.Basic._hyg.697) (Finset.coeEmb.{u1} α))) (Finset.toSet.{u1} α)
Case conversion may be inaccurate. Consider using '#align finset.coe_coe_emb Finset.coe_coeEmbₓ'. -/
/-- The underlying function of the order embedding `finset.coe_emb` is the set coercion. -/
@[simp]
theorem coe_coeEmb : ⇑(coeEmb : Finset α ↪o Set α) = coe :=
-- Holds by definition: `coe_emb` is constructed directly from `coe`.
rfl
#align finset.coe_coe_emb Finset.coe_coeEmb
/-! ### Nonempty -/
#print Finset.Nonempty /-
/-- The property `s.nonempty` expresses the fact that the finset `s` is not empty. It should be used
in theorem assumptions instead of `∃ x, x ∈ s` or `s ≠ ∅` as it gives access to a nice API thanks
to the dot notation. -/
protected def Nonempty (s : Finset α) : Prop :=
∃ x : α, x ∈ s
#align finset.nonempty Finset.Nonempty
-/
#print Finset.decidableNonempty /-
instance decidableNonempty {s : Finset α} : Decidable s.Nonempty :=
decidable_of_iff (∃ a ∈ s, True) <| by simp_rw [exists_prop, and_true_iff, Finset.Nonempty]
#align finset.decidable_nonempty Finset.decidableNonempty
-/
#print Finset.coe_nonempty /-
@[simp, norm_cast]
theorem coe_nonempty {s : Finset α} : (s : Set α).Nonempty ↔ s.Nonempty :=
Iff.rfl
#align finset.coe_nonempty Finset.coe_nonempty
-/
#print Finset.nonempty_coe_sort /-
@[simp]
theorem nonempty_coe_sort {s : Finset α} : Nonempty ↥s ↔ s.Nonempty :=
nonempty_subtype
#align finset.nonempty_coe_sort Finset.nonempty_coe_sort
-/
-- `nonempty.to_set : s.nonempty → (↑s : set α).nonempty`,
-- the reverse direction of `coe_nonempty`, restated for dot notation.
alias coe_nonempty ↔ _ nonempty.to_set
#align finset.nonempty.to_set Finset.Nonempty.to_set
-- `nonempty.coe_sort : s.nonempty → nonempty ↥s`,
-- the reverse direction of `nonempty_coe_sort`.
alias nonempty_coe_sort ↔ _ nonempty.coe_sort
#align finset.nonempty.coe_sort Finset.Nonempty.coe_sort
#print Finset.Nonempty.bex /-
theorem Nonempty.bex {s : Finset α} (h : s.Nonempty) : ∃ x : α, x ∈ s :=
h
#align finset.nonempty.bex Finset.Nonempty.bex
-/
#print Finset.Nonempty.mono /-
theorem Nonempty.mono {s t : Finset α} (hst : s ⊆ t) (hs : s.Nonempty) : t.Nonempty :=
Set.Nonempty.mono hst hs
#align finset.nonempty.mono Finset.Nonempty.mono
-/
#print Finset.Nonempty.forall_const /-
theorem Nonempty.forall_const {s : Finset α} (h : s.Nonempty) {p : Prop} : (∀ x ∈ s, p) ↔ p :=
let ⟨x, hx⟩ := h
⟨fun h => h x hx, fun h x hx => h⟩
#align finset.nonempty.forall_const Finset.Nonempty.forall_const
-/
#print Finset.Nonempty.to_subtype /-
theorem Nonempty.to_subtype {s : Finset α} : s.Nonempty → Nonempty s :=
nonempty_coe_sort.2
#align finset.nonempty.to_subtype Finset.Nonempty.to_subtype
-/
#print Finset.Nonempty.to_type /-
theorem Nonempty.to_type {s : Finset α} : s.Nonempty → Nonempty α := fun ⟨x, hx⟩ => ⟨x⟩
#align finset.nonempty.to_type Finset.Nonempty.to_type
-/
/-! ### empty -/
section Empty
-- `s` denotes an arbitrary finset throughout this section.
variable {s : Finset α}
#print Finset.empty /-
/-- The empty finset -/
protected def empty : Finset α :=
⟨0, nodup_zero⟩
#align finset.empty Finset.empty
-/
-- Provides the `∅` notation for finsets, backed by `finset.empty`.
instance : EmptyCollection (Finset α) :=
⟨Finset.empty⟩
#print Finset.inhabitedFinset /-
instance inhabitedFinset : Inhabited (Finset α) :=
⟨∅⟩
#align finset.inhabited_finset Finset.inhabitedFinset
-/
#print Finset.empty_val /-
@[simp]
theorem empty_val : (∅ : Finset α).1 = 0 :=
rfl
#align finset.empty_val Finset.empty_val
-/
#print Finset.not_mem_empty /-
@[simp]
theorem not_mem_empty (a : α) : a ∉ (∅ : Finset α) :=
id
#align finset.not_mem_empty Finset.not_mem_empty
-/
#print Finset.not_nonempty_empty /-
@[simp]
theorem not_nonempty_empty : ¬(∅ : Finset α).Nonempty := fun ⟨x, hx⟩ => not_mem_empty x hx
#align finset.not_nonempty_empty Finset.not_nonempty_empty
-/
#print Finset.mk_zero /-
@[simp]
theorem mk_zero : (⟨0, nodup_zero⟩ : Finset α) = ∅ :=
rfl
#align finset.mk_zero Finset.mk_zero
-/
#print Finset.ne_empty_of_mem /-
theorem ne_empty_of_mem {a : α} {s : Finset α} (h : a ∈ s) : s ≠ ∅ := fun e =>
not_mem_empty a <| e ▸ h
#align finset.ne_empty_of_mem Finset.ne_empty_of_mem
-/
#print Finset.Nonempty.ne_empty /-
theorem Nonempty.ne_empty {s : Finset α} (h : s.Nonempty) : s ≠ ∅ :=
Exists.elim h fun a => ne_empty_of_mem
#align finset.nonempty.ne_empty Finset.Nonempty.ne_empty
-/
#print Finset.empty_subset /-
@[simp]
theorem empty_subset (s : Finset α) : ∅ ⊆ s :=
zero_subset _
#align finset.empty_subset Finset.empty_subset
-/
#print Finset.eq_empty_of_forall_not_mem /-
theorem eq_empty_of_forall_not_mem {s : Finset α} (H : ∀ x, x ∉ s) : s = ∅ :=
eq_of_veq (eq_zero_of_forall_not_mem H)
#align finset.eq_empty_of_forall_not_mem Finset.eq_empty_of_forall_not_mem
-/
#print Finset.eq_empty_iff_forall_not_mem /-
theorem eq_empty_iff_forall_not_mem {s : Finset α} : s = ∅ ↔ ∀ x, x ∉ s :=
⟨by rintro rfl x <;> exact id, fun h => eq_empty_of_forall_not_mem h⟩
#align finset.eq_empty_iff_forall_not_mem Finset.eq_empty_iff_forall_not_mem
-/
#print Finset.val_eq_zero /-
@[simp]
theorem val_eq_zero {s : Finset α} : s.1 = 0 ↔ s = ∅ :=
@val_inj _ s ∅
#align finset.val_eq_zero Finset.val_eq_zero
-/
#print Finset.subset_empty /-
theorem subset_empty {s : Finset α} : s ⊆ ∅ ↔ s = ∅ :=
subset_zero.trans val_eq_zero
#align finset.subset_empty Finset.subset_empty
-/
#print Finset.not_ssubset_empty /-
@[simp]
theorem not_ssubset_empty (s : Finset α) : ¬s ⊂ ∅ := fun h =>
let ⟨x, he, hs⟩ := exists_of_ssubset h
he
#align finset.not_ssubset_empty Finset.not_ssubset_empty
-/
#print Finset.nonempty_of_ne_empty /-
theorem nonempty_of_ne_empty {s : Finset α} (h : s ≠ ∅) : s.Nonempty :=
exists_mem_of_ne_zero (mt val_eq_zero.1 h)
#align finset.nonempty_of_ne_empty Finset.nonempty_of_ne_empty
-/
#print Finset.nonempty_iff_ne_empty /-
theorem nonempty_iff_ne_empty {s : Finset α} : s.Nonempty ↔ s ≠ ∅ :=
⟨Nonempty.ne_empty, nonempty_of_ne_empty⟩
#align finset.nonempty_iff_ne_empty Finset.nonempty_iff_ne_empty
-/
#print Finset.not_nonempty_iff_eq_empty /-
@[simp]
theorem not_nonempty_iff_eq_empty {s : Finset α} : ¬s.Nonempty ↔ s = ∅ :=
nonempty_iff_ne_empty.Not.trans Classical.not_not
#align finset.not_nonempty_iff_eq_empty Finset.not_nonempty_iff_eq_empty
-/
#print Finset.eq_empty_or_nonempty /-
theorem eq_empty_or_nonempty (s : Finset α) : s = ∅ ∨ s.Nonempty :=
by_cases Or.inl fun h => Or.inr (nonempty_of_ne_empty h)
#align finset.eq_empty_or_nonempty Finset.eq_empty_or_nonempty
-/
#print Finset.coe_empty /-
@[simp, norm_cast]
theorem coe_empty : ((∅ : Finset α) : Set α) = ∅ :=
rfl
#align finset.coe_empty Finset.coe_empty
-/
#print Finset.coe_eq_empty /-
@[simp, norm_cast]
theorem coe_eq_empty {s : Finset α} : (s : Set α) = ∅ ↔ s = ∅ := by rw [← coe_empty, coe_inj]
#align finset.coe_eq_empty Finset.coe_eq_empty
-/
#print Finset.isEmpty_coe_sort /-
@[simp]
theorem isEmpty_coe_sort {s : Finset α} : IsEmpty ↥s ↔ s = ∅ := by
simpa using @Set.isEmpty_coe_sort α s
#align finset.is_empty_coe_sort Finset.isEmpty_coe_sort
-/
-- The empty finset, viewed as a type via coercion, has no inhabitants.
instance : IsEmpty (∅ : Finset α) :=
-- Immediate from `is_empty_coe_sort` applied at `s = ∅`.
isEmpty_coe_sort.2 rfl
#print Finset.eq_empty_of_isEmpty /-
/-- A `finset` for an empty type is empty. -/
theorem eq_empty_of_isEmpty [IsEmpty α] (s : Finset α) : s = ∅ :=
Finset.eq_empty_of_forall_not_mem isEmptyElim
#align finset.eq_empty_of_is_empty Finset.eq_empty_of_isEmpty
-/
-- Finsets ordered by `⊆` have a least element, namely `∅`.
instance : OrderBot (Finset α) where
bot := ∅
bot_le := empty_subset
/- warning: finset.bot_eq_empty -> Finset.bot_eq_empty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}}, Eq.{succ u1} (Finset.{u1} α) (Bot.bot.{u1} (Finset.{u1} α) (OrderBot.toHasBot.{u1} (Finset.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α))) (Finset.orderBot.{u1} α))) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α))
but is expected to have type
forall {α : Type.{u1}}, Eq.{succ u1} (Finset.{u1} α) (Bot.bot.{u1} (Finset.{u1} α) (OrderBot.toBot.{u1} (Finset.{u1} α) (Preorder.toLE.{u1} (Finset.{u1} α) (PartialOrder.toPreorder.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α))) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α))) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α))
Case conversion may be inaccurate. Consider using '#align finset.bot_eq_empty Finset.bot_eq_emptyₓ'. -/
/-- The bottom element of the finset order is the empty finset. -/
@[simp]
theorem bot_eq_empty : (⊥ : Finset α) = ∅ :=
-- Definitional: the `order_bot` instance above sets `bot := ∅`.
rfl
#align finset.bot_eq_empty Finset.bot_eq_empty
#print Finset.empty_ssubset /-
@[simp]
theorem empty_ssubset : ∅ ⊂ s ↔ s.Nonempty :=
(@bot_lt_iff_ne_bot (Finset α) _ _ _).trans nonempty_iff_ne_empty.symm
#align finset.empty_ssubset Finset.empty_ssubset
-/
-- `nonempty.empty_ssubset : s.nonempty → ∅ ⊂ s`,
-- the reverse direction of `empty_ssubset`, restated for dot notation.
alias empty_ssubset ↔ _ nonempty.empty_ssubset
#align finset.nonempty.empty_ssubset Finset.Nonempty.empty_ssubset
end Empty
/-! ### singleton -/
section Singleton
-- `s` denotes an arbitrary finset and `a`, `b` arbitrary elements in this section.
variable {s : Finset α} {a b : α}
/-- `{a} : finset a` is the set `{a}` containing `a` and nothing else.
This differs from `insert a ∅` in that it does not require a `decidable_eq` instance for `α`.
-/
instance : Singleton α (Finset α) :=
-- The underlying singleton multiset `{a}` is duplicate-free by `nodup_singleton`.
⟨fun a => ⟨{a}, nodup_singleton a⟩⟩
#print Finset.singleton_val /-
@[simp]
theorem singleton_val (a : α) : ({a} : Finset α).1 = {a} :=
rfl
#align finset.singleton_val Finset.singleton_val
-/
#print Finset.mem_singleton /-
@[simp]
theorem mem_singleton {a b : α} : b ∈ ({a} : Finset α) ↔ b = a :=
mem_singleton
#align finset.mem_singleton Finset.mem_singleton
-/
#print Finset.eq_of_mem_singleton /-
theorem eq_of_mem_singleton {x y : α} (h : x ∈ ({y} : Finset α)) : x = y :=
mem_singleton.1 h
#align finset.eq_of_mem_singleton Finset.eq_of_mem_singleton
-/
#print Finset.not_mem_singleton /-
theorem not_mem_singleton {a b : α} : a ∉ ({b} : Finset α) ↔ a ≠ b :=
not_congr mem_singleton
#align finset.not_mem_singleton Finset.not_mem_singleton
-/
#print Finset.mem_singleton_self /-
theorem mem_singleton_self (a : α) : a ∈ ({a} : Finset α) :=
Or.inl rfl
#align finset.mem_singleton_self Finset.mem_singleton_self
-/
#print Finset.val_eq_singleton_iff /-
@[simp]
theorem val_eq_singleton_iff {a : α} {s : Finset α} : s.val = {a} ↔ s = {a} :=
by
rw [← val_inj]
rfl
#align finset.val_eq_singleton_iff Finset.val_eq_singleton_iff
-/
#print Finset.singleton_injective /-
theorem singleton_injective : Injective (singleton : α → Finset α) := fun a b h =>
mem_singleton.1 (h ▸ mem_singleton_self _)
#align finset.singleton_injective Finset.singleton_injective
-/
#print Finset.singleton_inj /-
@[simp]
theorem singleton_inj : ({a} : Finset α) = {b} ↔ a = b :=
singleton_injective.eq_iff
#align finset.singleton_inj Finset.singleton_inj
-/
#print Finset.singleton_nonempty /-
@[simp]
theorem singleton_nonempty (a : α) : ({a} : Finset α).Nonempty :=
⟨a, mem_singleton_self a⟩
#align finset.singleton_nonempty Finset.singleton_nonempty
-/
#print Finset.singleton_ne_empty /-
@[simp]
theorem singleton_ne_empty (a : α) : ({a} : Finset α) ≠ ∅ :=
(singleton_nonempty a).ne_empty
#align finset.singleton_ne_empty Finset.singleton_ne_empty
-/
#print Finset.empty_ssubset_singleton /-
theorem empty_ssubset_singleton : (∅ : Finset α) ⊂ {a} :=
(singleton_nonempty _).empty_ssubset
#align finset.empty_ssubset_singleton Finset.empty_ssubset_singleton
-/
#print Finset.coe_singleton /-
@[simp, norm_cast]
theorem coe_singleton (a : α) : (({a} : Finset α) : Set α) = {a} :=
by
ext
simp
#align finset.coe_singleton Finset.coe_singleton
-/
#print Finset.coe_eq_singleton /-
@[simp, norm_cast]
theorem coe_eq_singleton {s : Finset α} {a : α} : (s : Set α) = {a} ↔ s = {a} := by
rw [← coe_singleton, coe_inj]
#align finset.coe_eq_singleton Finset.coe_eq_singleton
-/
#print Finset.eq_singleton_iff_unique_mem /-
theorem eq_singleton_iff_unique_mem {s : Finset α} {a : α} : s = {a} ↔ a ∈ s ∧ ∀ x ∈ s, x = a :=
by
constructor <;> intro t
rw [t]
refine' ⟨Finset.mem_singleton_self _, fun _ => Finset.mem_singleton.1⟩
ext; rw [Finset.mem_singleton]
refine' ⟨t.right _, fun r => r.symm ▸ t.left⟩
#align finset.eq_singleton_iff_unique_mem Finset.eq_singleton_iff_unique_mem
-/
#print Finset.eq_singleton_iff_nonempty_unique_mem /-
theorem eq_singleton_iff_nonempty_unique_mem {s : Finset α} {a : α} :
s = {a} ↔ s.Nonempty ∧ ∀ x ∈ s, x = a :=
by
constructor
· rintro rfl
simp
· rintro ⟨hne, h_uniq⟩
rw [eq_singleton_iff_unique_mem]
refine' ⟨_, h_uniq⟩
rw [← h_uniq hne.some hne.some_spec]
exact hne.some_spec
#align finset.eq_singleton_iff_nonempty_unique_mem Finset.eq_singleton_iff_nonempty_unique_mem
-/
#print Finset.nonempty_iff_eq_singleton_default /-
theorem nonempty_iff_eq_singleton_default [Unique α] {s : Finset α} : s.Nonempty ↔ s = {default} :=
by simp [eq_singleton_iff_nonempty_unique_mem]
#align finset.nonempty_iff_eq_singleton_default Finset.nonempty_iff_eq_singleton_default
-/
-- `nonempty.eq_singleton_default : s.nonempty → s = {default}` over a `unique` type,
-- the forward direction of `nonempty_iff_eq_singleton_default`.
alias nonempty_iff_eq_singleton_default ↔ nonempty.eq_singleton_default _
#align finset.nonempty.eq_singleton_default Finset.Nonempty.eq_singleton_default
#print Finset.singleton_iff_unique_mem /-
theorem singleton_iff_unique_mem (s : Finset α) : (∃ a, s = {a}) ↔ ∃! a, a ∈ s := by
simp only [eq_singleton_iff_unique_mem, ExistsUnique]
#align finset.singleton_iff_unique_mem Finset.singleton_iff_unique_mem
-/
#print Finset.singleton_subset_set_iff /-
theorem singleton_subset_set_iff {s : Set α} {a : α} : ↑({a} : Finset α) ⊆ s ↔ a ∈ s := by
rw [coe_singleton, Set.singleton_subset_iff]
#align finset.singleton_subset_set_iff Finset.singleton_subset_set_iff
-/
#print Finset.singleton_subset_iff /-
@[simp]
theorem singleton_subset_iff {s : Finset α} {a : α} : {a} ⊆ s ↔ a ∈ s :=
singleton_subset_set_iff
#align finset.singleton_subset_iff Finset.singleton_subset_iff
-/
#print Finset.subset_singleton_iff /-
@[simp]
theorem subset_singleton_iff {s : Finset α} {a : α} : s ⊆ {a} ↔ s = ∅ ∨ s = {a} := by
rw [← coe_subset, coe_singleton, Set.subset_singleton_iff_eq, coe_eq_empty, coe_eq_singleton]
#align finset.subset_singleton_iff Finset.subset_singleton_iff
-/
#print Finset.singleton_subset_singleton /-
theorem singleton_subset_singleton : ({a} : Finset α) ⊆ {b} ↔ a = b := by simp
#align finset.singleton_subset_singleton Finset.singleton_subset_singleton
-/
#print Finset.Nonempty.subset_singleton_iff /-
protected theorem Nonempty.subset_singleton_iff {s : Finset α} {a : α} (h : s.Nonempty) :
s ⊆ {a} ↔ s = {a} :=
subset_singleton_iff.trans <| or_iff_right h.ne_empty
#align finset.nonempty.subset_singleton_iff Finset.Nonempty.subset_singleton_iff
-/
#print Finset.subset_singleton_iff' /-
theorem subset_singleton_iff' {s : Finset α} {a : α} : s ⊆ {a} ↔ ∀ b ∈ s, b = a :=
forall₂_congr fun _ _ => mem_singleton
#align finset.subset_singleton_iff' Finset.subset_singleton_iff'
-/
#print Finset.ssubset_singleton_iff /-
@[simp]
theorem ssubset_singleton_iff {s : Finset α} {a : α} : s ⊂ {a} ↔ s = ∅ := by
rw [← coe_ssubset, coe_singleton, Set.ssubset_singleton_iff, coe_eq_empty]
#align finset.ssubset_singleton_iff Finset.ssubset_singleton_iff
-/
#print Finset.eq_empty_of_ssubset_singleton /-
theorem eq_empty_of_ssubset_singleton {s : Finset α} {x : α} (hs : s ⊂ {x}) : s = ∅ :=
ssubset_singleton_iff.1 hs
#align finset.eq_empty_of_ssubset_singleton Finset.eq_empty_of_ssubset_singleton
-/
#print Finset.eq_singleton_or_nontrivial /-
theorem eq_singleton_or_nontrivial (ha : a ∈ s) : s = {a} ∨ (s : Set α).Nontrivial :=
by
rw [← coe_eq_singleton]
exact Set.eq_singleton_or_nontrivial ha
#align finset.eq_singleton_or_nontrivial Finset.eq_singleton_or_nontrivial
-/
#print Finset.Nonempty.exists_eq_singleton_or_nontrivial /-
theorem Nonempty.exists_eq_singleton_or_nontrivial :
s.Nonempty → (∃ a, s = {a}) ∨ (s : Set α).Nontrivial := fun ⟨a, ha⟩ =>
(eq_singleton_or_nontrivial ha).imp_left <| Exists.intro a
#align finset.nonempty.exists_eq_singleton_or_nontrivial Finset.Nonempty.exists_eq_singleton_or_nontrivial
-/
-- If `α` is inhabited, `finset α` has at least two distinct elements: `{a}` and `∅`.
instance [Nonempty α] : Nontrivial (Finset α) :=
‹Nonempty α›.elim fun a => ⟨⟨{a}, ∅, singleton_ne_empty _⟩⟩
-- Over an empty type there is exactly one finset, `∅`, so `finset α` is `unique`.
instance [IsEmpty α] : Unique (Finset α)
where
default := ∅
-- Every finset is empty: no element of `α` exists to be a member (`is_empty_elim`).
uniq s := eq_empty_of_forall_not_mem isEmptyElim
end Singleton
/-! ### cons -/
section Cons
-- `s`, `t` denote arbitrary finsets and `a`, `b` arbitrary elements in this section.
variable {s t : Finset α} {a b : α}
#print Finset.cons /-
/-- `cons a s h` is the set `{a} ∪ s` containing `a` and the elements of `s`. It is the same as
`insert a s` when it is defined, but unlike `insert a s` it does not require `decidable_eq α`,
and the union is guaranteed to be disjoint. -/
def cons (a : α) (s : Finset α) (h : a ∉ s) : Finset α :=
⟨a ::ₘ s.1, nodup_cons.2 ⟨h, s.2⟩⟩
#align finset.cons Finset.cons
-/
#print Finset.mem_cons /-
@[simp]
theorem mem_cons {h} : b ∈ s.cons a h ↔ b = a ∨ b ∈ s :=
mem_cons
#align finset.mem_cons Finset.mem_cons
-/
#print Finset.mem_cons_self /-
@[simp]
theorem mem_cons_self (a : α) (s : Finset α) {h} : a ∈ cons a s h :=
mem_cons_self _ _
#align finset.mem_cons_self Finset.mem_cons_self
-/
#print Finset.cons_val /-
@[simp]
theorem cons_val (h : a ∉ s) : (cons a s h).1 = a ::ₘ s.1 :=
rfl
#align finset.cons_val Finset.cons_val
-/
#print Finset.forall_mem_cons /-
theorem forall_mem_cons (h : a ∉ s) (p : α → Prop) :
(∀ x, x ∈ cons a s h → p x) ↔ p a ∧ ∀ x, x ∈ s → p x := by
simp only [mem_cons, or_imp, forall_and, forall_eq]
#align finset.forall_mem_cons Finset.forall_mem_cons
-/
#print Finset.mk_cons /-
@[simp]
theorem mk_cons {s : Multiset α} (h : (a ::ₘ s).Nodup) :
(⟨a ::ₘ s, h⟩ : Finset α) = cons a ⟨s, (nodup_cons.1 h).2⟩ (nodup_cons.1 h).1 :=
rfl
#align finset.mk_cons Finset.mk_cons
-/
#print Finset.nonempty_cons /-
@[simp]
theorem nonempty_cons (h : a ∉ s) : (cons a s h).Nonempty :=
⟨a, mem_cons.2 <| Or.inl rfl⟩
#align finset.nonempty_cons Finset.nonempty_cons
-/
#print Finset.nonempty_mk /-
@[simp]
theorem nonempty_mk {m : Multiset α} {hm} : (⟨m, hm⟩ : Finset α).Nonempty ↔ m ≠ 0 := by
induction m using Multiset.induction_on <;> simp
#align finset.nonempty_mk Finset.nonempty_mk
-/
#print Finset.coe_cons /-
@[simp]
theorem coe_cons {a s h} : (@cons α a s h : Set α) = insert a s :=
by
ext
simp
#align finset.coe_cons Finset.coe_cons
-/
#print Finset.subset_cons /-
theorem subset_cons (h : a ∉ s) : s ⊆ s.cons a h :=
subset_cons _ _
#align finset.subset_cons Finset.subset_cons
-/
#print Finset.ssubset_cons /-
theorem ssubset_cons (h : a ∉ s) : s ⊂ s.cons a h :=
ssubset_cons h
#align finset.ssubset_cons Finset.ssubset_cons
-/
#print Finset.cons_subset /-
theorem cons_subset {h : a ∉ s} : s.cons a h ⊆ t ↔ a ∈ t ∧ s ⊆ t :=
cons_subset
#align finset.cons_subset Finset.cons_subset
-/
#print Finset.cons_subset_cons /-
@[simp]
theorem cons_subset_cons {hs ht} : s.cons a hs ⊆ t.cons a ht ↔ s ⊆ t := by
rwa [← coe_subset, coe_cons, coe_cons, Set.insert_subset_insert_iff, coe_subset]
#align finset.cons_subset_cons Finset.cons_subset_cons
-/
#print Finset.ssubset_iff_exists_cons_subset /-
theorem ssubset_iff_exists_cons_subset : s ⊂ t ↔ ∃ (a : _)(h : a ∉ s), s.cons a h ⊆ t :=
by
refine' ⟨fun h => _, fun ⟨a, ha, h⟩ => ssubset_of_ssubset_of_subset (ssubset_cons _) h⟩
obtain ⟨a, hs, ht⟩ := not_subset.1 h.2
exact ⟨a, ht, cons_subset.2 ⟨hs, h.subset⟩⟩
#align finset.ssubset_iff_exists_cons_subset Finset.ssubset_iff_exists_cons_subset
-/
end Cons
/-! ### disjoint -/
section Disjoint
-- `f` is a map out of `α`; `s`, `t`, `u` are finsets and `a`, `b` elements of `α`.
variable {f : α → β} {s t u : Finset α} {a b : α}
/- warning: finset.disjoint_left -> Finset.disjoint_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (forall {{a : α}}, (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) -> (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t)))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (forall {{a : α}}, (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) -> (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_left Finset.disjoint_leftₓ'. -/
/-- Two finsets are disjoint iff no element of the first belongs to the second. -/
theorem disjoint_left : Disjoint s t ↔ ∀ ⦃a⦄, a ∈ s → a ∉ t :=
-- Forward: apply lattice `disjoint` to the singleton bounds `{a} ⊆ s` and `{a} ⊆ t`,
-- forcing `{a} ⊆ ⊥`, a contradiction extracted via `singleton_subset_iff`.
⟨fun h a hs ht =>
singleton_subset_iff.mp (h (singleton_subset_iff.mpr hs) (singleton_subset_iff.mpr ht)),
-- Backward: any common lower bound `x` of `s` and `t` can contain no element,
-- hence `x ≤ ⊥`.
fun h x hs ht a ha => h (hs ha) (ht ha)⟩
#align finset.disjoint_left Finset.disjoint_left
/- warning: finset.disjoint_right -> Finset.disjoint_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (forall {{a : α}}, (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t) -> (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s)))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (forall {{a : α}}, (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t) -> (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_right Finset.disjoint_rightₓ'. -/
/-- Two finsets are disjoint iff no element of the second belongs to the first.
Symmetric counterpart of `disjoint_left`, obtained via `disjoint_comm`. -/
theorem disjoint_right : Disjoint s t ↔ ∀ ⦃a⦄, a ∈ t → a ∉ s := by rw [disjoint_comm, disjoint_left]
#align finset.disjoint_right Finset.disjoint_right
/- warning: finset.disjoint_iff_ne -> Finset.disjoint_iff_ne is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (forall (a : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) -> (forall (b : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) b t) -> (Ne.{succ u1} α a b)))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (forall (a : α), (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) -> (forall (b : α), (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) b t) -> (Ne.{succ u1} α a b)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_iff_ne Finset.disjoint_iff_neₓ'. -/
/-- Two finsets are disjoint iff every element of one differs from every element of the
other. -/
theorem disjoint_iff_ne : Disjoint s t ↔ ∀ a ∈ s, ∀ b ∈ t, a ≠ b := by
simp only [disjoint_left, imp_not_comm, forall_eq']
#align finset.disjoint_iff_ne Finset.disjoint_iff_ne
/- warning: finset.disjoint_val -> Finset.disjoint_val is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Multiset.Disjoint.{u1} α (Finset.val.{u1} α s) (Finset.val.{u1} α t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Multiset.Disjoint.{u1} α (Finset.val.{u1} α s) (Finset.val.{u1} α t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_val Finset.disjoint_valₓ'. -/
/-- Disjointness of finsets coincides with `multiset.disjoint` of their underlying
multisets. -/
@[simp]
theorem disjoint_val : s.1.Disjoint t.1 ↔ Disjoint s t :=
-- `multiset.disjoint` is exactly the statement of `disjoint_left`, read in reverse.
disjoint_left.symm
#align finset.disjoint_val Finset.disjoint_val
/- warning: disjoint.forall_ne_finset -> Disjoint.forall_ne_finset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α} {b : α}, (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) -> (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) -> (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) b t) -> (Ne.{succ u1} α a b)
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α} {b : α}, (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) -> (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) -> (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) b t) -> (Ne.{succ u1} α a b)
Case conversion may be inaccurate. Consider using '#align disjoint.forall_ne_finset Disjoint.forall_ne_finsetₓ'. -/
/-- Elements drawn from two disjoint finsets are distinct. -/
theorem Disjoint.forall_ne_finset (h : Disjoint s t) (ha : a ∈ s) (hb : b ∈ t) : a ≠ b :=
-- Specialise `disjoint_iff_ne` at the two given memberships.
disjoint_iff_ne.1 h _ ha _ hb
#align disjoint.forall_ne_finset Disjoint.forall_ne_finset
/- warning: finset.not_disjoint_iff -> Finset.not_disjoint_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)) (Exists.{succ u1} α (fun (a : α) => And (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t)))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)) (Exists.{succ u1} α (fun (a : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t)))
Case conversion may be inaccurate. Consider using '#align finset.not_disjoint_iff Finset.not_disjoint_iffₓ'. -/
/-- Failure of disjointness is witnessed by a common element. -/
theorem not_disjoint_iff : ¬Disjoint s t ↔ ∃ a, a ∈ s ∧ a ∈ t :=
-- Negate `disjoint_left`, then push the negation through `∀` and `→`.
disjoint_left.Not.trans <|
not_forall.trans <| exists_congr fun _ => by rw [not_imp, Classical.not_not]
#align finset.not_disjoint_iff Finset.not_disjoint_iff
/- warning: finset.disjoint_of_subset_left -> Finset.disjoint_of_subset_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) u t) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) u t) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_of_subset_left Finset.disjoint_of_subset_leftₓ'. -/
-- Disjointness is downward closed in the left argument: shrink `u` to `s ⊆ u`.
theorem disjoint_of_subset_left (h : s ⊆ u) (d : Disjoint u t) : Disjoint s t :=
  disjoint_left.2 fun x m₁ => (disjoint_left.1 d) (h m₁)
#align finset.disjoint_of_subset_left Finset.disjoint_of_subset_left
/- warning: finset.disjoint_of_subset_right -> Finset.disjoint_of_subset_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) t u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) t u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s u) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_of_subset_right Finset.disjoint_of_subset_rightₓ'. -/
-- Disjointness is downward closed in the right argument: shrink `u` to `t ⊆ u`.
theorem disjoint_of_subset_right (h : t ⊆ u) (d : Disjoint s u) : Disjoint s t :=
  disjoint_right.2 fun x m₁ => (disjoint_right.1 d) (h m₁)
#align finset.disjoint_of_subset_right Finset.disjoint_of_subset_right
/- warning: finset.disjoint_empty_left -> Finset.disjoint_empty_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)) s
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)) s
Case conversion may be inaccurate. Consider using '#align finset.disjoint_empty_left Finset.disjoint_empty_leftₓ'. -/
-- The empty finset is disjoint from everything (it is `⊥` in the lattice order).
@[simp]
theorem disjoint_empty_left (s : Finset α) : Disjoint ∅ s :=
  disjoint_bot_left
#align finset.disjoint_empty_left Finset.disjoint_empty_left
/- warning: finset.disjoint_empty_right -> Finset.disjoint_empty_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_empty_right Finset.disjoint_empty_rightₓ'. -/
-- Everything is disjoint from the empty finset (it is `⊥` in the lattice order).
@[simp]
theorem disjoint_empty_right (s : Finset α) : Disjoint s ∅ :=
  disjoint_bot_right
#align finset.disjoint_empty_right Finset.disjoint_empty_right
/- warning: finset.disjoint_singleton_left -> Finset.disjoint_singleton_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) s) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) s) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_singleton_left Finset.disjoint_singleton_leftₓ'. -/
-- `{a}` is disjoint from `s` exactly when `a` is not a member of `s`.
@[simp]
theorem disjoint_singleton_left : Disjoint (singleton a) s ↔ a ∉ s := by
  simp only [disjoint_left, mem_singleton, forall_eq]
#align finset.disjoint_singleton_left Finset.disjoint_singleton_left
/- warning: finset.disjoint_singleton_right -> Finset.disjoint_singleton_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a)) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a)) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_singleton_right Finset.disjoint_singleton_rightₓ'. -/
-- Mirror of `disjoint_singleton_left`, obtained by commuting `Disjoint`.
@[simp]
theorem disjoint_singleton_right : Disjoint s (singleton a) ↔ a ∉ s :=
  disjoint_comm.trans disjoint_singleton_left
#align finset.disjoint_singleton_right Finset.disjoint_singleton_right
/- warning: finset.disjoint_singleton -> Finset.disjoint_singleton is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {a : α} {b : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) b)) (Ne.{succ u1} α a b)
but is expected to have type
forall {α : Type.{u1}} {a : α} {b : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) b)) (Ne.{succ u1} α a b)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_singleton Finset.disjoint_singletonₓ'. -/
-- Two singletons are disjoint exactly when their elements differ.
@[simp]
theorem disjoint_singleton : Disjoint ({a} : Finset α) {b} ↔ a ≠ b := by
  rw [disjoint_singleton_left, mem_singleton]
#align finset.disjoint_singleton Finset.disjoint_singleton
/- warning: finset.disjoint_self_iff_empty -> Finset.disjoint_self_iff_empty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α), Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s s) (Eq.{succ u1} (Finset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α), Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s s) (Eq.{succ u1} (Finset.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_self_iff_empty Finset.disjoint_self_iff_emptyₓ'. -/
-- A finset is disjoint from itself only when it is empty (specialisation of `disjoint_self`).
theorem disjoint_self_iff_empty (s : Finset α) : Disjoint s s ↔ s = ∅ :=
  disjoint_self
#align finset.disjoint_self_iff_empty Finset.disjoint_self_iff_empty
/- warning: finset.disjoint_coe -> Finset.disjoint_coe is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} α) (Preorder.toLE.{u1} (Set.{u1} α) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) (Finset.toSet.{u1} α s) (Finset.toSet.{u1} α t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_coe Finset.disjoint_coeₓ'. -/
-- Coercion to `Set` preserves and reflects disjointness; tagged `norm_cast`
-- so `push_cast`/`norm_cast` can move `Disjoint` across the coercion.
@[simp, norm_cast]
theorem disjoint_coe : Disjoint (s : Set α) t ↔ Disjoint s t :=
  by
  -- Both sides unfold to the same element-wise statement.
  rw [Finset.disjoint_left, Set.disjoint_left]
  rfl
#align finset.disjoint_coe Finset.disjoint_coe
/- warning: finset.pairwise_disjoint_coe -> Finset.pairwiseDisjoint_coe is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {ι : Type.{u2}} {s : Set.{u2} ι} {f : ι -> (Finset.{u1} α)}, Iff (Set.PairwiseDisjoint.{u1, u2} (Set.{u1} α) ι (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α)))))) (GeneralizedBooleanAlgebra.toOrderBot.{u1} (Set.{u1} α) (BooleanAlgebra.toGeneralizedBooleanAlgebra.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α))) s (fun (i : ι) => (fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (f i))) (Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} α) ι (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s f)
but is expected to have type
forall {α : Type.{u1}} {ι : Type.{u2}} {s : Set.{u2} ι} {f : ι -> (Finset.{u1} α)}, Iff (Set.PairwiseDisjoint.{u1, u2} (Set.{u1} α) ι (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) (BoundedOrder.toOrderBot.{u1} (Set.{u1} α) (Preorder.toLE.{u1} (Set.{u1} α) (PartialOrder.toPreorder.{u1} (Set.{u1} α) (CompleteSemilatticeInf.toPartialOrder.{u1} (Set.{u1} α) (CompleteLattice.toCompleteSemilatticeInf.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))))) (CompleteLattice.toBoundedOrder.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α)))))) s (fun (i : ι) => Finset.toSet.{u1} α (f i))) (Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} α) ι (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s f)
Case conversion may be inaccurate. Consider using '#align finset.pairwise_disjoint_coe Finset.pairwiseDisjoint_coeₓ'. -/
-- A family of finsets is pairwise disjoint iff the coerced family of sets is;
-- reduces pointwise to `disjoint_coe`.
@[simp, norm_cast]
theorem pairwiseDisjoint_coe {ι : Type _} {s : Set ι} {f : ι → Finset α} :
    s.PairwiseDisjoint (fun i => f i : ι → Set α) ↔ s.PairwiseDisjoint f :=
  forall₅_congr fun _ _ _ _ _ => disjoint_coe
#align finset.pairwise_disjoint_coe Finset.pairwiseDisjoint_coe
end Disjoint
/-! ### disjoint union -/
/- warning: finset.disj_union -> Finset.disjUnion is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α), (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) -> (Finset.{u1} α)
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α), (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) -> (Finset.{u1} α)
Case conversion may be inaccurate. Consider using '#align finset.disj_union Finset.disjUnionₓ'. -/
/-- `disj_union s t h` is the set such that `a ∈ disj_union s t h` iff `a ∈ s` or `a ∈ t`.
It is the same as `s ∪ t`, but it does not require decidable equality on the type. The hypothesis
ensures that the sets are disjoint. -/
def disjUnion (s t : Finset α) (h : Disjoint s t) : Finset α :=
  -- Concatenate the underlying multisets; the result has no duplicates because
  -- each component is nodup and their values are disjoint (`Multiset.nodup_add`).
  ⟨s.1 + t.1, Multiset.nodup_add.2 ⟨s.2, t.2, disjoint_val.2 h⟩⟩
#align finset.disj_union Finset.disjUnion
/- warning: finset.mem_disj_union -> Finset.mem_disjUnion is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t} {a : α}, Iff (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a (Finset.disjUnion.{u1} α s t h)) (Or (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t} {a : α}, Iff (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a (Finset.disjUnion.{u1} α s t h)) (Or (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t))
Case conversion may be inaccurate. Consider using '#align finset.mem_disj_union Finset.mem_disjUnionₓ'. -/
-- Membership in `disjUnion s t h` is membership in either component.
@[simp]
theorem mem_disjUnion {α s t h a} : a ∈ @disjUnion α s t h ↔ a ∈ s ∨ a ∈ t := by
  -- Destructure down to the underlying lists and apply `List.mem_append`.
  rcases s with ⟨⟨s⟩⟩ <;> rcases t with ⟨⟨t⟩⟩ <;> apply List.mem_append
#align finset.mem_disj_union Finset.mem_disjUnion
/- warning: finset.disj_union_comm -> Finset.disjUnion_comm is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s t h) (Finset.disjUnion.{u1} α t s (Disjoint.symm.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t h))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s t h) (Finset.disjUnion.{u1} α t s (Disjoint.symm.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t h))
Case conversion may be inaccurate. Consider using '#align finset.disj_union_comm Finset.disjUnion_commₓ'. -/
-- `disjUnion` is commutative (the disjointness witness flips via `h.symm`);
-- on underlying multisets this is just commutativity of `+`.
theorem disjUnion_comm (s t : Finset α) (h : Disjoint s t) :
    disjUnion s t h = disjUnion t s h.symm :=
  eq_of_veq <| add_comm _ _
#align finset.disj_union_comm Finset.disjUnion_comm
/- warning: finset.empty_disj_union -> Finset.empty_disjUnion is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (t : Finset.{u1} α) (h : optParam.{0} (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)) t) (disjoint_bot_left.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) t)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)) t h) t
but is expected to have type
forall {α : Type.{u1}} (t : Finset.{u1} α) (h : optParam.{0} (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)) t) (disjoint_bot_left.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) t)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)) t h) t
Case conversion may be inaccurate. Consider using '#align finset.empty_disj_union Finset.empty_disjUnionₓ'. -/
-- `∅` is a left identity for `disjUnion`; the witness defaults to `disjoint_bot_left`.
@[simp]
theorem empty_disjUnion (t : Finset α) (h : Disjoint ∅ t := disjoint_bot_left) :
    disjUnion ∅ t h = t :=
  eq_of_veq <| zero_add _
#align finset.empty_disj_union Finset.empty_disjUnion
/- warning: finset.disj_union_empty -> Finset.disjUnion_empty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (h : optParam.{0} (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α))) (disjoint_bot_right.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)) h) s
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (h : optParam.{0} (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α))) (disjoint_bot_right.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)) h) s
Case conversion may be inaccurate. Consider using '#align finset.disj_union_empty Finset.disjUnion_emptyₓ'. -/
-- `∅` is a right identity for `disjUnion`; the witness defaults to `disjoint_bot_right`.
@[simp]
theorem disjUnion_empty (s : Finset α) (h : Disjoint s ∅ := disjoint_bot_right) :
    disjUnion s ∅ h = s :=
  eq_of_veq <| add_zero _
#align finset.disj_union_empty Finset.disjUnion_empty
/- warning: finset.singleton_disj_union -> Finset.singleton_disjUnion is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (a : α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) t h) (Finset.cons.{u1} α a t (Iff.mp (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) t) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t)) (Finset.disjoint_singleton_left.{u1} α t a) h))
but is expected to have type
forall {α : Type.{u1}} (a : α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) t h) (Finset.cons.{u1} α a t (Iff.mp (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) t) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t)) (Finset.disjoint_singleton_left.{u1} α t a) h))
Case conversion may be inaccurate. Consider using '#align finset.singleton_disj_union Finset.singleton_disjUnionₓ'. -/
-- Disjoint union with a singleton on the left is `cons`:
-- on multisets this is `Multiset.singleton_add`.
theorem singleton_disjUnion (a : α) (t : Finset α) (h : Disjoint {a} t) :
    disjUnion {a} t h = cons a t (disjoint_singleton_left.mp h) :=
  eq_of_veq <| Multiset.singleton_add _ _
#align finset.singleton_disj_union Finset.singleton_disjUnion
/- warning: finset.disj_union_singleton -> Finset.disjUnion_singleton is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (a : α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) h) (Finset.cons.{u1} α a s (Iff.mp (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a)) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s)) (Finset.disjoint_singleton_right.{u1} α s a) h))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (a : α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a)), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) h) (Finset.cons.{u1} α a s (Iff.mp (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a)) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s)) (Finset.disjoint_singleton_right.{u1} α s a) h))
Case conversion may be inaccurate. Consider using '#align finset.disj_union_singleton Finset.disjUnion_singletonₓ'. -/
-- Disjoint union with a singleton on the right is `cons`: commute with
-- `disjUnion_comm` and reduce to `singleton_disjUnion`.
theorem disjUnion_singleton (s : Finset α) (a : α) (h : Disjoint s {a}) :
    disjUnion s {a} h = cons a s (disjoint_singleton_right.mp h) := by
  -- FIX: the `rw` previously referenced the untranslated Lean 3 names
  -- `disj_union_comm` / `singleton_disj_union`; the declarations in this file
  -- are the Lean 4 names `disjUnion_comm` and `singleton_disjUnion`.
  rw [disjUnion_comm, singleton_disjUnion]
#align finset.disj_union_singleton Finset.disjUnion_singleton
/-! ### insert -/
section Insert

-- `u`, `v` are extra finset variables used later in this section.
variable [DecidableEq α] {s t u v : Finset α} {a b : α}

/-- `insert a s` is the set `{a} ∪ s` containing `a` and the elements of `s`. -/
instance : Insert α (Finset α) :=
  -- `ndinsert` inserts into the underlying multiset without creating duplicates,
  -- so the nodup invariant is preserved.
  ⟨fun a s => ⟨_, s.2.ndinsert a⟩⟩
#print Finset.insert_def /-
theorem insert_def (a : α) (s : Finset α) : insert a s = ⟨_, s.2.ndinsert a⟩ :=
rfl
#align finset.insert_def Finset.insert_def
-/
#print Finset.insert_val /-
@[simp]
theorem insert_val (a : α) (s : Finset α) : (insert a s).1 = ndinsert a s.1 :=
rfl
#align finset.insert_val Finset.insert_val
-/
#print Finset.insert_val' /-
theorem insert_val' (a : α) (s : Finset α) : (insert a s).1 = dedup (a ::ₘ s.1) := by
rw [dedup_cons, dedup_eq_self] <;> rfl
#align finset.insert_val' Finset.insert_val'
-/
#print Finset.insert_val_of_not_mem /-
theorem insert_val_of_not_mem {a : α} {s : Finset α} (h : a ∉ s) : (insert a s).1 = a ::ₘ s.1 := by
rw [insert_val, ndinsert_of_not_mem h]
#align finset.insert_val_of_not_mem Finset.insert_val_of_not_mem
-/
#print Finset.mem_insert /-
@[simp]
theorem mem_insert : a ∈ insert b s ↔ a = b ∨ a ∈ s :=
mem_ndinsert
#align finset.mem_insert Finset.mem_insert
-/
#print Finset.mem_insert_self /-
theorem mem_insert_self (a : α) (s : Finset α) : a ∈ insert a s :=
mem_ndinsert_self a s.1
#align finset.mem_insert_self Finset.mem_insert_self
-/
#print Finset.mem_insert_of_mem /-
theorem mem_insert_of_mem (h : a ∈ s) : a ∈ insert b s :=
mem_ndinsert_of_mem h
#align finset.mem_insert_of_mem Finset.mem_insert_of_mem
-/
#print Finset.mem_of_mem_insert_of_ne /-
theorem mem_of_mem_insert_of_ne (h : b ∈ insert a s) : b ≠ a → b ∈ s :=
(mem_insert.1 h).resolve_left
#align finset.mem_of_mem_insert_of_ne Finset.mem_of_mem_insert_of_ne
-/
#print Finset.eq_of_not_mem_of_mem_insert /-
theorem eq_of_not_mem_of_mem_insert (ha : b ∈ insert a s) (hb : b ∉ s) : b = a :=
(mem_insert.1 ha).resolve_right hb
#align finset.eq_of_not_mem_of_mem_insert Finset.eq_of_not_mem_of_mem_insert
-/
#print Finset.cons_eq_insert /-
@[simp]
theorem cons_eq_insert (a s h) : @cons α a s h = insert a s :=
ext fun a => by simp
#align finset.cons_eq_insert Finset.cons_eq_insert
-/
#print Finset.coe_insert /-
@[simp, norm_cast]
theorem coe_insert (a : α) (s : Finset α) : ↑(insert a s) = (insert a s : Set α) :=
Set.ext fun x => by simp only [mem_coe, mem_insert, Set.mem_insert_iff]
#align finset.coe_insert Finset.coe_insert
-/
#print Finset.mem_insert_coe /-
theorem mem_insert_coe {s : Finset α} {x y : α} : x ∈ insert y s ↔ x ∈ insert y (s : Set α) := by
simp
#align finset.mem_insert_coe Finset.mem_insert_coe
-/
-- `{a}` built as `insert a ∅` agrees with the `Singleton` instance,
-- proved by extensionality plus `simp`.
instance : IsLawfulSingleton α (Finset α) :=
  ⟨fun a => by
    ext
    simp⟩
#print Finset.insert_eq_of_mem /-
@[simp]
theorem insert_eq_of_mem (h : a ∈ s) : insert a s = s :=
eq_of_veq <| ndinsert_of_mem h
#align finset.insert_eq_of_mem Finset.insert_eq_of_mem
-/
#print Finset.insert_eq_self /-
@[simp]
theorem insert_eq_self : insert a s = s ↔ a ∈ s :=
⟨fun h => h ▸ mem_insert_self _ _, insert_eq_of_mem⟩
#align finset.insert_eq_self Finset.insert_eq_self
-/
#print Finset.insert_ne_self /-
theorem insert_ne_self : insert a s ≠ s ↔ a ∉ s :=
insert_eq_self.Not
#align finset.insert_ne_self Finset.insert_ne_self
-/
#print Finset.pair_eq_singleton /-
@[simp]
theorem pair_eq_singleton (a : α) : ({a, a} : Finset α) = {a} :=
insert_eq_of_mem <| mem_singleton_self _
#align finset.pair_eq_singleton Finset.pair_eq_singleton
-/
#print Finset.Insert.comm /-
theorem Insert.comm (a b : α) (s : Finset α) : insert a (insert b s) = insert b (insert a s) :=
ext fun x => by simp only [mem_insert, or_left_comm]
#align finset.insert.comm Finset.Insert.comm
-/
#print Finset.coe_pair /-
@[simp, norm_cast]
theorem coe_pair {a b : α} : (({a, b} : Finset α) : Set α) = {a, b} :=
by
ext
simp
#align finset.coe_pair Finset.coe_pair
-/
#print Finset.coe_eq_pair /-
@[simp, norm_cast]
theorem coe_eq_pair {s : Finset α} {a b : α} : (s : Set α) = {a, b} ↔ s = {a, b} := by
rw [← coe_pair, coe_inj]
#align finset.coe_eq_pair Finset.coe_eq_pair
-/
#print Finset.pair_comm /-
theorem pair_comm (a b : α) : ({a, b} : Finset α) = {b, a} :=
Insert.comm a b ∅
#align finset.pair_comm Finset.pair_comm
-/
#print Finset.insert_idem /-
@[simp]
theorem insert_idem (a : α) (s : Finset α) : insert a (insert a s) = insert a s :=
ext fun x => by simp only [mem_insert, or.assoc.symm, or_self_iff]
#align finset.insert_idem Finset.insert_idem
-/
#print Finset.insert_nonempty /-
@[simp]
theorem insert_nonempty (a : α) (s : Finset α) : (insert a s).Nonempty :=
⟨a, mem_insert_self a s⟩
#align finset.insert_nonempty Finset.insert_nonempty
-/
#print Finset.insert_ne_empty /-
@[simp]
theorem insert_ne_empty (a : α) (s : Finset α) : insert a s ≠ ∅ :=
(insert_nonempty a s).ne_empty
#align finset.insert_ne_empty Finset.insert_ne_empty
-/
/-!
The universe annotation is required for the following instance, possibly this is a bug in Lean. See
leanprover.zulipchat.com/#narrow/stream/113488-general/topic/strange.20error.20(universe.20issue.3F)
-/
-- The coercion to `Set` of `insert i s` is a nonempty type: `insert` is never
-- empty (`insert_nonempty`), and `Set.Nonempty` lifts to the subtype.
instance {α : Type u} [DecidableEq α] (i : α) (s : Finset α) :
    Nonempty.{u + 1} ((insert i s : Finset α) : Set α) :=
  (Finset.coe_nonempty.mpr (s.insert_nonempty i)).to_subtype
#print Finset.ne_insert_of_not_mem /-
theorem ne_insert_of_not_mem (s t : Finset α) {a : α} (h : a ∉ s) : s ≠ insert a t :=
by
contrapose! h
simp [h]
#align finset.ne_insert_of_not_mem Finset.ne_insert_of_not_mem
-/
#print Finset.insert_subset /-
theorem insert_subset : insert a s ⊆ t ↔ a ∈ t ∧ s ⊆ t := by
simp only [subset_iff, mem_insert, forall_eq, or_imp, forall_and]
#align finset.insert_subset Finset.insert_subset
-/
#print Finset.subset_insert /-
theorem subset_insert (a : α) (s : Finset α) : s ⊆ insert a s := fun b => mem_insert_of_mem
#align finset.subset_insert Finset.subset_insert
-/
#print Finset.insert_subset_insert /-
theorem insert_subset_insert (a : α) {s t : Finset α} (h : s ⊆ t) : insert a s ⊆ insert a t :=
insert_subset.2 ⟨mem_insert_self _ _, Subset.trans h (subset_insert _ _)⟩
#align finset.insert_subset_insert Finset.insert_subset_insert
-/
#print Finset.insert_inj /-
theorem insert_inj (ha : a ∉ s) : insert a s = insert b s ↔ a = b :=
⟨fun h => eq_of_not_mem_of_mem_insert (h.subst <| mem_insert_self _ _) ha, congr_arg _⟩
#align finset.insert_inj Finset.insert_inj
-/
/- warning: finset.insert_inj_on -> Finset.insert_inj_on is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α), Set.InjOn.{u1, u1} α (Finset.{u1} α) (fun (a : α) => Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a s) (HasCompl.compl.{u1} (Set.{u1} α) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α), Set.InjOn.{u1, u1} α (Finset.{u1} α) (fun (a : α) => Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.instInsertFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a s) (HasCompl.compl.{u1} (Set.{u1} α) (BooleanAlgebra.toHasCompl.{u1} (Set.{u1} α) (Set.instBooleanAlgebraSet.{u1} α)) (Finset.toSet.{u1} α s))
Case conversion may be inaccurate. Consider using '#align finset.insert_inj_on Finset.insert_inj_onₓ'. -/
/-- The map `a ↦ insert a s` is injective on the complement of `s`:
for `a ∉ s`, `insert a s = insert b s` forces `a = b` (via `insert_inj`). -/
theorem insert_inj_on (s : Finset α) : Set.InjOn (fun a => insert a s) (sᶜ) := fun a h b _ =>
(insert_inj h).1
#align finset.insert_inj_on Finset.insert_inj_on
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (a «expr ∉ » s) -/
#print Finset.ssubset_iff /-
theorem ssubset_iff : s ⊂ t ↔ ∃ (a : _)(_ : a ∉ s), insert a s ⊆ t := by
exact_mod_cast @Set.ssubset_iff_insert α s t
#align finset.ssubset_iff Finset.ssubset_iff
-/
#print Finset.ssubset_insert /-
theorem ssubset_insert (h : a ∉ s) : s ⊂ insert a s :=
ssubset_iff.mpr ⟨a, h, Subset.rfl⟩
#align finset.ssubset_insert Finset.ssubset_insert
-/
#print Finset.cons_induction /-
@[elab_as_elim]
theorem cons_induction {α : Type _} {p : Finset α → Prop} (h₁ : p ∅)
(h₂ : ∀ ⦃a : α⦄ {s : Finset α} (h : a ∉ s), p s → p (cons a s h)) : ∀ s, p s
| ⟨s, nd⟩ =>
Multiset.induction_on s (fun _ => h₁)
(fun a s IH nd => by
cases' nodup_cons.1 nd with m nd'
rw [← (eq_of_veq _ : cons a (Finset.mk s _) m = ⟨a ::ₘ s, nd⟩)]
· exact h₂ m (IH nd')
· rw [cons_val])
nd
#align finset.cons_induction Finset.cons_induction
-/
#print Finset.cons_induction_on /-
@[elab_as_elim]
theorem cons_induction_on {α : Type _} {p : Finset α → Prop} (s : Finset α) (h₁ : p ∅)
(h₂ : ∀ ⦃a : α⦄ {s : Finset α} (h : a ∉ s), p s → p (cons a s h)) : p s :=
cons_induction h₁ h₂ s
#align finset.cons_induction_on Finset.cons_induction_on
-/
#print Finset.induction /-
@[elab_as_elim]
protected theorem induction {α : Type _} {p : Finset α → Prop} [DecidableEq α] (h₁ : p ∅)
(h₂ : ∀ ⦃a : α⦄ {s : Finset α}, a ∉ s → p s → p (insert a s)) : ∀ s, p s :=
cons_induction h₁ fun a s ha => (s.cons_eq_insert a ha).symm ▸ h₂ ha
#align finset.induction Finset.induction
-/
#print Finset.induction_on /-
/-- To prove a proposition about an arbitrary `finset α`,
it suffices to prove it for the empty `finset`,
and to show that if it holds for some `finset α`,
then it holds for the `finset` obtained by inserting a new element.
-/
@[elab_as_elim]
protected theorem induction_on {α : Type _} {p : Finset α → Prop} [DecidableEq α] (s : Finset α)
(h₁ : p ∅) (h₂ : ∀ ⦃a : α⦄ {s : Finset α}, a ∉ s → p s → p (insert a s)) : p s :=
Finset.induction h₁ h₂ s
#align finset.induction_on Finset.induction_on
-/
#print Finset.induction_on' /-
/-- To prove a proposition about `S : finset α`,
it suffices to prove it for the empty `finset`,
and to show that if it holds for some `finset α ⊆ S`,
then it holds for the `finset` obtained by inserting a new element of `S`.
-/
@[elab_as_elim]
theorem induction_on' {α : Type _} {p : Finset α → Prop} [DecidableEq α] (S : Finset α) (h₁ : p ∅)
(h₂ : ∀ {a s}, a ∈ S → s ⊆ S → a ∉ s → p s → p (insert a s)) : p S :=
@Finset.induction_on α (fun T => T ⊆ S → p T) _ S (fun _ => h₁)
(fun a s has hqs hs =>
let ⟨hS, sS⟩ := Finset.insert_subset.1 hs
h₂ hS sS has (hqs sS))
(Finset.Subset.refl S)
#align finset.induction_on' Finset.induction_on'
-/
#print Finset.Nonempty.cons_induction /-
/-- To prove a proposition about a nonempty `s : finset α`, it suffices to show it holds for all
singletons and that if it holds for nonempty `t : finset α`, then it also holds for the `finset`
obtained by inserting an element in `t`. -/
@[elab_as_elim]
theorem Nonempty.cons_induction {α : Type _} {p : ∀ s : Finset α, s.Nonempty → Prop}
(h₀ : ∀ a, p {a} (singleton_nonempty _))
(h₁ : ∀ ⦃a⦄ (s) (h : a ∉ s) (hs), p s hs → p (Finset.cons a s h) (nonempty_cons h))
{s : Finset α} (hs : s.Nonempty) : p s hs :=
by
induction' s using Finset.cons_induction with a t ha h
· exact (not_nonempty_empty hs).elim
obtain rfl | ht := t.eq_empty_or_nonempty
· exact h₀ a
· exact h₁ t ha ht (h ht)
#align finset.nonempty.cons_induction Finset.Nonempty.cons_induction
-/
#print Finset.subtypeInsertEquivOption /-
/-- Inserting an element to a finite set is equivalent to the option type. -/
def subtypeInsertEquivOption {t : Finset α} {x : α} (h : x ∉ t) :
{ i // i ∈ insert x t } ≃ Option { i // i ∈ t } :=
by
refine'
{ toFun := fun y => if h : ↑y = x then none else some ⟨y, (mem_insert.mp y.2).resolve_left h⟩
invFun := fun y => y.elim ⟨x, mem_insert_self _ _⟩ fun z => ⟨z, mem_insert_of_mem z.2⟩.. }
· intro y
by_cases h : ↑y = x
simp only [Subtype.ext_iff, h, Option.elim', dif_pos, Subtype.coe_mk]
simp only [h, Option.elim', dif_neg, not_false_iff, Subtype.coe_eta, Subtype.coe_mk]
· rintro (_ | y)
simp only [Option.elim', dif_pos, Subtype.coe_mk]
have : ↑y ≠ x := by
rintro ⟨⟩
exact h y.2
simp only [this, Option.elim', Subtype.eta, dif_neg, not_false_iff, Subtype.coe_eta,
Subtype.coe_mk]
#align finset.subtype_insert_equiv_option Finset.subtypeInsertEquivOption
-/
/- warning: finset.disjoint_insert_left -> Finset.disjoint_insert_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a s) t) (And (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.instInsertFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a s) t) (And (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_insert_left Finset.disjoint_insert_leftₓ'. -/
/-- `insert a s` is disjoint from `t` iff `a ∉ t` and `s` is disjoint from `t`. -/
@[simp]
theorem disjoint_insert_left : Disjoint (insert a s) t ↔ a ∉ t ∧ Disjoint s t := by
simp only [disjoint_left, mem_insert, or_imp, forall_and, forall_eq]
#align finset.disjoint_insert_left Finset.disjoint_insert_left
/- warning: finset.disjoint_insert_right -> Finset.disjoint_insert_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a t)) (And (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.instInsertFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) a t)) (And (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s)) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_insert_right Finset.disjoint_insert_rightₓ'. -/
/-- `s` is disjoint from `insert a t` iff `a ∉ s` and `s` is disjoint from `t`.
Proved from `disjoint_insert_left` by commuting `Disjoint`. -/
@[simp]
theorem disjoint_insert_right : Disjoint s (insert a t) ↔ a ∉ s ∧ Disjoint s t :=
disjoint_comm.trans <| by rw [disjoint_insert_left, disjoint_comm]
#align finset.disjoint_insert_right Finset.disjoint_insert_right
end Insert
/-! ### Lattice structure -/
section Lattice
variable [DecidableEq α] {s t u v : Finset α} {a b : α}
/-- `s ∪ t` is the set such that `a ∈ s ∪ t` iff `a ∈ s` or `a ∈ t`. -/
instance : Union (Finset α) :=
-- `ndunion` operates on the underlying multisets; the nodup proof `t.2` carries over.
⟨fun s t => ⟨_, t.2.ndunion s.1⟩⟩
/-- `s ∩ t` is the set such that `a ∈ s ∩ t` iff `a ∈ s` and `a ∈ t`. -/
instance : Inter (Finset α) :=
-- `ndinter` filters `s`'s underlying multiset, so `s.2` supplies the nodup proof.
⟨fun s t => ⟨_, s.2.ndinter t.1⟩⟩
/-- `Finset α` is a lattice, with `⊔ = ∪` and `⊓ = ∩` (ordered by `⊆` via
`Finset.partialOrder`). Every lattice axiom is verified pointwise through
`mem_ndunion` / `mem_ndinter`. -/
instance : Lattice (Finset α) :=
{ Finset.partialOrder with
sup := (· ∪ ·)
sup_le := fun s t u hs ht a ha => (mem_ndunion.1 ha).elim (fun h => hs h) fun h => ht h
le_sup_left := fun s t a h => mem_ndunion.2 <| Or.inl h
le_sup_right := fun s t a h => mem_ndunion.2 <| Or.inr h
inf := (· ∩ ·)
le_inf := fun s t u ht hu a h => mem_ndinter.2 ⟨ht h, hu h⟩
inf_le_left := fun s t a h => (mem_ndinter.1 h).1
inf_le_right := fun s t a h => (mem_ndinter.1 h).2 }
/- warning: finset.sup_eq_union -> Finset.sup_eq_union is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α], Eq.{succ u1} ((Finset.{u1} α) -> (Finset.{u1} α) -> (Finset.{u1} α)) (Sup.sup.{u1} (Finset.{u1} α) (SemilatticeSup.toHasSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b))))) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α], Eq.{succ u1} ((Finset.{u1} α) -> (Finset.{u1} α) -> (Finset.{u1} α)) (Sup.sup.{u1} (Finset.{u1} α) (SemilatticeSup.toSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b))))) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))
Case conversion may be inaccurate. Consider using '#align finset.sup_eq_union Finset.sup_eq_unionₓ'. -/
/-- The lattice `⊔` on `Finset α` is definitionally union (`rfl`). -/
@[simp]
theorem sup_eq_union : ((· ⊔ ·) : Finset α → Finset α → Finset α) = (· ∪ ·) :=
rfl
#align finset.sup_eq_union Finset.sup_eq_union
/- warning: finset.inf_eq_inter -> Finset.inf_eq_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α], Eq.{succ u1} ((Finset.{u1} α) -> (Finset.{u1} α) -> (Finset.{u1} α)) (Inf.inf.{u1} (Finset.{u1} α) (SemilatticeInf.toHasInf.{u1} (Finset.{u1} α) (Lattice.toSemilatticeInf.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b))))) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α], Eq.{succ u1} ((Finset.{u1} α) -> (Finset.{u1} α) -> (Finset.{u1} α)) (Inf.inf.{u1} (Finset.{u1} α) (Lattice.toInf.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))
Case conversion may be inaccurate. Consider using '#align finset.inf_eq_inter Finset.inf_eq_interₓ'. -/
/-- The lattice `⊓` on `Finset α` is definitionally intersection (`rfl`). -/
@[simp]
theorem inf_eq_inter : ((· ⊓ ·) : Finset α → Finset α → Finset α) = (· ∩ ·) :=
rfl
#align finset.inf_eq_inter Finset.inf_eq_inter
/- warning: finset.disjoint_iff_inter_eq_empty -> Finset.disjoint_iff_inter_eq_empty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (Eq.{succ u1} (Finset.{u1} α) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (Eq.{succ u1} (Finset.{u1} α) (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_iff_inter_eq_empty Finset.disjoint_iff_inter_eq_emptyₓ'. -/
/-- Two finsets are disjoint iff their intersection is empty;
a specialization of the order-theoretic `disjoint_iff` (`⊓ = ⊥`). -/
theorem disjoint_iff_inter_eq_empty : Disjoint s t ↔ s ∩ t = ∅ :=
disjoint_iff
#align finset.disjoint_iff_inter_eq_empty Finset.disjoint_iff_inter_eq_empty
/- warning: finset.decidable_disjoint -> Finset.decidableDisjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (U : Finset.{u1} α) (V : Finset.{u1} α), Decidable (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) U V)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (U : Finset.{u1} α) (V : Finset.{u1} α), Decidable (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) U V)
Case conversion may be inaccurate. Consider using '#align finset.decidable_disjoint Finset.decidableDisjointₓ'. -/
/-- Disjointness of two finsets is decidable, by deciding the
membership characterization given by `disjoint_left`. -/
instance decidableDisjoint (U V : Finset α) : Decidable (Disjoint U V) :=
decidable_of_iff _ disjoint_left.symm
#align finset.decidable_disjoint Finset.decidableDisjoint
/-! #### union -/
#print Finset.union_val_nd /-
theorem union_val_nd (s t : Finset α) : (s ∪ t).1 = ndunion s.1 t.1 :=
rfl
#align finset.union_val_nd Finset.union_val_nd
-/
#print Finset.union_val /-
@[simp]
theorem union_val (s t : Finset α) : (s ∪ t).1 = s.1 ∪ t.1 :=
ndunion_eq_union s.2
#align finset.union_val Finset.union_val
-/
#print Finset.mem_union /-
@[simp]
theorem mem_union : a ∈ s ∪ t ↔ a ∈ s ∨ a ∈ t :=
mem_ndunion
#align finset.mem_union Finset.mem_union
-/
/- warning: finset.disj_union_eq_union -> Finset.disjUnion_eq_union is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s t h) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.disjUnion.{u1} α s t h) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)
Case conversion may be inaccurate. Consider using '#align finset.disj_union_eq_union Finset.disjUnion_eq_unionₓ'. -/
@[simp]
theorem disjUnion_eq_union (s t h) : @disjUnion α s t h = s ∪ t :=
ext fun a => by simp
#align finset.disj_union_eq_union Finset.disjUnion_eq_union
#print Finset.mem_union_left /-
theorem mem_union_left (t : Finset α) (h : a ∈ s) : a ∈ s ∪ t :=
mem_union.2 <| Or.inl h
#align finset.mem_union_left Finset.mem_union_left
-/
#print Finset.mem_union_right /-
theorem mem_union_right (s : Finset α) (h : a ∈ t) : a ∈ s ∪ t :=
mem_union.2 <| Or.inr h
#align finset.mem_union_right Finset.mem_union_right
-/
#print Finset.forall_mem_union /-
theorem forall_mem_union {p : α → Prop} : (∀ a ∈ s ∪ t, p a) ↔ (∀ a ∈ s, p a) ∧ ∀ a ∈ t, p a :=
⟨fun h => ⟨fun a => h a ∘ mem_union_left _, fun b => h b ∘ mem_union_right _⟩, fun h ab hab =>
(mem_union.mp hab).elim (h.1 _) (h.2 _)⟩
#align finset.forall_mem_union Finset.forall_mem_union
-/
#print Finset.not_mem_union /-
theorem not_mem_union : a ∉ s ∪ t ↔ a ∉ s ∧ a ∉ t := by rw [mem_union, not_or]
#align finset.not_mem_union Finset.not_mem_union
-/
/- warning: finset.coe_union -> Finset.coe_union is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (Union.union.{u1} (Set.{u1} α) (Set.hasUnion.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₁) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₂))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) (Finset.toSet.{u1} α (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (Union.union.{u1} (Set.{u1} α) (Set.instUnionSet.{u1} α) (Finset.toSet.{u1} α s₁) (Finset.toSet.{u1} α s₂))
Case conversion may be inaccurate. Consider using '#align finset.coe_union Finset.coe_unionₓ'. -/
/-- The coercion from `Finset α` to `Set α` commutes with union. -/
@[simp, norm_cast]
theorem coe_union (s₁ s₂ : Finset α) : ↑(s₁ ∪ s₂) = (s₁ ∪ s₂ : Set α) :=
Set.ext fun x => mem_union
#align finset.coe_union Finset.coe_union
#print Finset.union_subset /-
theorem union_subset (hs : s ⊆ u) : t ⊆ u → s ∪ t ⊆ u :=
sup_le <| le_iff_subset.2 hs
#align finset.union_subset Finset.union_subset
-/
#print Finset.subset_union_left /-
theorem subset_union_left (s₁ s₂ : Finset α) : s₁ ⊆ s₁ ∪ s₂ := fun x => mem_union_left _
#align finset.subset_union_left Finset.subset_union_left
-/
#print Finset.subset_union_right /-
theorem subset_union_right (s₁ s₂ : Finset α) : s₂ ⊆ s₁ ∪ s₂ := fun x => mem_union_right _
#align finset.subset_union_right Finset.subset_union_right
-/
#print Finset.union_subset_union /-
theorem union_subset_union (hsu : s ⊆ u) (htv : t ⊆ v) : s ∪ t ⊆ u ∪ v :=
sup_le_sup (le_iff_subset.2 hsu) htv
#align finset.union_subset_union Finset.union_subset_union
-/
#print Finset.union_comm /-
theorem union_comm (s₁ s₂ : Finset α) : s₁ ∪ s₂ = s₂ ∪ s₁ :=
sup_comm
#align finset.union_comm Finset.union_comm
-/
-- `(· ∪ ·)` on `Finset α` is commutative (from `union_comm`).
instance : IsCommutative (Finset α) (· ∪ ·) :=
⟨union_comm⟩
#print Finset.union_assoc /-
@[simp]
theorem union_assoc (s₁ s₂ s₃ : Finset α) : s₁ ∪ s₂ ∪ s₃ = s₁ ∪ (s₂ ∪ s₃) :=
sup_assoc
#align finset.union_assoc Finset.union_assoc
-/
-- `(· ∪ ·)` on `Finset α` is associative (from `union_assoc`).
instance : IsAssociative (Finset α) (· ∪ ·) :=
⟨union_assoc⟩
#print Finset.union_idempotent /-
@[simp]
theorem union_idempotent (s : Finset α) : s ∪ s = s :=
sup_idem
#align finset.union_idempotent Finset.union_idempotent
-/
-- `(· ∪ ·)` on `Finset α` is idempotent (from `union_idempotent`).
instance : IsIdempotent (Finset α) (· ∪ ·) :=
⟨union_idempotent⟩
#print Finset.union_subset_left /-
theorem union_subset_left (h : s ∪ t ⊆ u) : s ⊆ u :=
(subset_union_left _ _).trans h
#align finset.union_subset_left Finset.union_subset_left
-/
#print Finset.union_subset_right /-
theorem union_subset_right {s t u : Finset α} (h : s ∪ t ⊆ u) : t ⊆ u :=
Subset.trans (subset_union_right _ _) h
#align finset.union_subset_right Finset.union_subset_right
-/
#print Finset.union_left_comm /-
theorem union_left_comm (s t u : Finset α) : s ∪ (t ∪ u) = t ∪ (s ∪ u) :=
ext fun _ => by simp only [mem_union, or_left_comm]
#align finset.union_left_comm Finset.union_left_comm
-/
#print Finset.union_right_comm /-
theorem union_right_comm (s t u : Finset α) : s ∪ t ∪ u = s ∪ u ∪ t :=
ext fun x => by simp only [mem_union, or_assoc', or_comm' (x ∈ t)]
#align finset.union_right_comm Finset.union_right_comm
-/
#print Finset.union_self /-
theorem union_self (s : Finset α) : s ∪ s = s :=
union_idempotent s
#align finset.union_self Finset.union_self
-/
#print Finset.union_empty /-
@[simp]
theorem union_empty (s : Finset α) : s ∪ ∅ = s :=
ext fun x => mem_union.trans <| or_false_iff _
#align finset.union_empty Finset.union_empty
-/
#print Finset.empty_union /-
@[simp]
theorem empty_union (s : Finset α) : ∅ ∪ s = s :=
ext fun x => mem_union.trans <| false_or_iff _
#align finset.empty_union Finset.empty_union
-/
#print Finset.insert_eq /-
theorem insert_eq (a : α) (s : Finset α) : insert a s = {a} ∪ s :=
rfl
#align finset.insert_eq Finset.insert_eq
-/
#print Finset.insert_union /-
@[simp]
theorem insert_union (a : α) (s t : Finset α) : insert a s ∪ t = insert a (s ∪ t) := by
simp only [insert_eq, union_assoc]
#align finset.insert_union Finset.insert_union
-/
#print Finset.union_insert /-
@[simp]
theorem union_insert (a : α) (s t : Finset α) : s ∪ insert a t = insert a (s ∪ t) := by
simp only [insert_eq, union_left_comm]
#align finset.union_insert Finset.union_insert
-/
#print Finset.insert_union_distrib /-
theorem insert_union_distrib (a : α) (s t : Finset α) :
insert a (s ∪ t) = insert a s ∪ insert a t := by
simp only [insert_union, union_insert, insert_idem]
#align finset.insert_union_distrib Finset.insert_union_distrib
-/
#print Finset.union_eq_left_iff_subset /-
@[simp]
theorem union_eq_left_iff_subset {s t : Finset α} : s ∪ t = s ↔ t ⊆ s :=
sup_eq_left
#align finset.union_eq_left_iff_subset Finset.union_eq_left_iff_subset
-/
#print Finset.left_eq_union_iff_subset /-
@[simp]
theorem left_eq_union_iff_subset {s t : Finset α} : s = s ∪ t ↔ t ⊆ s := by
rw [← union_eq_left_iff_subset, eq_comm]
#align finset.left_eq_union_iff_subset Finset.left_eq_union_iff_subset
-/
#print Finset.union_eq_right_iff_subset /-
@[simp]
theorem union_eq_right_iff_subset {s t : Finset α} : s ∪ t = t ↔ s ⊆ t :=
sup_eq_right
#align finset.union_eq_right_iff_subset Finset.union_eq_right_iff_subset
-/
#print Finset.right_eq_union_iff_subset /-
@[simp]
theorem right_eq_union_iff_subset {s t : Finset α} : s = t ∪ s ↔ t ⊆ s := by
rw [← union_eq_right_iff_subset, eq_comm]
#align finset.right_eq_union_iff_subset Finset.right_eq_union_iff_subset
-/
/- warning: finset.union_congr_left -> Finset.union_congr_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) t (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s u)) -> (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) u (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) -> (Eq.{succ u1} (Finset.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (Sup.sup.{u1} (Finset.{u1} α) (SemilatticeSup.toHasSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) s u))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) t (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s u)) -> (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) u (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) -> (Eq.{succ u1} (Finset.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (Sup.sup.{u1} (Finset.{u1} α) (SemilatticeSup.toSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) s u))
Case conversion may be inaccurate. Consider using '#align finset.union_congr_left Finset.union_congr_leftₓ'. -/
/-- If `t ⊆ s ∪ u` and `u ⊆ s ∪ t`, then `s ∪ t = s ⊔ u`;
a specialization of the lattice lemma `sup_congr_left`. -/
theorem union_congr_left (ht : t ⊆ s ∪ u) (hu : u ⊆ s ∪ t) : s ∪ t = s ⊔ u :=
sup_congr_left ht hu
#align finset.union_congr_left Finset.union_congr_left
#print Finset.union_congr_right /-
theorem union_congr_right (hs : s ⊆ t ∪ u) (ht : t ⊆ s ∪ u) : s ∪ u = t ∪ u :=
sup_congr_right hs ht
#align finset.union_congr_right Finset.union_congr_right
-/
#print Finset.union_eq_union_iff_left /-
theorem union_eq_union_iff_left : s ∪ t = s ∪ u ↔ t ⊆ s ∪ u ∧ u ⊆ s ∪ t :=
sup_eq_sup_iff_left
#align finset.union_eq_union_iff_left Finset.union_eq_union_iff_left
-/
#print Finset.union_eq_union_iff_right /-
theorem union_eq_union_iff_right : s ∪ u = t ∪ u ↔ s ⊆ t ∪ u ∧ t ⊆ s ∪ u :=
sup_eq_sup_iff_right
#align finset.union_eq_union_iff_right Finset.union_eq_union_iff_right
-/
/- warning: finset.disjoint_union_left -> Finset.disjoint_union_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) u) (And (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s u) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) t u))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) u) (And (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s u) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) t u))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_union_left Finset.disjoint_union_leftₓ'. -/
/-- `s ∪ t` is disjoint from `u` iff both `s` and `t` are disjoint from `u`. -/
@[simp]
theorem disjoint_union_left : Disjoint (s ∪ t) u ↔ Disjoint s u ∧ Disjoint t u := by
simp only [disjoint_left, mem_union, or_imp, forall_and]
#align finset.disjoint_union_left Finset.disjoint_union_left
/- warning: finset.disjoint_union_right -> Finset.disjoint_union_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t u)) (And (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s u))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t u)) (And (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s u))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_union_right Finset.disjoint_union_rightₓ'. -/
/-- `s` is disjoint from `t ∪ u` iff `s` is disjoint from both `t` and `u`. -/
@[simp]
theorem disjoint_union_right : Disjoint s (t ∪ u) ↔ Disjoint s t ∧ Disjoint s u := by
simp only [disjoint_right, mem_union, or_imp, forall_and]
#align finset.disjoint_union_right Finset.disjoint_union_right
#print Finset.induction_on_union /-
/-- To prove a relation on pairs of `finset X`, it suffices to show that it is
* symmetric,
* it holds when one of the `finset`s is empty,
* it holds for pairs of singletons,
* if it holds for `[a, c]` and for `[b, c]`, then it holds for `[a ∪ b, c]`.
-/
theorem induction_on_union (P : Finset α → Finset α → Prop) (symm : ∀ {a b}, P a b → P b a)
(empty_right : ∀ {a}, P a ∅) (singletons : ∀ {a b}, P {a} {b})
(union_of : ∀ {a b c}, P a c → P b c → P (a ∪ b) c) : ∀ a b, P a b :=
by
intro a b
refine' Finset.induction_on b empty_right fun x s xs hi => symm _
rw [Finset.insert_eq]
apply union_of _ (symm hi)
refine' Finset.induction_on a empty_right fun a t ta hi => symm _
rw [Finset.insert_eq]
exact union_of singletons (symm hi)
#align finset.induction_on_union Finset.induction_on_union
-/
/- warning: directed.exists_mem_subset_of_finset_subset_bUnion -> Directed.exists_mem_subset_of_finset_subset_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {ι : Type.{u2}} [hn : Nonempty.{succ u2} ι] {f : ι -> (Set.{u1} α)}, (Directed.{u1, succ u2} (Set.{u1} α) ι (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α)) f) -> (forall {s : Finset.{u1} α}, (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (Set.unionᵢ.{u1, succ u2} α ι (fun (i : ι) => f i))) -> (Exists.{succ u2} ι (fun (i : ι) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (f i))))
but is expected to have type
forall {α : Type.{u2}} {ι : Type.{u1}} [hn : Nonempty.{succ u1} ι] {f : ι -> (Set.{u2} α)}, (Directed.{u2, succ u1} (Set.{u2} α) ι (fun (x._@.Mathlib.Data.Finset.Basic._hyg.13359 : Set.{u2} α) (x._@.Mathlib.Data.Finset.Basic._hyg.13361 : Set.{u2} α) => HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) x._@.Mathlib.Data.Finset.Basic._hyg.13359 x._@.Mathlib.Data.Finset.Basic._hyg.13361) f) -> (forall {s : Finset.{u2} α}, (HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) (Finset.toSet.{u2} α s) (Set.unionᵢ.{u2, succ u1} α ι (fun (i : ι) => f i))) -> (Exists.{succ u1} ι (fun (i : ι) => HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) (Finset.toSet.{u2} α s) (f i))))
Case conversion may be inaccurate. Consider using '#align directed.exists_mem_subset_of_finset_subset_bUnion Directed.exists_mem_subset_of_finset_subset_bunionᵢₓ'. -/
-- A finite set contained in a directed union `⋃ i, f i` is contained in a single `f i`.
-- Proof: induction on the finset; the empty case uses any index (`hn.some`), the insert
-- case finds indices for the new element and the rest, then an upper bound via directedness.
theorem Directed.exists_mem_subset_of_finset_subset_bunionᵢ {α ι : Type _} [hn : Nonempty ι]
    {f : ι → Set α} (h : Directed (· ⊆ ·) f) {s : Finset α} (hs : (s : Set α) ⊆ ⋃ i, f i) :
    ∃ i, (s : Set α) ⊆ f i := by
  classical
    revert hs
    apply s.induction_on
    -- base case: ∅ is contained in any `f i`; pick an arbitrary index
    · refine' fun _ => ⟨hn.some, _⟩
      simp only [coe_empty, Set.empty_subset]
    · intro b t hbt htc hbtc
      -- index covering the tail `t`
      obtain ⟨i : ι, hti : (t : Set α) ⊆ f i⟩ := htc (Set.Subset.trans (t.subset_insert b) hbtc)
      -- index covering the new element `b`
      obtain ⟨j, hbj⟩ : ∃ j, b ∈ f j := by simpa [Set.mem_unionᵢ₂] using hbtc (t.mem_insert_self b)
      -- directedness gives a common upper bound `k` of `f j` and `f i`
      rcases h j i with ⟨k, hk, hk'⟩
      use k
      rw [coe_insert, Set.insert_subset]
      exact ⟨hk hbj, trans hti hk'⟩
#align directed.exists_mem_subset_of_finset_subset_bUnion Directed.exists_mem_subset_of_finset_subset_bunionᵢ
/- warning: directed_on.exists_mem_subset_of_finset_subset_bUnion -> DirectedOn.exists_mem_subset_of_finset_subset_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {ι : Type.{u2}} {f : ι -> (Set.{u1} α)} {c : Set.{u2} ι}, (Set.Nonempty.{u2} ι c) -> (DirectedOn.{u2} ι (fun (i : ι) (j : ι) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) (f i) (f j)) c) -> (forall {s : Finset.{u1} α}, (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (Set.unionᵢ.{u1, succ u2} α ι (fun (i : ι) => Set.unionᵢ.{u1, 0} α (Membership.Mem.{u2, u2} ι (Set.{u2} ι) (Set.hasMem.{u2} ι) i c) (fun (H : Membership.Mem.{u2, u2} ι (Set.{u2} ι) (Set.hasMem.{u2} ι) i c) => f i)))) -> (Exists.{succ u2} ι (fun (i : ι) => Exists.{0} (Membership.Mem.{u2, u2} ι (Set.{u2} ι) (Set.hasMem.{u2} ι) i c) (fun (H : Membership.Mem.{u2, u2} ι (Set.{u2} ι) (Set.hasMem.{u2} ι) i c) => HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (f i)))))
but is expected to have type
forall {α : Type.{u2}} {ι : Type.{u1}} {f : ι -> (Set.{u2} α)} {c : Set.{u1} ι}, (Set.Nonempty.{u1} ι c) -> (DirectedOn.{u1} ι (fun (i : ι) (j : ι) => HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) (f i) (f j)) c) -> (forall {s : Finset.{u2} α}, (HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) (Finset.toSet.{u2} α s) (Set.unionᵢ.{u2, succ u1} α ι (fun (i : ι) => Set.unionᵢ.{u2, 0} α (Membership.mem.{u1, u1} ι (Set.{u1} ι) (Set.instMembershipSet.{u1} ι) i c) (fun (H : Membership.mem.{u1, u1} ι (Set.{u1} ι) (Set.instMembershipSet.{u1} ι) i c) => f i)))) -> (Exists.{succ u1} ι (fun (i : ι) => And (Membership.mem.{u1, u1} ι (Set.{u1} ι) (Set.instMembershipSet.{u1} ι) i c) (HasSubset.Subset.{u2} (Set.{u2} α) (Set.instHasSubsetSet.{u2} α) (Finset.toSet.{u2} α s) (f i)))))
Case conversion may be inaccurate. Consider using '#align directed_on.exists_mem_subset_of_finset_subset_bUnion DirectedOn.exists_mem_subset_of_finset_subset_bunionᵢₓ'. -/
-- Bounded-union variant: a finset inside `⋃ i ∈ c, f i` for a directed family on a
-- nonempty index set `c` lies inside a single `f i` with `i ∈ c`.
-- Reduces to the unbounded version by reindexing over the subtype `↥c`.
theorem DirectedOn.exists_mem_subset_of_finset_subset_bunionᵢ {α ι : Type _} {f : ι → Set α}
    {c : Set ι} (hn : c.Nonempty) (hc : DirectedOn (fun i j => f i ⊆ f j) c) {s : Finset α}
    (hs : (s : Set α) ⊆ ⋃ i ∈ c, f i) : ∃ i ∈ c, (s : Set α) ⊆ f i :=
  by
  -- rewrite the bounded union as a union over the subtype, then apply the directed version
  rw [Set.bunionᵢ_eq_unionᵢ] at hs
  haveI := hn.coe_sort
  obtain ⟨⟨i, hic⟩, hi⟩ :=
    (directed_comp.2 hc.directed_coe).exists_mem_subset_of_finset_subset_bunionᵢ hs
  exact ⟨i, hic, hi⟩
#align directed_on.exists_mem_subset_of_finset_subset_bUnion DirectedOn.exists_mem_subset_of_finset_subset_bunionᵢ
/-! #### inter -/
#print Finset.inter_val_nd /-
theorem inter_val_nd (s₁ s₂ : Finset α) : (s₁ ∩ s₂).1 = ndinter s₁.1 s₂.1 :=
rfl
#align finset.inter_val_nd Finset.inter_val_nd
-/
#print Finset.inter_val /-
@[simp]
theorem inter_val (s₁ s₂ : Finset α) : (s₁ ∩ s₂).1 = s₁.1 ∩ s₂.1 :=
ndinter_eq_inter s₁.2
#align finset.inter_val Finset.inter_val
-/
#print Finset.mem_inter /-
@[simp]
theorem mem_inter {a : α} {s₁ s₂ : Finset α} : a ∈ s₁ ∩ s₂ ↔ a ∈ s₁ ∧ a ∈ s₂ :=
mem_ndinter
#align finset.mem_inter Finset.mem_inter
-/
#print Finset.mem_of_mem_inter_left /-
theorem mem_of_mem_inter_left {a : α} {s₁ s₂ : Finset α} (h : a ∈ s₁ ∩ s₂) : a ∈ s₁ :=
(mem_inter.1 h).1
#align finset.mem_of_mem_inter_left Finset.mem_of_mem_inter_left
-/
#print Finset.mem_of_mem_inter_right /-
theorem mem_of_mem_inter_right {a : α} {s₁ s₂ : Finset α} (h : a ∈ s₁ ∩ s₂) : a ∈ s₂ :=
(mem_inter.1 h).2
#align finset.mem_of_mem_inter_right Finset.mem_of_mem_inter_right
-/
#print Finset.mem_inter_of_mem /-
theorem mem_inter_of_mem {a : α} {s₁ s₂ : Finset α} : a ∈ s₁ → a ∈ s₂ → a ∈ s₁ ∩ s₂ :=
and_imp.1 mem_inter.2
#align finset.mem_inter_of_mem Finset.mem_inter_of_mem
-/
#print Finset.inter_subset_left /-
theorem inter_subset_left (s₁ s₂ : Finset α) : s₁ ∩ s₂ ⊆ s₁ := fun a => mem_of_mem_inter_left
#align finset.inter_subset_left Finset.inter_subset_left
-/
#print Finset.inter_subset_right /-
theorem inter_subset_right (s₁ s₂ : Finset α) : s₁ ∩ s₂ ⊆ s₂ := fun a => mem_of_mem_inter_right
#align finset.inter_subset_right Finset.inter_subset_right
-/
#print Finset.subset_inter /-
theorem subset_inter {s₁ s₂ u : Finset α} : s₁ ⊆ s₂ → s₁ ⊆ u → s₁ ⊆ s₂ ∩ u := by
simp (config := { contextual := true }) only [subset_iff, mem_inter] <;> intros <;>
constructor <;>
trivial
#align finset.subset_inter Finset.subset_inter
-/
/- warning: finset.coe_inter -> Finset.coe_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (Inter.inter.{u1} (Set.{u1} α) (Set.hasInter.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₁) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₂))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) (Finset.toSet.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (Inter.inter.{u1} (Set.{u1} α) (Set.instInterSet.{u1} α) (Finset.toSet.{u1} α s₁) (Finset.toSet.{u1} α s₂))
Case conversion may be inaccurate. Consider using '#align finset.coe_inter Finset.coe_interₓ'. -/
-- The coercion `Finset α → Set α` maps finset intersection to set intersection.
@[simp, norm_cast]
theorem coe_inter (s₁ s₂ : Finset α) : ↑(s₁ ∩ s₂) = (s₁ ∩ s₂ : Set α) :=
  Set.ext fun _ => mem_inter
#align finset.coe_inter Finset.coe_inter
#print Finset.union_inter_cancel_left /-
@[simp]
theorem union_inter_cancel_left {s t : Finset α} : (s ∪ t) ∩ s = s := by
rw [← coe_inj, coe_inter, coe_union, Set.union_inter_cancel_left]
#align finset.union_inter_cancel_left Finset.union_inter_cancel_left
-/
#print Finset.union_inter_cancel_right /-
@[simp]
theorem union_inter_cancel_right {s t : Finset α} : (s ∪ t) ∩ t = t := by
rw [← coe_inj, coe_inter, coe_union, Set.union_inter_cancel_right]
#align finset.union_inter_cancel_right Finset.union_inter_cancel_right
-/
#print Finset.inter_comm /-
theorem inter_comm (s₁ s₂ : Finset α) : s₁ ∩ s₂ = s₂ ∩ s₁ :=
ext fun _ => by simp only [mem_inter, and_comm']
#align finset.inter_comm Finset.inter_comm
-/
#print Finset.inter_assoc /-
@[simp]
theorem inter_assoc (s₁ s₂ s₃ : Finset α) : s₁ ∩ s₂ ∩ s₃ = s₁ ∩ (s₂ ∩ s₃) :=
ext fun _ => by simp only [mem_inter, and_assoc']
#align finset.inter_assoc Finset.inter_assoc
-/
#print Finset.inter_left_comm /-
theorem inter_left_comm (s₁ s₂ s₃ : Finset α) : s₁ ∩ (s₂ ∩ s₃) = s₂ ∩ (s₁ ∩ s₃) :=
ext fun _ => by simp only [mem_inter, and_left_comm]
#align finset.inter_left_comm Finset.inter_left_comm
-/
#print Finset.inter_right_comm /-
theorem inter_right_comm (s₁ s₂ s₃ : Finset α) : s₁ ∩ s₂ ∩ s₃ = s₁ ∩ s₃ ∩ s₂ :=
ext fun _ => by simp only [mem_inter, and_right_comm]
#align finset.inter_right_comm Finset.inter_right_comm
-/
#print Finset.inter_self /-
@[simp]
theorem inter_self (s : Finset α) : s ∩ s = s :=
ext fun _ => mem_inter.trans <| and_self_iff _
#align finset.inter_self Finset.inter_self
-/
#print Finset.inter_empty /-
@[simp]
theorem inter_empty (s : Finset α) : s ∩ ∅ = ∅ :=
ext fun _ => mem_inter.trans <| and_false_iff _
#align finset.inter_empty Finset.inter_empty
-/
#print Finset.empty_inter /-
@[simp]
theorem empty_inter (s : Finset α) : ∅ ∩ s = ∅ :=
ext fun _ => mem_inter.trans <| false_and_iff _
#align finset.empty_inter Finset.empty_inter
-/
#print Finset.inter_union_self /-
@[simp]
theorem inter_union_self (s t : Finset α) : s ∩ (t ∪ s) = s := by
rw [inter_comm, union_inter_cancel_right]
#align finset.inter_union_self Finset.inter_union_self
-/
#print Finset.insert_inter_of_mem /-
@[simp]
theorem insert_inter_of_mem {s₁ s₂ : Finset α} {a : α} (h : a ∈ s₂) :
insert a s₁ ∩ s₂ = insert a (s₁ ∩ s₂) :=
ext fun x =>
by
have : x = a ∨ x ∈ s₂ ↔ x ∈ s₂ := or_iff_right_of_imp <| by rintro rfl <;> exact h
simp only [mem_inter, mem_insert, or_and_left, this]
#align finset.insert_inter_of_mem Finset.insert_inter_of_mem
-/
#print Finset.inter_insert_of_mem /-
@[simp]
theorem inter_insert_of_mem {s₁ s₂ : Finset α} {a : α} (h : a ∈ s₁) :
s₁ ∩ insert a s₂ = insert a (s₁ ∩ s₂) := by rw [inter_comm, insert_inter_of_mem h, inter_comm]
#align finset.inter_insert_of_mem Finset.inter_insert_of_mem
-/
#print Finset.insert_inter_of_not_mem /-
@[simp]
theorem insert_inter_of_not_mem {s₁ s₂ : Finset α} {a : α} (h : a ∉ s₂) :
insert a s₁ ∩ s₂ = s₁ ∩ s₂ :=
ext fun x => by
have : ¬(x = a ∧ x ∈ s₂) := by rintro ⟨rfl, H⟩ <;> exact h H
simp only [mem_inter, mem_insert, or_and_right, this, false_or_iff]
#align finset.insert_inter_of_not_mem Finset.insert_inter_of_not_mem
-/
#print Finset.inter_insert_of_not_mem /-
@[simp]
theorem inter_insert_of_not_mem {s₁ s₂ : Finset α} {a : α} (h : a ∉ s₁) :
s₁ ∩ insert a s₂ = s₁ ∩ s₂ := by rw [inter_comm, insert_inter_of_not_mem h, inter_comm]
#align finset.inter_insert_of_not_mem Finset.inter_insert_of_not_mem
-/
#print Finset.singleton_inter_of_mem /-
@[simp]
theorem singleton_inter_of_mem {a : α} {s : Finset α} (H : a ∈ s) : {a} ∩ s = {a} :=
show insert a ∅ ∩ s = insert a ∅ by rw [insert_inter_of_mem H, empty_inter]
#align finset.singleton_inter_of_mem Finset.singleton_inter_of_mem
-/
#print Finset.singleton_inter_of_not_mem /-
@[simp]
theorem singleton_inter_of_not_mem {a : α} {s : Finset α} (H : a ∉ s) : {a} ∩ s = ∅ :=
eq_empty_of_forall_not_mem <| by
simp only [mem_inter, mem_singleton] <;> rintro x ⟨rfl, h⟩ <;> exact H h
#align finset.singleton_inter_of_not_mem Finset.singleton_inter_of_not_mem
-/
#print Finset.inter_singleton_of_mem /-
@[simp]
theorem inter_singleton_of_mem {a : α} {s : Finset α} (h : a ∈ s) : s ∩ {a} = {a} := by
rw [inter_comm, singleton_inter_of_mem h]
#align finset.inter_singleton_of_mem Finset.inter_singleton_of_mem
-/
#print Finset.inter_singleton_of_not_mem /-
@[simp]
theorem inter_singleton_of_not_mem {a : α} {s : Finset α} (h : a ∉ s) : s ∩ {a} = ∅ := by
rw [inter_comm, singleton_inter_of_not_mem h]
#align finset.inter_singleton_of_not_mem Finset.inter_singleton_of_not_mem
-/
#print Finset.inter_subset_inter /-
@[mono]
theorem inter_subset_inter {x y s t : Finset α} (h : x ⊆ y) (h' : s ⊆ t) : x ∩ s ⊆ y ∩ t :=
by
intro a a_in
rw [Finset.mem_inter] at a_in⊢
exact ⟨h a_in.1, h' a_in.2⟩
#align finset.inter_subset_inter Finset.inter_subset_inter
-/
#print Finset.inter_subset_inter_left /-
theorem inter_subset_inter_left (h : t ⊆ u) : s ∩ t ⊆ s ∩ u :=
inter_subset_inter Subset.rfl h
#align finset.inter_subset_inter_left Finset.inter_subset_inter_left
-/
#print Finset.inter_subset_inter_right /-
theorem inter_subset_inter_right (h : s ⊆ t) : s ∩ u ⊆ t ∩ u :=
inter_subset_inter h Subset.rfl
#align finset.inter_subset_inter_right Finset.inter_subset_inter_right
-/
#print Finset.inter_subset_union /-
theorem inter_subset_union : s ∩ t ⊆ s ∪ t :=
le_iff_subset.1 inf_le_sup
#align finset.inter_subset_union Finset.inter_subset_union
-/
-- `Finset α` is a distributive lattice (with `∪` as sup and `∩` as inf): it suffices to
-- check the one distributivity inequality `(a ∪ b) ∩ (a ∪ c) ⊆ a ∪ b ∩ c` on top of the
-- existing `Finset.lattice` structure; the proof is by membership case analysis via `simp`.
instance : DistribLattice (Finset α) :=
  { Finset.lattice with
    le_sup_inf := fun a b c =>
      show (a ∪ b) ∩ (a ∪ c) ⊆ a ∪ b ∩ c by
        simp (config := { contextual := true }) only [subset_iff, mem_inter, mem_union, and_imp,
            or_imp] <;>
          simp only [true_or_iff, imp_true_iff, true_and_iff, or_true_iff] }
#print Finset.union_left_idem /-
@[simp]
theorem union_left_idem (s t : Finset α) : s ∪ (s ∪ t) = s ∪ t :=
sup_left_idem
#align finset.union_left_idem Finset.union_left_idem
-/
#print Finset.union_right_idem /-
@[simp]
theorem union_right_idem (s t : Finset α) : s ∪ t ∪ t = s ∪ t :=
sup_right_idem
#align finset.union_right_idem Finset.union_right_idem
-/
#print Finset.inter_left_idem /-
@[simp]
theorem inter_left_idem (s t : Finset α) : s ∩ (s ∩ t) = s ∩ t :=
inf_left_idem
#align finset.inter_left_idem Finset.inter_left_idem
-/
#print Finset.inter_right_idem /-
@[simp]
theorem inter_right_idem (s t : Finset α) : s ∩ t ∩ t = s ∩ t :=
inf_right_idem
#align finset.inter_right_idem Finset.inter_right_idem
-/
#print Finset.inter_distrib_left /-
theorem inter_distrib_left (s t u : Finset α) : s ∩ (t ∪ u) = s ∩ t ∪ s ∩ u :=
inf_sup_left
#align finset.inter_distrib_left Finset.inter_distrib_left
-/
#print Finset.inter_distrib_right /-
theorem inter_distrib_right (s t u : Finset α) : (s ∪ t) ∩ u = s ∩ u ∪ t ∩ u :=
inf_sup_right
#align finset.inter_distrib_right Finset.inter_distrib_right
-/
#print Finset.union_distrib_left /-
theorem union_distrib_left (s t u : Finset α) : s ∪ t ∩ u = (s ∪ t) ∩ (s ∪ u) :=
sup_inf_left
#align finset.union_distrib_left Finset.union_distrib_left
-/
#print Finset.union_distrib_right /-
theorem union_distrib_right (s t u : Finset α) : s ∩ t ∪ u = (s ∪ u) ∩ (t ∪ u) :=
sup_inf_right
#align finset.union_distrib_right Finset.union_distrib_right
-/
#print Finset.union_union_distrib_left /-
theorem union_union_distrib_left (s t u : Finset α) : s ∪ (t ∪ u) = s ∪ t ∪ (s ∪ u) :=
sup_sup_distrib_left _ _ _
#align finset.union_union_distrib_left Finset.union_union_distrib_left
-/
#print Finset.union_union_distrib_right /-
theorem union_union_distrib_right (s t u : Finset α) : s ∪ t ∪ u = s ∪ u ∪ (t ∪ u) :=
sup_sup_distrib_right _ _ _
#align finset.union_union_distrib_right Finset.union_union_distrib_right
-/
#print Finset.inter_inter_distrib_left /-
theorem inter_inter_distrib_left (s t u : Finset α) : s ∩ (t ∩ u) = s ∩ t ∩ (s ∩ u) :=
inf_inf_distrib_left _ _ _
#align finset.inter_inter_distrib_left Finset.inter_inter_distrib_left
-/
#print Finset.inter_inter_distrib_right /-
theorem inter_inter_distrib_right (s t u : Finset α) : s ∩ t ∩ u = s ∩ u ∩ (t ∩ u) :=
inf_inf_distrib_right _ _ _
#align finset.inter_inter_distrib_right Finset.inter_inter_distrib_right
-/
#print Finset.union_union_union_comm /-
theorem union_union_union_comm (s t u v : Finset α) : s ∪ t ∪ (u ∪ v) = s ∪ u ∪ (t ∪ v) :=
sup_sup_sup_comm _ _ _ _
#align finset.union_union_union_comm Finset.union_union_union_comm
-/
#print Finset.inter_inter_inter_comm /-
theorem inter_inter_inter_comm (s t u v : Finset α) : s ∩ t ∩ (u ∩ v) = s ∩ u ∩ (t ∩ v) :=
inf_inf_inf_comm _ _ _ _
#align finset.inter_inter_inter_comm Finset.inter_inter_inter_comm
-/
#print Finset.union_eq_empty_iff /-
theorem union_eq_empty_iff (A B : Finset α) : A ∪ B = ∅ ↔ A = ∅ ∧ B = ∅ :=
sup_eq_bot_iff
#align finset.union_eq_empty_iff Finset.union_eq_empty_iff
-/
#print Finset.union_subset_iff /-
theorem union_subset_iff : s ∪ t ⊆ u ↔ s ⊆ u ∧ t ⊆ u :=
(sup_le_iff : s ⊔ t ≤ u ↔ s ≤ u ∧ t ≤ u)
#align finset.union_subset_iff Finset.union_subset_iff
-/
#print Finset.subset_inter_iff /-
theorem subset_inter_iff : s ⊆ t ∩ u ↔ s ⊆ t ∧ s ⊆ u :=
(le_inf_iff : s ≤ t ⊓ u ↔ s ≤ t ∧ s ≤ u)
#align finset.subset_inter_iff Finset.subset_inter_iff
-/
#print Finset.inter_eq_left_iff_subset /-
@[simp]
theorem inter_eq_left_iff_subset (s t : Finset α) : s ∩ t = s ↔ s ⊆ t :=
inf_eq_left
#align finset.inter_eq_left_iff_subset Finset.inter_eq_left_iff_subset
-/
#print Finset.inter_eq_right_iff_subset /-
@[simp]
theorem inter_eq_right_iff_subset (s t : Finset α) : t ∩ s = s ↔ s ⊆ t :=
inf_eq_right
#align finset.inter_eq_right_iff_subset Finset.inter_eq_right_iff_subset
-/
#print Finset.inter_congr_left /-
theorem inter_congr_left (ht : s ∩ u ⊆ t) (hu : s ∩ t ⊆ u) : s ∩ t = s ∩ u :=
inf_congr_left ht hu
#align finset.inter_congr_left Finset.inter_congr_left
-/
#print Finset.inter_congr_right /-
theorem inter_congr_right (hs : t ∩ u ⊆ s) (ht : s ∩ u ⊆ t) : s ∩ u = t ∩ u :=
inf_congr_right hs ht
#align finset.inter_congr_right Finset.inter_congr_right
-/
#print Finset.inter_eq_inter_iff_left /-
theorem inter_eq_inter_iff_left : s ∩ t = s ∩ u ↔ s ∩ u ⊆ t ∧ s ∩ t ⊆ u :=
inf_eq_inf_iff_left
#align finset.inter_eq_inter_iff_left Finset.inter_eq_inter_iff_left
-/
#print Finset.inter_eq_inter_iff_right /-
theorem inter_eq_inter_iff_right : s ∩ u = t ∩ u ↔ t ∩ u ⊆ s ∧ s ∩ u ⊆ t :=
inf_eq_inf_iff_right
#align finset.inter_eq_inter_iff_right Finset.inter_eq_inter_iff_right
-/
#print Finset.ite_subset_union /-
theorem ite_subset_union (s s' : Finset α) (P : Prop) [Decidable P] : ite P s s' ⊆ s ∪ s' :=
ite_le_sup s s' P
#align finset.ite_subset_union Finset.ite_subset_union
-/
#print Finset.inter_subset_ite /-
theorem inter_subset_ite (s s' : Finset α) (P : Prop) [Decidable P] : s ∩ s' ⊆ ite P s s' :=
inf_le_ite s s' P
#align finset.inter_subset_ite Finset.inter_subset_ite
-/
/- warning: finset.not_disjoint_iff_nonempty_inter -> Finset.not_disjoint_iff_nonempty_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)) (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)) (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t))
Case conversion may be inaccurate. Consider using '#align finset.not_disjoint_iff_nonempty_inter Finset.not_disjoint_iff_nonempty_interₓ'. -/
-- Two finsets fail to be disjoint exactly when their intersection is nonempty.
theorem not_disjoint_iff_nonempty_inter : ¬Disjoint s t ↔ (s ∩ t).Nonempty :=
  not_disjoint_iff.trans <| by simp [Finset.Nonempty]
#align finset.not_disjoint_iff_nonempty_inter Finset.not_disjoint_iff_nonempty_inter
/- warning: finset.nonempty.not_disjoint -> Finset.Nonempty.not_disjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) -> (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) -> (Not (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t))
Case conversion may be inaccurate. Consider using '#align finset.nonempty.not_disjoint Finset.Nonempty.not_disjointₓ'. -/
-- Forward direction of the previous iff as a named lemma:
-- a nonempty intersection means the two finsets are not disjoint.
alias not_disjoint_iff_nonempty_inter ↔ _ nonempty.not_disjoint
#align finset.nonempty.not_disjoint Finset.Nonempty.not_disjoint
/- warning: finset.disjoint_or_nonempty_inter -> Finset.disjoint_or_nonempty_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α), Or (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α), Or (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) (Finset.Nonempty.{u1} α (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_or_nonempty_inter Finset.disjoint_or_nonempty_interₓ'. -/
-- Dichotomy: any two finsets are either disjoint or have a nonempty intersection.
-- Follows from `not_disjoint_iff_nonempty_inter` and the law of excluded middle (`em`).
theorem disjoint_or_nonempty_inter (s t : Finset α) : Disjoint s t ∨ (s ∩ t).Nonempty :=
  by
  rw [← not_disjoint_iff_nonempty_inter]
  exact em _
#align finset.disjoint_or_nonempty_inter Finset.disjoint_or_nonempty_inter
end Lattice
/-! ### erase -/
section Erase
variable [DecidableEq α] {s t u v : Finset α} {a b : α}
#print Finset.erase /-
/-- `erase s a` is the set `s - {a}`, that is, the elements of `s` which are
not equal to `a`. -/
def erase (s : Finset α) (a : α) : Finset α :=
⟨_, s.2.eraseₓ a⟩
#align finset.erase Finset.erase
-/
#print Finset.erase_val /-
@[simp]
theorem erase_val (s : Finset α) (a : α) : (erase s a).1 = s.1.eraseₓ a :=
rfl
#align finset.erase_val Finset.erase_val
-/
#print Finset.mem_erase /-
@[simp]
theorem mem_erase {a b : α} {s : Finset α} : a ∈ erase s b ↔ a ≠ b ∧ a ∈ s :=
s.2.mem_erase_iff
#align finset.mem_erase Finset.mem_erase
-/
#print Finset.not_mem_erase /-
theorem not_mem_erase (a : α) (s : Finset α) : a ∉ erase s a :=
s.2.not_mem_erase
#align finset.not_mem_erase Finset.not_mem_erase
-/
#print Finset.erase_empty /-
-- While this can be solved by `simp`, this lemma is eligible for `dsimp`
@[nolint simp_nf, simp]
theorem erase_empty (a : α) : erase ∅ a = ∅ :=
rfl
#align finset.erase_empty Finset.erase_empty
-/
#print Finset.erase_singleton /-
@[simp]
theorem erase_singleton (a : α) : ({a} : Finset α).eraseₓ a = ∅ :=
by
ext x
rw [mem_erase, mem_singleton, not_and_self_iff]
rfl
#align finset.erase_singleton Finset.erase_singleton
-/
#print Finset.ne_of_mem_erase /-
theorem ne_of_mem_erase : b ∈ erase s a → b ≠ a := fun h => (mem_erase.1 h).1
#align finset.ne_of_mem_erase Finset.ne_of_mem_erase
-/
#print Finset.mem_of_mem_erase /-
theorem mem_of_mem_erase : b ∈ erase s a → b ∈ s :=
mem_of_mem_erase
#align finset.mem_of_mem_erase Finset.mem_of_mem_erase
-/
#print Finset.mem_erase_of_ne_of_mem /-
theorem mem_erase_of_ne_of_mem : a ≠ b → a ∈ s → a ∈ erase s b := by
simp only [mem_erase] <;> exact And.intro
#align finset.mem_erase_of_ne_of_mem Finset.mem_erase_of_ne_of_mem
-/
#print Finset.eq_of_mem_of_not_mem_erase /-
/-- An element of `s` that is not an element of `erase s a` must be
`a`. -/
theorem eq_of_mem_of_not_mem_erase (hs : b ∈ s) (hsa : b ∉ s.eraseₓ a) : b = a :=
by
rw [mem_erase, not_and] at hsa
exact not_imp_not.mp hsa hs
#align finset.eq_of_mem_of_not_mem_erase Finset.eq_of_mem_of_not_mem_erase
-/
#print Finset.erase_eq_of_not_mem /-
@[simp]
theorem erase_eq_of_not_mem {a : α} {s : Finset α} (h : a ∉ s) : erase s a = s :=
eq_of_veq <| erase_of_not_mem h
#align finset.erase_eq_of_not_mem Finset.erase_eq_of_not_mem
-/
#print Finset.erase_eq_self /-
@[simp]
theorem erase_eq_self : s.eraseₓ a = s ↔ a ∉ s :=
⟨fun h => h ▸ not_mem_erase _ _, erase_eq_of_not_mem⟩
#align finset.erase_eq_self Finset.erase_eq_self
-/
#print Finset.erase_insert_eq_erase /-
@[simp]
theorem erase_insert_eq_erase (s : Finset α) (a : α) : (insert a s).eraseₓ a = s.eraseₓ a :=
ext fun x => by
simp (config := { contextual := true }) only [mem_erase, mem_insert, and_congr_right_iff,
false_or_iff, iff_self_iff, imp_true_iff]
#align finset.erase_insert_eq_erase Finset.erase_insert_eq_erase
-/
#print Finset.erase_insert /-
theorem erase_insert {a : α} {s : Finset α} (h : a ∉ s) : erase (insert a s) a = s := by
rw [erase_insert_eq_erase, erase_eq_of_not_mem h]
#align finset.erase_insert Finset.erase_insert
-/
#print Finset.erase_insert_of_ne /-
theorem erase_insert_of_ne {a b : α} {s : Finset α} (h : a ≠ b) :
erase (insert a s) b = insert a (erase s b) :=
ext fun x =>
by
have : x ≠ b ∧ x = a ↔ x = a := and_iff_right_of_imp fun hx => hx.symm ▸ h
simp only [mem_erase, mem_insert, and_or_left, this]
#align finset.erase_insert_of_ne Finset.erase_insert_of_ne
-/
#print Finset.erase_cons_of_ne /-
theorem erase_cons_of_ne {a b : α} {s : Finset α} (ha : a ∉ s) (hb : a ≠ b) :
erase (cons a s ha) b = cons a (erase s b) fun h => ha <| erase_subset _ _ h := by
simp only [cons_eq_insert, erase_insert_of_ne hb]
#align finset.erase_cons_of_ne Finset.erase_cons_of_ne
-/
#print Finset.insert_erase /-
theorem insert_erase {a : α} {s : Finset α} (h : a ∈ s) : insert a (erase s a) = s :=
ext fun x => by
simp only [mem_insert, mem_erase, or_and_left, dec_em, true_and_iff] <;>
apply or_iff_right_of_imp <;>
rintro rfl <;>
exact h
#align finset.insert_erase Finset.insert_erase
-/
#print Finset.erase_subset_erase /-
theorem erase_subset_erase (a : α) {s t : Finset α} (h : s ⊆ t) : erase s a ⊆ erase t a :=
val_le_iff.1 <| erase_le_erase _ <| val_le_iff.2 h
#align finset.erase_subset_erase Finset.erase_subset_erase
-/
#print Finset.erase_subset /-
theorem erase_subset (a : α) (s : Finset α) : erase s a ⊆ s :=
erase_subset _ _
#align finset.erase_subset Finset.erase_subset
-/
#print Finset.subset_erase /-
theorem subset_erase {a : α} {s t : Finset α} : s ⊆ t.eraseₓ a ↔ s ⊆ t ∧ a ∉ s :=
⟨fun h => ⟨h.trans (erase_subset _ _), fun ha => not_mem_erase _ _ (h ha)⟩, fun h b hb =>
mem_erase.2 ⟨ne_of_mem_of_not_mem hb h.2, h.1 hb⟩⟩
#align finset.subset_erase Finset.subset_erase
-/
/- warning: finset.coe_erase -> Finset.coe_erase is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (a : α) (s : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (Finset.erase.{u1} α (fun (a : α) (b : α) => _inst_1 a b) s a)) (SDiff.sdiff.{u1} (Set.{u1} α) (BooleanAlgebra.toHasSdiff.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (Singleton.singleton.{u1, u1} α (Set.{u1} α) (Set.hasSingleton.{u1} α) a))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (a : α) (s : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) (Finset.toSet.{u1} α (Finset.erase.{u1} α (fun (a : α) (b : α) => _inst_1 a b) s a)) (SDiff.sdiff.{u1} (Set.{u1} α) (Set.instSDiffSet.{u1} α) (Finset.toSet.{u1} α s) (Singleton.singleton.{u1, u1} α (Set.{u1} α) (Set.instSingletonSet.{u1} α) a))
Case conversion may be inaccurate. Consider using '#align finset.coe_erase Finset.coe_eraseₓ'. -/
-- Coercion to `Set α` maps `erase s a` to the set difference `↑s \ {a}`.
@[simp, norm_cast]
theorem coe_erase (a : α) (s : Finset α) : ↑(erase s a) = (s \ {a} : Set α) :=
  Set.ext fun _ => mem_erase.trans <| by rw [and_comm', Set.mem_diff, Set.mem_singleton_iff] <;> rfl
#align finset.coe_erase Finset.coe_erase
#print Finset.erase_ssubset /-
theorem erase_ssubset {a : α} {s : Finset α} (h : a ∈ s) : s.eraseₓ a ⊂ s :=
calc
s.eraseₓ a ⊂ insert a (s.eraseₓ a) := ssubset_insert <| not_mem_erase _ _
_ = _ := insert_erase h
#align finset.erase_ssubset Finset.erase_ssubset
-/
/- warning: finset.ssubset_iff_exists_subset_erase -> Finset.ssubset_iff_exists_subset_erase is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) s t) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t) => HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s (Finset.erase.{u1} α (fun (a : α) (b : α) => _inst_1 a b) t a))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) s t) (Exists.{succ u1} α (fun (a : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t) (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s (Finset.erase.{u1} α (fun (a : α) (b : α) => _inst_1 a b) t a))))
Case conversion may be inaccurate. Consider using '#align finset.ssubset_iff_exists_subset_erase Finset.ssubset_iff_exists_subset_eraseₓ'. -/
-- `s` is a strict subset of `t` iff `s` fits inside `t` with some element `a ∈ t` removed.
-- Forward direction extracts a witness from `not_subset`; backward chains
-- `s ⊆ t.erase a ⊂ t` via `erase_ssubset`.
theorem ssubset_iff_exists_subset_erase {s t : Finset α} : s ⊂ t ↔ ∃ a ∈ t, s ⊆ t.eraseₓ a :=
  by
  refine' ⟨fun h => _, fun ⟨a, ha, h⟩ => ssubset_of_subset_of_ssubset h <| erase_ssubset ha⟩
  obtain ⟨a, ht, hs⟩ := not_subset.1 h.2
  exact ⟨a, ht, subset_erase.2 ⟨h.1, hs⟩⟩
#align finset.ssubset_iff_exists_subset_erase Finset.ssubset_iff_exists_subset_erase
#print Finset.erase_ssubset_insert /-
theorem erase_ssubset_insert (s : Finset α) (a : α) : s.eraseₓ a ⊂ insert a s :=
ssubset_iff_exists_subset_erase.2
⟨a, mem_insert_self _ _, erase_subset_erase _ <| subset_insert _ _⟩
#align finset.erase_ssubset_insert Finset.erase_ssubset_insert
-/
#print Finset.erase_ne_self /-
theorem erase_ne_self : s.eraseₓ a ≠ s ↔ a ∈ s :=
erase_eq_self.not_left
#align finset.erase_ne_self Finset.erase_ne_self
-/
#print Finset.erase_cons /-
theorem erase_cons {s : Finset α} {a : α} (h : a ∉ s) : (s.cons a h).eraseₓ a = s := by
rw [cons_eq_insert, erase_insert_eq_erase, erase_eq_of_not_mem h]
#align finset.erase_cons Finset.erase_cons
-/
#print Finset.erase_idem /-
theorem erase_idem {a : α} {s : Finset α} : erase (erase s a) a = erase s a := by simp
#align finset.erase_idem Finset.erase_idem
-/
#print Finset.erase_right_comm /-
theorem erase_right_comm {a b : α} {s : Finset α} : erase (erase s a) b = erase (erase s b) a :=
by
ext x
simp only [mem_erase, ← and_assoc']
rw [and_comm' (x ≠ a)]
#align finset.erase_right_comm Finset.erase_right_comm
-/
#print Finset.subset_insert_iff /-
theorem subset_insert_iff {a : α} {s t : Finset α} : s ⊆ insert a t ↔ erase s a ⊆ t := by
simp only [subset_iff, or_iff_not_imp_left, mem_erase, mem_insert, and_imp] <;>
exact forall_congr' fun x => forall_swap
#align finset.subset_insert_iff Finset.subset_insert_iff
-/
#print Finset.erase_insert_subset /-
theorem erase_insert_subset (a : α) (s : Finset α) : erase (insert a s) a ⊆ s :=
subset_insert_iff.1 <| Subset.rfl
#align finset.erase_insert_subset Finset.erase_insert_subset
-/
#print Finset.insert_erase_subset /-
theorem insert_erase_subset (a : α) (s : Finset α) : s ⊆ insert a (erase s a) :=
subset_insert_iff.2 <| Subset.rfl
#align finset.insert_erase_subset Finset.insert_erase_subset
-/
#print Finset.subset_insert_iff_of_not_mem /-
theorem subset_insert_iff_of_not_mem (h : a ∉ s) : s ⊆ insert a t ↔ s ⊆ t := by
rw [subset_insert_iff, erase_eq_of_not_mem h]
#align finset.subset_insert_iff_of_not_mem Finset.subset_insert_iff_of_not_mem
-/
#print Finset.erase_subset_iff_of_mem /-
theorem erase_subset_iff_of_mem (h : a ∈ t) : s.eraseₓ a ⊆ t ↔ s ⊆ t := by
rw [← subset_insert_iff, insert_eq_of_mem h]
#align finset.erase_subset_iff_of_mem Finset.erase_subset_iff_of_mem
-/
#print Finset.erase_inj /-
theorem erase_inj {x y : α} (s : Finset α) (hx : x ∈ s) : s.eraseₓ x = s.eraseₓ y ↔ x = y :=
by
refine' ⟨fun h => _, congr_arg _⟩
rw [eq_of_mem_of_not_mem_erase hx]
rw [← h]
simp
#align finset.erase_inj Finset.erase_inj
-/
#print Finset.erase_injOn /-
theorem erase_injOn (s : Finset α) : Set.InjOn s.eraseₓ s := fun _ _ _ _ => (erase_inj s ‹_›).mp
#align finset.erase_inj_on Finset.erase_injOn
-/
#print Finset.erase_injOn' /-
theorem erase_injOn' (a : α) : { s : Finset α | a ∈ s }.InjOn fun s => erase s a :=
fun s hs t ht (h : s.eraseₓ a = _) => by rw [← insert_erase hs, ← insert_erase ht, h]
#align finset.erase_inj_on' Finset.erase_injOn'
-/
end Erase
/-! ### sdiff -/
section Sdiff
variable [DecidableEq α] {s t u v : Finset α} {a b : α}
/-- `s \ t` is the set consisting of the elements of `s` that are not in `t`. -/
instance : SDiff (Finset α) :=
  -- Implemented by multiset subtraction on the underlying multisets;
  -- `nodup_of_le tsub_le_self` transports `s₁`'s no-duplicates proof to the result.
  ⟨fun s₁ s₂ => ⟨s₁.1 - s₂.1, nodup_of_le tsub_le_self s₁.2⟩⟩
#print Finset.sdiff_val /-
@[simp]
theorem sdiff_val (s₁ s₂ : Finset α) : (s₁ \ s₂).val = s₁.val - s₂.val :=
rfl
#align finset.sdiff_val Finset.sdiff_val
-/
#print Finset.mem_sdiff /-
@[simp]
theorem mem_sdiff : a ∈ s \ t ↔ a ∈ s ∧ a ∉ t :=
mem_sub_of_nodup s.2
#align finset.mem_sdiff Finset.mem_sdiff
-/
#print Finset.inter_sdiff_self /-
@[simp]
theorem inter_sdiff_self (s₁ s₂ : Finset α) : s₁ ∩ (s₂ \ s₁) = ∅ :=
eq_empty_of_forall_not_mem <| by
simp only [mem_inter, mem_sdiff] <;> rintro x ⟨h, _, hn⟩ <;> exact hn h
#align finset.inter_sdiff_self Finset.inter_sdiff_self
-/
/-- `Finset α` is a generalized Boolean algebra: a distributive lattice with a bottom
element and a relative complement `\`, but no top element (there is no universal finset
for an infinite `α`). Built from the existing `SDiff`, lattice, and `OrderBot` instances;
only the two sdiff laws need proving. -/
instance : GeneralizedBooleanAlgebra (Finset α) :=
  { Finset.hasSdiff, Finset.distribLattice,
    Finset.orderBot with
    -- `x ⊓ y ⊔ x \ y = x`: reduce to a membership statement and discharge by `tauto`.
    sup_inf_sdiff := fun x y =>
      by
      simp only [ext_iff, mem_union, mem_sdiff, inf_eq_inter, sup_eq_union, mem_inter]
      tauto
    -- `x ⊓ y ⊓ (x \ y) = ⊥`: same strategy, membership in the empty finset is false.
    inf_inf_sdiff := fun x y =>
      by
      simp only [ext_iff, inter_sdiff_self, inter_empty, inter_assoc, false_iff_iff, inf_eq_inter,
        not_mem_empty]
      tauto }
#print Finset.not_mem_sdiff_of_mem_right /-
theorem not_mem_sdiff_of_mem_right (h : a ∈ t) : a ∉ s \ t := by
simp only [mem_sdiff, h, not_true, not_false_iff, and_false_iff]
#align finset.not_mem_sdiff_of_mem_right Finset.not_mem_sdiff_of_mem_right
-/
#print Finset.not_mem_sdiff_of_not_mem_left /-
theorem not_mem_sdiff_of_not_mem_left (h : a ∉ s) : a ∉ s \ t := by simpa
#align finset.not_mem_sdiff_of_not_mem_left Finset.not_mem_sdiff_of_not_mem_left
-/
#print Finset.union_sdiff_of_subset /-
theorem union_sdiff_of_subset (h : s ⊆ t) : s ∪ t \ s = t :=
sup_sdiff_cancel_right h
#align finset.union_sdiff_of_subset Finset.union_sdiff_of_subset
-/
#print Finset.sdiff_union_of_subset /-
theorem sdiff_union_of_subset {s₁ s₂ : Finset α} (h : s₁ ⊆ s₂) : s₂ \ s₁ ∪ s₁ = s₂ :=
(union_comm _ _).trans (union_sdiff_of_subset h)
#align finset.sdiff_union_of_subset Finset.sdiff_union_of_subset
-/
#print Finset.inter_sdiff /-
theorem inter_sdiff (s t u : Finset α) : s ∩ (t \ u) = (s ∩ t) \ u :=
by
ext x
simp [and_assoc']
#align finset.inter_sdiff Finset.inter_sdiff
-/
#print Finset.sdiff_inter_self /-
@[simp]
theorem sdiff_inter_self (s₁ s₂ : Finset α) : s₂ \ s₁ ∩ s₁ = ∅ :=
inf_sdiff_self_left
#align finset.sdiff_inter_self Finset.sdiff_inter_self
-/
#print Finset.sdiff_self /-
@[simp]
theorem sdiff_self (s₁ : Finset α) : s₁ \ s₁ = ∅ :=
sdiff_self
#align finset.sdiff_self Finset.sdiff_self
-/
#print Finset.sdiff_inter_distrib_right /-
theorem sdiff_inter_distrib_right (s t u : Finset α) : s \ (t ∩ u) = s \ t ∪ s \ u :=
sdiff_inf
#align finset.sdiff_inter_distrib_right Finset.sdiff_inter_distrib_right
-/
#print Finset.sdiff_inter_self_left /-
@[simp]
theorem sdiff_inter_self_left (s t : Finset α) : s \ (s ∩ t) = s \ t :=
sdiff_inf_self_left _ _
#align finset.sdiff_inter_self_left Finset.sdiff_inter_self_left
-/
#print Finset.sdiff_inter_self_right /-
@[simp]
theorem sdiff_inter_self_right (s t : Finset α) : s \ (t ∩ s) = s \ t :=
sdiff_inf_self_right _ _
#align finset.sdiff_inter_self_right Finset.sdiff_inter_self_right
-/
#print Finset.sdiff_empty /-
@[simp]
theorem sdiff_empty : s \ ∅ = s :=
sdiff_bot
#align finset.sdiff_empty Finset.sdiff_empty
-/
#print Finset.sdiff_subset_sdiff /-
@[mono]
theorem sdiff_subset_sdiff (hst : s ⊆ t) (hvu : v ⊆ u) : s \ u ⊆ t \ v :=
sdiff_le_sdiff ‹s ≤ t› ‹v ≤ u›
#align finset.sdiff_subset_sdiff Finset.sdiff_subset_sdiff
-/
/- warning: finset.coe_sdiff -> Finset.coe_sdiff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (SDiff.sdiff.{u1} (Set.{u1} α) (BooleanAlgebra.toHasSdiff.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₁) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₂))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s₁ : Finset.{u1} α) (s₂ : Finset.{u1} α), Eq.{succ u1} (Set.{u1} α) (Finset.toSet.{u1} α (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s₁ s₂)) (SDiff.sdiff.{u1} (Set.{u1} α) (Set.instSDiffSet.{u1} α) (Finset.toSet.{u1} α s₁) (Finset.toSet.{u1} α s₂))
Case conversion may be inaccurate. Consider using '#align finset.coe_sdiff Finset.coe_sdiffₓ'. -/
/-- Coercion to `Set α` commutes with set difference. -/
@[simp, norm_cast]
theorem coe_sdiff (s₁ s₂ : Finset α) : ↑(s₁ \ s₂) = (s₁ \ s₂ : Set α) :=
  Set.ext fun _ => mem_sdiff
#align finset.coe_sdiff Finset.coe_sdiff
#print Finset.union_sdiff_self_eq_union /-
@[simp]
theorem union_sdiff_self_eq_union : s ∪ t \ s = s ∪ t :=
sup_sdiff_self_right _ _
#align finset.union_sdiff_self_eq_union Finset.union_sdiff_self_eq_union
-/
#print Finset.sdiff_union_self_eq_union /-
@[simp]
theorem sdiff_union_self_eq_union : s \ t ∪ t = s ∪ t :=
sup_sdiff_self_left _ _
#align finset.sdiff_union_self_eq_union Finset.sdiff_union_self_eq_union
-/
#print Finset.union_sdiff_left /-
theorem union_sdiff_left (s t : Finset α) : (s ∪ t) \ s = t \ s :=
sup_sdiff_left_self
#align finset.union_sdiff_left Finset.union_sdiff_left
-/
#print Finset.union_sdiff_right /-
theorem union_sdiff_right (s t : Finset α) : (s ∪ t) \ t = s \ t :=
sup_sdiff_right_self
#align finset.union_sdiff_right Finset.union_sdiff_right
-/
#print Finset.union_sdiff_symm /-
theorem union_sdiff_symm : s ∪ t \ s = t ∪ s \ t := by simp [union_comm]
#align finset.union_sdiff_symm Finset.union_sdiff_symm
-/
#print Finset.sdiff_union_inter /-
theorem sdiff_union_inter (s t : Finset α) : s \ t ∪ s ∩ t = s :=
sup_sdiff_inf _ _
#align finset.sdiff_union_inter Finset.sdiff_union_inter
-/
#print Finset.sdiff_idem /-
@[simp]
theorem sdiff_idem (s t : Finset α) : (s \ t) \ t = s \ t :=
sdiff_idem
#align finset.sdiff_idem Finset.sdiff_idem
-/
/- warning: finset.subset_sdiff -> Finset.subset_sdiff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t u)) (And (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s t) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s u))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {u : Finset.{u1} α}, Iff (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t u)) (And (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.instHasSubsetFinset.{u1} α) s t) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s u))
Case conversion may be inaccurate. Consider using '#align finset.subset_sdiff Finset.subset_sdiffₓ'. -/
/-- `s ⊆ t \ u` iff `s ⊆ t` and `s` is disjoint from `u`.
Finset specialization of the order-theoretic lemma `le_sdiff`. -/
theorem subset_sdiff : s ⊆ t \ u ↔ s ⊆ t ∧ Disjoint s u :=
  le_iff_subset.symm.trans le_sdiff
#align finset.subset_sdiff Finset.subset_sdiff
#print Finset.sdiff_eq_empty_iff_subset /-
@[simp]
theorem sdiff_eq_empty_iff_subset : s \ t = ∅ ↔ s ⊆ t :=
sdiff_eq_bot_iff
#align finset.sdiff_eq_empty_iff_subset Finset.sdiff_eq_empty_iff_subset
-/
#print Finset.sdiff_nonempty /-
theorem sdiff_nonempty : (s \ t).Nonempty ↔ ¬s ⊆ t :=
nonempty_iff_ne_empty.trans sdiff_eq_empty_iff_subset.Not
#align finset.sdiff_nonempty Finset.sdiff_nonempty
-/
#print Finset.empty_sdiff /-
@[simp]
theorem empty_sdiff (s : Finset α) : ∅ \ s = ∅ :=
bot_sdiff
#align finset.empty_sdiff Finset.empty_sdiff
-/
#print Finset.insert_sdiff_of_not_mem /-
theorem insert_sdiff_of_not_mem (s : Finset α) {t : Finset α} {x : α} (h : x ∉ t) :
insert x s \ t = insert x (s \ t) :=
by
rw [← coe_inj, coe_insert, coe_sdiff, coe_sdiff, coe_insert]
exact Set.insert_diff_of_not_mem s h
#align finset.insert_sdiff_of_not_mem Finset.insert_sdiff_of_not_mem
-/
#print Finset.insert_sdiff_of_mem /-
theorem insert_sdiff_of_mem (s : Finset α) {x : α} (h : x ∈ t) : insert x s \ t = s \ t :=
by
rw [← coe_inj, coe_sdiff, coe_sdiff, coe_insert]
exact Set.insert_diff_of_mem s h
#align finset.insert_sdiff_of_mem Finset.insert_sdiff_of_mem
-/
#print Finset.insert_sdiff_insert /-
@[simp]
theorem insert_sdiff_insert (s t : Finset α) (x : α) : insert x s \ insert x t = s \ insert x t :=
insert_sdiff_of_mem _ (mem_insert_self _ _)
#align finset.insert_sdiff_insert Finset.insert_sdiff_insert
-/
#print Finset.sdiff_insert_of_not_mem /-
theorem sdiff_insert_of_not_mem {x : α} (h : x ∉ s) (t : Finset α) : s \ insert x t = s \ t :=
by
refine' subset.antisymm (sdiff_subset_sdiff (subset.refl _) (subset_insert _ _)) fun y hy => _
simp only [mem_sdiff, mem_insert, not_or] at hy⊢
exact ⟨hy.1, fun hxy => h <| hxy ▸ hy.1, hy.2⟩
#align finset.sdiff_insert_of_not_mem Finset.sdiff_insert_of_not_mem
-/
#print Finset.sdiff_subset /-
@[simp]
theorem sdiff_subset (s t : Finset α) : s \ t ⊆ s :=
show s \ t ≤ s from sdiff_le
#align finset.sdiff_subset Finset.sdiff_subset
-/
#print Finset.sdiff_ssubset /-
theorem sdiff_ssubset (h : t ⊆ s) (ht : t.Nonempty) : s \ t ⊂ s :=
sdiff_lt ‹t ≤ s› ht.ne_empty
#align finset.sdiff_ssubset Finset.sdiff_ssubset
-/
#print Finset.union_sdiff_distrib /-
theorem union_sdiff_distrib (s₁ s₂ t : Finset α) : (s₁ ∪ s₂) \ t = s₁ \ t ∪ s₂ \ t :=
sup_sdiff
#align finset.union_sdiff_distrib Finset.union_sdiff_distrib
-/
#print Finset.sdiff_union_distrib /-
theorem sdiff_union_distrib (s t₁ t₂ : Finset α) : s \ (t₁ ∪ t₂) = s \ t₁ ∩ (s \ t₂) :=
sdiff_sup
#align finset.sdiff_union_distrib Finset.sdiff_union_distrib
-/
#print Finset.union_sdiff_self /-
theorem union_sdiff_self (s t : Finset α) : (s ∪ t) \ t = s \ t :=
sup_sdiff_right_self
#align finset.union_sdiff_self Finset.union_sdiff_self
-/
#print Finset.sdiff_singleton_eq_erase /-
theorem sdiff_singleton_eq_erase (a : α) (s : Finset α) : s \ singleton a = erase s a :=
by
ext
rw [mem_erase, mem_sdiff, mem_singleton]
tauto
#align finset.sdiff_singleton_eq_erase Finset.sdiff_singleton_eq_erase
-/
#print Finset.sdiff_singleton_not_mem_eq_self /-
@[simp]
theorem sdiff_singleton_not_mem_eq_self (s : Finset α) {a : α} (ha : a ∉ s) : s \ {a} = s := by
simp only [sdiff_singleton_eq_erase, ha, erase_eq_of_not_mem, not_false_iff]
#align finset.sdiff_singleton_not_mem_eq_self Finset.sdiff_singleton_not_mem_eq_self
-/
#print Finset.sdiff_sdiff_left' /-
theorem sdiff_sdiff_left' (s t u : Finset α) : (s \ t) \ u = s \ t ∩ (s \ u) :=
sdiff_sdiff_left'
#align finset.sdiff_sdiff_left' Finset.sdiff_sdiff_left'
-/
#print Finset.sdiff_insert /-
theorem sdiff_insert (s t : Finset α) (x : α) : s \ insert x t = (s \ t).eraseₓ x := by
simp_rw [← sdiff_singleton_eq_erase, insert_eq, sdiff_sdiff_left', sdiff_union_distrib,
inter_comm]
#align finset.sdiff_insert Finset.sdiff_insert
-/
#print Finset.sdiff_insert_insert_of_mem_of_not_mem /-
theorem sdiff_insert_insert_of_mem_of_not_mem {s t : Finset α} {x : α} (hxs : x ∈ s) (hxt : x ∉ t) :
insert x (s \ insert x t) = s \ t := by
rw [sdiff_insert, insert_erase (mem_sdiff.mpr ⟨hxs, hxt⟩)]
#align finset.sdiff_insert_insert_of_mem_of_not_mem Finset.sdiff_insert_insert_of_mem_of_not_mem
-/
#print Finset.sdiff_erase /-
theorem sdiff_erase {x : α} (hx : x ∈ s) : s \ s.eraseₓ x = {x} :=
by
rw [← sdiff_singleton_eq_erase, sdiff_sdiff_right_self]
exact inf_eq_right.2 (singleton_subset_iff.2 hx)
#align finset.sdiff_erase Finset.sdiff_erase
-/
#print Finset.sdiff_sdiff_self_left /-
theorem sdiff_sdiff_self_left (s t : Finset α) : s \ (s \ t) = s ∩ t :=
sdiff_sdiff_right_self
#align finset.sdiff_sdiff_self_left Finset.sdiff_sdiff_self_left
-/
#print Finset.sdiff_sdiff_eq_self /-
theorem sdiff_sdiff_eq_self (h : t ⊆ s) : s \ (s \ t) = t :=
sdiff_sdiff_eq_self h
#align finset.sdiff_sdiff_eq_self Finset.sdiff_sdiff_eq_self
-/
#print Finset.sdiff_eq_sdiff_iff_inter_eq_inter /-
theorem sdiff_eq_sdiff_iff_inter_eq_inter {s t₁ t₂ : Finset α} :
s \ t₁ = s \ t₂ ↔ s ∩ t₁ = s ∩ t₂ :=
sdiff_eq_sdiff_iff_inf_eq_inf
#align finset.sdiff_eq_sdiff_iff_inter_eq_inter Finset.sdiff_eq_sdiff_iff_inter_eq_inter
-/
#print Finset.union_eq_sdiff_union_sdiff_union_inter /-
theorem union_eq_sdiff_union_sdiff_union_inter (s t : Finset α) : s ∪ t = s \ t ∪ t \ s ∪ s ∩ t :=
sup_eq_sdiff_sup_sdiff_sup_inf
#align finset.union_eq_sdiff_union_sdiff_union_inter Finset.union_eq_sdiff_union_sdiff_union_inter
-/
#print Finset.erase_eq_empty_iff /-
theorem erase_eq_empty_iff (s : Finset α) (a : α) : s.eraseₓ a = ∅ ↔ s = ∅ ∨ s = {a} := by
rw [← sdiff_singleton_eq_erase, sdiff_eq_empty_iff_subset, subset_singleton_iff]
#align finset.erase_eq_empty_iff Finset.erase_eq_empty_iff
-/
/- warning: finset.sdiff_disjoint -> Finset.sdiff_disjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t s) s
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t s) s
Case conversion may be inaccurate. Consider using '#align finset.sdiff_disjoint Finset.sdiff_disjointₓ'. -/
--TODO@Yaël: Kill lemmas duplicate with `boolean_algebra`
/-- `t \ s` is disjoint from `s`: any `a ∈ t \ s` satisfies `a ∉ s` by `mem_sdiff`. -/
theorem sdiff_disjoint : Disjoint (t \ s) s :=
  disjoint_left.2 fun a ha => (mem_sdiff.1 ha).2
#align finset.sdiff_disjoint Finset.sdiff_disjoint
/- warning: finset.disjoint_sdiff -> Finset.disjoint_sdiff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) t s)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_sdiff Finset.disjoint_sdiffₓ'. -/
/-- `s` is disjoint from `t \ s`; symmetric form of `sdiff_disjoint`. -/
theorem disjoint_sdiff : Disjoint s (t \ s) :=
  sdiff_disjoint.symm
#align finset.disjoint_sdiff Finset.disjoint_sdiff
/- warning: finset.disjoint_sdiff_inter -> Finset.disjoint_sdiff_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{u1} α) (t : Finset.{u1} α), Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_sdiff_inter Finset.disjoint_sdiff_interₓ'. -/
/-- `s \ t` and `s ∩ t` are disjoint: shrink the right side to `t` and apply
`sdiff_disjoint`. -/
theorem disjoint_sdiff_inter (s t : Finset α) : Disjoint (s \ t) (s ∩ t) :=
  disjoint_of_subset_right (inter_subset_right _ _) sdiff_disjoint
#align finset.disjoint_sdiff_inter Finset.disjoint_sdiff_inter
/- warning: finset.sdiff_eq_self_iff_disjoint -> Finset.sdiff_eq_self_iff_disjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Eq.{succ u1} (Finset.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) s) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Iff (Eq.{succ u1} (Finset.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) s) (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t)
Case conversion may be inaccurate. Consider using '#align finset.sdiff_eq_self_iff_disjoint Finset.sdiff_eq_self_iff_disjointₓ'. -/
/-- Removing `t` from `s` changes nothing exactly when `s` and `t` are disjoint. -/
theorem sdiff_eq_self_iff_disjoint : s \ t = s ↔ Disjoint s t :=
  sdiff_eq_self_iff_disjoint'
#align finset.sdiff_eq_self_iff_disjoint Finset.sdiff_eq_self_iff_disjoint
/- warning: finset.sdiff_eq_self_of_disjoint -> Finset.sdiff_eq_self_of_disjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) -> (Eq.{succ u1} (Finset.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) s)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) -> (Eq.{succ u1} (Finset.{u1} α) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t) s)
Case conversion may be inaccurate. Consider using '#align finset.sdiff_eq_self_of_disjoint Finset.sdiff_eq_self_of_disjointₓ'. -/
/-- If `s` and `t` are disjoint, then `s \ t = s`; forward direction of
`sdiff_eq_self_iff_disjoint`. -/
theorem sdiff_eq_self_of_disjoint (h : Disjoint s t) : s \ t = s :=
  sdiff_eq_self_iff_disjoint.2 h
#align finset.sdiff_eq_self_of_disjoint Finset.sdiff_eq_self_of_disjoint
end Sdiff
/-! ### Symmetric difference -/
section symmDiff
variable [DecidableEq α] {s t : Finset α} {a b : α}
/- warning: finset.mem_symm_diff -> Finset.mem_symmDiff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a (symmDiff.{u1} (Finset.{u1} α) (SemilatticeSup.toHasSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) (Or (And (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t))) (And (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a t) (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s))))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α} {a : α}, Iff (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a (symmDiff.{u1} (Finset.{u1} α) (SemilatticeSup.toSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) (Or (And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t))) (And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a t) (Not (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s))))
Case conversion may be inaccurate. Consider using '#align finset.mem_symm_diff Finset.mem_symmDiffₓ'. -/
/-- Membership in the symmetric difference: `a ∈ s ∆ t` iff `a` lies in exactly one of
`s` and `t`. Unfolds `symmDiff` to `(s \ t) ∪ (t \ s)` and rewrites membership. -/
theorem mem_symmDiff : a ∈ s ∆ t ↔ a ∈ s ∧ a ∉ t ∨ a ∈ t ∧ a ∉ s := by
  simp_rw [symmDiff, sup_eq_union, mem_union, mem_sdiff]
#align finset.mem_symm_diff Finset.mem_symmDiff
/- warning: finset.coe_symm_diff -> Finset.coe_symmDiff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Eq.{succ u1} (Set.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (symmDiff.{u1} (Finset.{u1} α) (SemilatticeSup.toHasSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.lattice.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) (symmDiff.{u1} (Set.{u1} α) (SemilatticeSup.toHasSup.{u1} (Set.{u1} α) (Lattice.toSemilatticeSup.{u1} (Set.{u1} α) (CompleteLattice.toLattice.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.completeBooleanAlgebra.{u1} α))))))) (BooleanAlgebra.toHasSdiff.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α)) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) t))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u1} α}, Eq.{succ u1} (Set.{u1} α) (Finset.toSet.{u1} α (symmDiff.{u1} (Finset.{u1} α) (SemilatticeSup.toSup.{u1} (Finset.{u1} α) (Lattice.toSemilatticeSup.{u1} (Finset.{u1} α) (Finset.instLatticeFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)))) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) s t)) (symmDiff.{u1} (Set.{u1} α) (SemilatticeSup.toSup.{u1} (Set.{u1} α) (Lattice.toSemilatticeSup.{u1} (Set.{u1} α) (CompleteLattice.toLattice.{u1} (Set.{u1} α) (Order.Coframe.toCompleteLattice.{u1} (Set.{u1} α) (CompleteDistribLattice.toCoframe.{u1} (Set.{u1} α) (CompleteBooleanAlgebra.toCompleteDistribLattice.{u1} (Set.{u1} α) (Set.instCompleteBooleanAlgebraSet.{u1} α))))))) (Set.instSDiffSet.{u1} α) (Finset.toSet.{u1} α s) (Finset.toSet.{u1} α t))
Case conversion may be inaccurate. Consider using '#align finset.coe_symm_diff Finset.coe_symmDiffₓ'. -/
/-- Coercion to `Set α` commutes with symmetric difference. -/
@[simp, norm_cast]
theorem coe_symmDiff : (↑(s ∆ t) : Set α) = s ∆ t :=
  Set.ext fun _ => mem_symmDiff
#align finset.coe_symm_diff Finset.coe_symmDiff
end symmDiff
/-! ### attach -/
#print Finset.attach /-
/-- `attach s` takes the elements of `s` and forms a new set of elements of the subtype
`{x // x ∈ s}`. -/
def attach (s : Finset α) : Finset { x // x ∈ s } :=
⟨attach s.1, nodup_attach.2 s.2⟩
#align finset.attach Finset.attach
-/
#print Finset.sizeOf_lt_sizeOf_of_mem /-
theorem sizeOf_lt_sizeOf_of_mem [SizeOf α] {x : α} {s : Finset α} (hx : x ∈ s) :
SizeOf.sizeOf x < SizeOf.sizeOf s := by
cases s
dsimp [SizeOf.sizeOf, SizeOf.sizeOf, Finset.sizeof]
apply lt_add_left
exact Multiset.sizeOf_lt_sizeOf_of_mem hx
#align finset.sizeof_lt_sizeof_of_mem Finset.sizeOf_lt_sizeOf_of_mem
-/
#print Finset.attach_val /-
@[simp]
theorem attach_val (s : Finset α) : s.attach.1 = s.1.attach :=
rfl
#align finset.attach_val Finset.attach_val
-/
#print Finset.mem_attach /-
@[simp]
theorem mem_attach (s : Finset α) : ∀ x, x ∈ s.attach :=
mem_attach _
#align finset.mem_attach Finset.mem_attach
-/
#print Finset.attach_empty /-
@[simp]
theorem attach_empty : attach (∅ : Finset α) = ∅ :=
rfl
#align finset.attach_empty Finset.attach_empty
-/
#print Finset.attach_nonempty_iff /-
@[simp]
theorem attach_nonempty_iff (s : Finset α) : s.attach.Nonempty ↔ s.Nonempty := by
simp [Finset.Nonempty]
#align finset.attach_nonempty_iff Finset.attach_nonempty_iff
-/
#print Finset.attach_eq_empty_iff /-
@[simp]
theorem attach_eq_empty_iff (s : Finset α) : s.attach = ∅ ↔ s = ∅ := by
simpa [eq_empty_iff_forall_not_mem]
#align finset.attach_eq_empty_iff Finset.attach_eq_empty_iff
-/
/-! ### piecewise -/
section Piecewise
#print Finset.piecewise /-
/-- `s.piecewise f g` is the function equal to `f` on the finset `s`, and to `g` on its
complement. -/
def piecewise {α : Type _} {δ : α → Sort _} (s : Finset α) (f g : ∀ i, δ i)
[∀ j, Decidable (j ∈ s)] : ∀ i, δ i := fun i => if i ∈ s then f i else g i
#align finset.piecewise Finset.piecewise
-/
variable {δ : α → Sort _} (s : Finset α) (f g : ∀ i, δ i)
/- warning: finset.piecewise_insert_self -> Finset.piecewise_insert_self is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : DecidableEq.{succ u1} α] {j : α} [_inst_2 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) j s))], Eq.{u2} (δ j) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_1 a b)) j s) f g (fun (j : α) => _inst_2 j) j) (f j)
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : DecidableEq.{succ u2} α] {j : α} [_inst_2 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_1 a b)) j s))], Eq.{u1} (δ j) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_1 a b)) j s) f g (fun (j : α) => _inst_2 j) j) (f j)
Case conversion may be inaccurate. Consider using '#align finset.piecewise_insert_self Finset.piecewise_insert_selfₓ'. -/
/-- `piecewise` over `insert j s` evaluates to `f j` at the newly inserted point `j`,
since `j ∈ insert j s` selects the `f` branch. -/
@[simp]
theorem piecewise_insert_self [DecidableEq α] {j : α} [∀ i, Decidable (i ∈ insert j s)] :
    (insert j s).piecewise f g j = f j := by simp [piecewise]
#align finset.piecewise_insert_self Finset.piecewise_insert_self
/- warning: finset.piecewise_empty -> Finset.piecewise_empty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)))], Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α)) f g (fun (j : α) => _inst_1 j)) g
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i (EmptyCollection.emptyCollection.{u2} (Finset.{u2} α) (Finset.instEmptyCollectionFinset.{u2} α)))], Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) (EmptyCollection.emptyCollection.{u2} (Finset.{u2} α) (Finset.instEmptyCollectionFinset.{u2} α)) f g (fun (j : α) => _inst_1 j)) g
Case conversion may be inaccurate. Consider using '#align finset.piecewise_empty Finset.piecewise_emptyₓ'. -/
/-- `piecewise` over the empty finset never takes the `f` branch, so it is just `g`. -/
@[simp]
theorem piecewise_empty [∀ i : α, Decidable (i ∈ (∅ : Finset α))] : piecewise ∅ f g = g :=
by
ext i
simp [piecewise]
#align finset.piecewise_empty Finset.piecewise_empty
-- From here on, membership in `s` is assumed decidable, so `s.piecewise f g` elaborates
-- without supplying the instance at each use site.
variable [∀ j, Decidable (j ∈ s)]
/- warning: finset.piecewise_coe -> Finset.piecewise_coe is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) j ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s))], Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Set.piecewise.{u1, u2} α (fun (i : α) => δ i) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) f g (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) j (Finset.toSet.{u2} α s))], Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Set.piecewise.{u2, u1} α (fun (i : α) => δ i) (Finset.toSet.{u2} α s) f g (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_coe Finset.piecewise_coeₓ'. -/
-- TODO: fix this in norm_cast
/-- Finset `piecewise` agrees with `Set.piecewise` on the coercion of `s` to a `Set`. -/
@[norm_cast move]
theorem piecewise_coe [∀ j, Decidable (j ∈ (s : Set α))] :
(s : Set α).piecewise f g = s.piecewise f g :=
by
ext
congr
#align finset.piecewise_coe Finset.piecewise_coe
/- warning: finset.piecewise_eq_of_mem -> Finset.piecewise_eq_of_mem is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] {i : α}, (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s) -> (Eq.{u2} (δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i) (f i))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] {i : α}, (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s) -> (Eq.{u1} (δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i) (f i))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_eq_of_mem Finset.piecewise_eq_of_memₓ'. -/
/-- At a point of `s`, `s.piecewise f g` takes the `f` branch. -/
@[simp]
theorem piecewise_eq_of_mem {i : α} (hi : i ∈ s) : s.piecewise f g i = f i := by
simp [piecewise, hi]
#align finset.piecewise_eq_of_mem Finset.piecewise_eq_of_mem
/- warning: finset.piecewise_eq_of_not_mem -> Finset.piecewise_eq_of_not_mem is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] {i : α}, (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s)) -> (Eq.{u2} (δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i) (g i))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] {i : α}, (Not (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s)) -> (Eq.{u1} (δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i) (g i))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_eq_of_not_mem Finset.piecewise_eq_of_not_memₓ'. -/
/-- Outside of `s`, `s.piecewise f g` takes the `g` branch. -/
@[simp]
theorem piecewise_eq_of_not_mem {i : α} (hi : i ∉ s) : s.piecewise f g i = g i := by
simp [piecewise, hi]
#align finset.piecewise_eq_of_not_mem Finset.piecewise_eq_of_not_mem
/- warning: finset.piecewise_congr -> Finset.piecewise_congr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] {f : forall (i : α), δ i} {f' : forall (i : α), δ i} {g : forall (i : α), δ i} {g' : forall (i : α), δ i}, (forall (i : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s) -> (Eq.{u2} (δ i) (f i) (f' i))) -> (forall (i : α), (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s)) -> (Eq.{u2} (δ i) (g i) (g' i))) -> (Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f' g' (fun (j : α) => _inst_1 j)))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] {f : forall (i : α), δ i} {f' : forall (i : α), δ i} {g : forall (i : α), δ i} {g' : forall (i : α), δ i}, (forall (i : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s) -> (Eq.{u1} (δ i) (f i) (f' i))) -> (forall (i : α), (Not (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s)) -> (Eq.{u1} (δ i) (g i) (g' i))) -> (Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f' g' (fun (j : α) => _inst_1 j)))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_congr Finset.piecewise_congrₓ'. -/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (i «expr ∉ » s) -/
/-- Congruence for `piecewise`: the `f`/`f'` pair only needs to agree on `s`, and the
`g`/`g'` pair only needs to agree off `s`. -/
theorem piecewise_congr {f f' g g' : ∀ i, δ i} (hf : ∀ i ∈ s, f i = f' i)
(hg : ∀ (i) (_ : i ∉ s), g i = g' i) : s.piecewise f g = s.piecewise f' g' :=
funext fun i => if_ctx_congr Iff.rfl (hf i) (hg i)
#align finset.piecewise_congr Finset.piecewise_congr
/- warning: finset.piecewise_insert_of_ne -> Finset.piecewise_insert_of_ne is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : DecidableEq.{succ u1} α] {i : α} {j : α} [_inst_3 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_2 a b)) j s))], (Ne.{succ u1} α i j) -> (Eq.{u2} (δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_2 a b)) j s) f g (fun (j : α) => _inst_3 j) i) (Finset.piecewise.{u1, u2} α δ s f g (fun (j : α) => _inst_1 j) i))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : DecidableEq.{succ u2} α] {i : α} {j : α} [_inst_3 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b)) j s))], (Ne.{succ u2} α i j) -> (Eq.{u1} (δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b)) j s) f g (fun (j : α) => _inst_3 j) i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_insert_of_ne Finset.piecewise_insert_of_neₓ'. -/
/-- Away from the inserted point `j`, `piecewise` over `insert j s` agrees with
`piecewise` over `s`. -/
@[simp]
theorem piecewise_insert_of_ne [DecidableEq α] {i j : α} [∀ i, Decidable (i ∈ insert j s)]
(h : i ≠ j) : (insert j s).piecewise f g i = s.piecewise f g i := by simp [piecewise, h]
#align finset.piecewise_insert_of_ne Finset.piecewise_insert_of_ne
/- warning: finset.piecewise_insert -> Finset.piecewise_insert is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : DecidableEq.{succ u1} α] (j : α) [_inst_3 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_2 a b)) j s))], Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_2 a b)) j s) f g (fun (j : α) => _inst_3 j)) (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s f g (fun (j : α) => _inst_1 j)) j (f j))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : DecidableEq.{succ u2} α] (j : α) [_inst_3 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b)) j s))], Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b)) j s) f g (fun (j : α) => _inst_3 j)) (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s f g (fun (j : α) => _inst_1 j)) j (f j))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_insert Finset.piecewise_insertₓ'. -/
/-- `piecewise` over `insert j s` is `piecewise` over `s` with the value at `j`
overwritten by `f j` via `Function.update`. -/
theorem piecewise_insert [DecidableEq α] (j : α) [∀ i, Decidable (i ∈ insert j s)] :
(insert j s).piecewise f g = update (s.piecewise f g) j (f j) := by
classical simp only [← piecewise_coe, coe_insert, ← Set.piecewise_insert]
#align finset.piecewise_insert Finset.piecewise_insert
/- warning: finset.piecewise_cases -> Finset.piecewise_cases is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] {i : α} (p : (δ i) -> Prop), (p (f i)) -> (p (g i)) -> (p (Finset.piecewise.{u1, u2} α δ s f g (fun (j : α) => _inst_1 j) i))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] {i : α} (p : (δ i) -> Prop), (p (f i)) -> (p (g i)) -> (p (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j) i))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_cases Finset.piecewise_casesₓ'. -/
/-- A predicate satisfied by both `f i` and `g i` is satisfied by `s.piecewise f g i`,
whichever branch is taken. -/
theorem piecewise_cases {i} (p : δ i → Prop) (hf : p (f i)) (hg : p (g i)) :
p (s.piecewise f g i) := by by_cases hi : i ∈ s <;> simpa [hi]
#align finset.piecewise_cases Finset.piecewise_cases
#print Finset.piecewise_mem_set_pi /-
/-- A piecewise combination of two members of the indexed product `Set.pi t t'` is again
a member; proved by reducing to the `Set.piecewise` version via `piecewise_coe`. -/
theorem piecewise_mem_set_pi {δ : α → Type _} {t : Set α} {t' : ∀ i, Set (δ i)} {f g}
(hf : f ∈ Set.pi t t') (hg : g ∈ Set.pi t t') : s.piecewise f g ∈ Set.pi t t' := by
classical
rw [← piecewise_coe]
exact Set.piecewise_mem_pi (↑s) hf hg
#align finset.piecewise_mem_set_pi Finset.piecewise_mem_set_pi
-/
/- warning: finset.piecewise_singleton -> Finset.piecewise_singleton is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_2 : DecidableEq.{succ u1} α] (i : α), Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) i) f g (fun (j : α) => Finset.decidableMem.{u1} α (fun (a : α) (b : α) => _inst_2 a b) j (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) i))) (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i (f i))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_2 : DecidableEq.{succ u2} α] (i : α), Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) (Singleton.singleton.{u2, u2} α (Finset.{u2} α) (Finset.instSingletonFinset.{u2} α) i) f g (fun (j : α) => Finset.decidableMem.{u2} α (fun (a : α) (b : α) => _inst_2 a b) j (Singleton.singleton.{u2, u2} α (Finset.{u2} α) (Finset.instSingletonFinset.{u2} α) i))) (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i (f i))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_singleton Finset.piecewise_singletonₓ'. -/
/-- `piecewise` over a singleton `{i}` is `g` with the value at `i` replaced by `f i`. -/
theorem piecewise_singleton [DecidableEq α] (i : α) : piecewise {i} f g = update g i (f i) := by
rw [← insert_emptyc_eq, piecewise_insert, piecewise_empty]
#align finset.piecewise_singleton Finset.piecewise_singleton
/- warning: finset.piecewise_piecewise_of_subset_left -> Finset.piecewise_piecewise_of_subset_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} {s : Finset.{u1} α} {t : Finset.{u1} α} [_inst_2 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s)] [_inst_3 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i t)], (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) s t) -> (forall (f₁ : forall (a : α), δ a) (f₂ : forall (a : α), δ a) (g : forall (a : α), δ a), Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) t f₁ f₂ (fun (j : α) => _inst_3 j)) g (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f₁ g (fun (j : α) => _inst_2 j)))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} {s : Finset.{u2} α} {t : Finset.{u2} α} [_inst_2 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s)] [_inst_3 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i t)], (HasSubset.Subset.{u2} (Finset.{u2} α) (Finset.instHasSubsetFinset.{u2} α) s t) -> (forall (f₁ : forall (a : α), δ a) (f₂ : forall (a : α), δ a) (g : forall (a : α), δ a), Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) t f₁ f₂ (fun (j : α) => _inst_3 j)) g (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f₁ g (fun (j : α) => _inst_2 j)))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_piecewise_of_subset_left Finset.piecewise_piecewise_of_subset_leftₓ'. -/
/-- When `s ⊆ t`, nesting a `t`-piecewise in the left branch collapses: on `s` the inner
piecewise always takes its `f₁` branch. -/
theorem piecewise_piecewise_of_subset_left {s t : Finset α} [∀ i, Decidable (i ∈ s)]
[∀ i, Decidable (i ∈ t)] (h : s ⊆ t) (f₁ f₂ g : ∀ a, δ a) :
s.piecewise (t.piecewise f₁ f₂) g = s.piecewise f₁ g :=
s.piecewise_congr (fun i hi => piecewise_eq_of_mem _ _ _ (h hi)) fun _ _ => rfl
#align finset.piecewise_piecewise_of_subset_left Finset.piecewise_piecewise_of_subset_left
/- warning: finset.piecewise_idem_left -> Finset.piecewise_idem_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] (f₁ : forall (a : α), δ a) (f₂ : forall (a : α), δ a) (g : forall (a : α), δ a), Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s f₁ f₂ (fun (j : α) => _inst_1 j)) g (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f₁ g (fun (j : α) => _inst_1 j))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] (f₁ : forall (a : α), δ a) (f₂ : forall (a : α), δ a) (g : forall (a : α), δ a), Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s f₁ f₂ (fun (j : α) => _inst_1 j)) g (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f₁ g (fun (j : α) => _inst_1 j))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_idem_left Finset.piecewise_idem_leftₓ'. -/
/-- Left-nested `piecewise` over the same finset is idempotent: the inner `f₂` branch is
unreachable. Special case of `piecewise_piecewise_of_subset_left` with `s = t`. -/
@[simp]
theorem piecewise_idem_left (f₁ f₂ g : ∀ a, δ a) :
s.piecewise (s.piecewise f₁ f₂) g = s.piecewise f₁ g :=
piecewise_piecewise_of_subset_left (Subset.refl _) _ _ _
#align finset.piecewise_idem_left Finset.piecewise_idem_left
/- warning: finset.piecewise_piecewise_of_subset_right -> Finset.piecewise_piecewise_of_subset_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} {s : Finset.{u1} α} {t : Finset.{u1} α} [_inst_2 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s)] [_inst_3 : forall (i : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i t)], (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) t s) -> (forall (f : forall (a : α), δ a) (g₁ : forall (a : α), δ a) (g₂ : forall (a : α), δ a), Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s f (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) t g₁ g₂ (fun (j : α) => _inst_3 j)) (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g₂ (fun (j : α) => _inst_2 j)))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} {s : Finset.{u2} α} {t : Finset.{u2} α} [_inst_2 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s)] [_inst_3 : forall (i : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i t)], (HasSubset.Subset.{u2} (Finset.{u2} α) (Finset.instHasSubsetFinset.{u2} α) t s) -> (forall (f : forall (a : α), δ a) (g₁ : forall (a : α), δ a) (g₂ : forall (a : α), δ a), Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s f (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) t g₁ g₂ (fun (j : α) => _inst_3 j)) (fun (j : α) => _inst_2 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g₂ (fun (j : α) => _inst_2 j)))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_piecewise_of_subset_right Finset.piecewise_piecewise_of_subset_rightₓ'. -/
/-- When `t ⊆ s`, nesting a `t`-piecewise in the right branch collapses: off `s` (hence
off `t`) the inner piecewise always takes its `g₂` branch. -/
theorem piecewise_piecewise_of_subset_right {s t : Finset α} [∀ i, Decidable (i ∈ s)]
[∀ i, Decidable (i ∈ t)] (h : t ⊆ s) (f g₁ g₂ : ∀ a, δ a) :
s.piecewise f (t.piecewise g₁ g₂) = s.piecewise f g₂ :=
s.piecewise_congr (fun _ _ => rfl) fun i hi => t.piecewise_eq_of_not_mem _ _ (mt (@h _) hi)
#align finset.piecewise_piecewise_of_subset_right Finset.piecewise_piecewise_of_subset_right
/- warning: finset.piecewise_idem_right -> Finset.piecewise_idem_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] (f : forall (a : α), δ a) (g₁ : forall (a : α), δ a) (g₂ : forall (a : α), δ a), Eq.{imax (succ u1) u2} (forall (i : α), δ i) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s f (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s g₁ g₂ (fun (j : α) => _inst_1 j)) (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g₂ (fun (j : α) => _inst_1 j))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] (f : forall (a : α), δ a) (g₁ : forall (a : α), δ a) (g₂ : forall (a : α), δ a), Eq.{imax (succ u2) u1} (forall (i : α), δ i) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s f (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s g₁ g₂ (fun (j : α) => _inst_1 j)) (fun (j : α) => _inst_1 j)) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g₂ (fun (j : α) => _inst_1 j))
Case conversion may be inaccurate. Consider using '#align finset.piecewise_idem_right Finset.piecewise_idem_rightₓ'. -/
/-- Right-nested `piecewise` over the same finset is idempotent: the inner `g₁` branch is
unreachable. Special case of `piecewise_piecewise_of_subset_right` with `s = t`. -/
@[simp]
theorem piecewise_idem_right (f g₁ g₂ : ∀ a, δ a) :
s.piecewise f (s.piecewise g₁ g₂) = s.piecewise f g₂ :=
piecewise_piecewise_of_subset_right (Subset.refl _) f g₁ g₂
#align finset.piecewise_idem_right Finset.piecewise_idem_right
#print Finset.update_eq_piecewise /-
/-- `Function.update f i v` rewritten as a `piecewise` over the singleton `{i}`;
the converse direction of `piecewise_singleton`. -/
theorem update_eq_piecewise {β : Type _} [DecidableEq α] (f : α → β) (i : α) (v : β) :
update f i v = piecewise (singleton i) (fun j => v) f :=
(piecewise_singleton _ _ _).symm
#align finset.update_eq_piecewise Finset.update_eq_piecewise
-/
/- warning: finset.update_piecewise -> Finset.update_piecewise is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : DecidableEq.{succ u1} α] (i : α) (v : δ i), Eq.{imax (succ u1) u2} (forall (a : α), δ a) (Function.update.{succ u1, u2} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) f i v) (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i v) (fun (j : α) => _inst_1 j))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : DecidableEq.{succ u2} α] (i : α) (v : δ i), Eq.{imax (succ u2) u1} (forall (a : α), δ a) (Function.update.{succ u2, u1} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) f i v) (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i v) (fun (j : α) => _inst_1 j))
Case conversion may be inaccurate. Consider using '#align finset.update_piecewise Finset.update_piecewiseₓ'. -/
/-- `Function.update` distributes over `piecewise`: updating the combined function at `i`
is the same as updating both branches at `i`. -/
theorem update_piecewise [DecidableEq α] (i : α) (v : δ i) :
update (s.piecewise f g) i v = s.piecewise (update f i v) (update g i v) :=
by
ext j
rcases em (j = i) with (rfl | hj) <;> by_cases hs : j ∈ s <;> simp [*]
#align finset.update_piecewise Finset.update_piecewise
/- warning: finset.update_piecewise_of_mem -> Finset.update_piecewise_of_mem is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : DecidableEq.{succ u1} α] {i : α}, (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s) -> (forall (v : δ i), Eq.{imax (succ u1) u2} (forall (a : α), δ a) (Function.update.{succ u1, u2} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) f i v) g (fun (j : α) => _inst_1 j)))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : DecidableEq.{succ u2} α] {i : α}, (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s) -> (forall (v : δ i), Eq.{imax (succ u2) u1} (forall (a : α), δ a) (Function.update.{succ u2, u1} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) f i v) g (fun (j : α) => _inst_1 j)))
Case conversion may be inaccurate. Consider using '#align finset.update_piecewise_of_mem Finset.update_piecewise_of_memₓ'. -/
/-- When `i ∈ s`, updating `s.piecewise f g` at `i` only needs to update the `f` branch:
the update of `g` would never be read. -/
theorem update_piecewise_of_mem [DecidableEq α] {i : α} (hi : i ∈ s) (v : δ i) :
update (s.piecewise f g) i v = s.piecewise (update f i v) g :=
by
rw [update_piecewise]
refine' s.piecewise_congr (fun _ _ => rfl) fun j hj => update_noteq _ _ _
-- `j ∉ s` while `i ∈ s`, so `j ≠ i` and the update of `g` at `i` is invisible at `j`.
exact fun h => hj (h.symm ▸ hi)
#align finset.update_piecewise_of_mem Finset.update_piecewise_of_mem
/- warning: finset.update_piecewise_of_not_mem -> Finset.update_piecewise_of_not_mem is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {δ : α -> Sort.{u2}} (s : Finset.{u1} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) j s)] [_inst_2 : DecidableEq.{succ u1} α] {i : α}, (Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s)) -> (forall (v : δ i), Eq.{imax (succ u1) u2} (forall (a : α), δ a) (Function.update.{succ u1, u2} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u1, u2} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u1, u2} α (fun (a : α) => δ a) s f (Function.update.{succ u1, u2} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i v) (fun (j : α) => _inst_1 j)))
but is expected to have type
forall {α : Type.{u2}} {δ : α -> Sort.{u1}} (s : Finset.{u2} α) (f : forall (i : α), δ i) (g : forall (i : α), δ i) [_inst_1 : forall (j : α), Decidable (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) j s)] [_inst_2 : DecidableEq.{succ u2} α] {i : α}, (Not (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s)) -> (forall (v : δ i), Eq.{imax (succ u2) u1} (forall (a : α), δ a) (Function.update.{succ u2, u1} α (fun (a : α) => δ a) (fun (a : α) (b : α) => _inst_2 a b) (Finset.piecewise.{u2, u1} α (fun (i : α) => δ i) s f g (fun (j : α) => _inst_1 j)) i v) (Finset.piecewise.{u2, u1} α (fun (a : α) => δ a) s f (Function.update.{succ u2, u1} α (fun (i : α) => δ i) (fun (a : α) (b : α) => _inst_2 a b) g i v) (fun (j : α) => _inst_1 j)))
Case conversion may be inaccurate. Consider using '#align finset.update_piecewise_of_not_mem Finset.update_piecewise_of_not_memₓ'. -/
/-- When `i ∉ s`, updating `s.piecewise f g` at `i` only needs to update the `g` branch:
the update of `f` would never be read. -/
theorem update_piecewise_of_not_mem [DecidableEq α] {i : α} (hi : i ∉ s) (v : δ i) :
update (s.piecewise f g) i v = s.piecewise f (update g i v) :=
by
rw [update_piecewise]
refine' s.piecewise_congr (fun j hj => update_noteq _ _ _) fun _ _ => rfl
-- `j ∈ s` while `i ∉ s`, so `j ≠ i` and the update of `f` at `i` is invisible at `j`.
exact fun h => hi (h ▸ hj)
#align finset.update_piecewise_of_not_mem Finset.update_piecewise_of_not_mem
#print Finset.piecewise_le_of_le_of_le /-
/-- If both branches are pointwise below `h`, so is the piecewise combination. -/
theorem piecewise_le_of_le_of_le {δ : α → Type _} [∀ i, Preorder (δ i)] {f g h : ∀ i, δ i}
(Hf : f ≤ h) (Hg : g ≤ h) : s.piecewise f g ≤ h := fun x =>
piecewise_cases s f g (· ≤ h x) (Hf x) (Hg x)
#align finset.piecewise_le_of_le_of_le Finset.piecewise_le_of_le_of_le
-/
#print Finset.le_piecewise_of_le_of_le /-
/-- If `h` is pointwise below both branches, it is below the piecewise combination. -/
theorem le_piecewise_of_le_of_le {δ : α → Type _} [∀ i, Preorder (δ i)] {f g h : ∀ i, δ i}
(Hf : h ≤ f) (Hg : h ≤ g) : h ≤ s.piecewise f g := fun x =>
piecewise_cases s f g (fun y => h x ≤ y) (Hf x) (Hg x)
#align finset.le_piecewise_of_le_of_le Finset.le_piecewise_of_le_of_le
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (x «expr ∉ » s) -/
#print Finset.piecewise_le_piecewise' /-
/-- Monotonicity of `piecewise` where each branch comparison is only required on the
region where that branch is used: `f ≤ f'` on `s` and `g ≤ g'` off `s`. -/
theorem piecewise_le_piecewise' {δ : α → Type _} [∀ i, Preorder (δ i)] {f g f' g' : ∀ i, δ i}
(Hf : ∀ x ∈ s, f x ≤ f' x) (Hg : ∀ (x) (_ : x ∉ s), g x ≤ g' x) :
s.piecewise f g ≤ s.piecewise f' g' := fun x => by by_cases hx : x ∈ s <;> simp [hx, *]
#align finset.piecewise_le_piecewise' Finset.piecewise_le_piecewise'
-/
#print Finset.piecewise_le_piecewise /-
/-- `piecewise` is monotone in both branches (unconditional version of
`piecewise_le_piecewise'`). -/
theorem piecewise_le_piecewise {δ : α → Type _} [∀ i, Preorder (δ i)] {f g f' g' : ∀ i, δ i}
(Hf : f ≤ f') (Hg : g ≤ g') : s.piecewise f g ≤ s.piecewise f' g' :=
s.piecewise_le_piecewise' (fun x _ => Hf x) fun x _ => Hg x
#align finset.piecewise_le_piecewise Finset.piecewise_le_piecewise
-/
#print Finset.piecewise_mem_Icc_of_mem_of_mem /-
/-- A piecewise combination of two functions lying in the interval `Set.Icc f₁ g₁`
lies in the same interval. -/
theorem piecewise_mem_Icc_of_mem_of_mem {δ : α → Type _} [∀ i, Preorder (δ i)]
{f f₁ g g₁ : ∀ i, δ i} (hf : f ∈ Set.Icc f₁ g₁) (hg : g ∈ Set.Icc f₁ g₁) :
s.piecewise f g ∈ Set.Icc f₁ g₁ :=
⟨le_piecewise_of_le_of_le _ hf.1 hg.1, piecewise_le_of_le_of_le _ hf.2 hg.2⟩
#align finset.piecewise_mem_Icc_of_mem_of_mem Finset.piecewise_mem_Icc_of_mem_of_mem
-/
#print Finset.piecewise_mem_Icc /-
/-- When `f ≤ g`, the piecewise combination lies between its two branches. -/
theorem piecewise_mem_Icc {δ : α → Type _} [∀ i, Preorder (δ i)] {f g : ∀ i, δ i} (h : f ≤ g) :
s.piecewise f g ∈ Set.Icc f g :=
piecewise_mem_Icc_of_mem_of_mem _ (Set.left_mem_Icc.2 h) (Set.right_mem_Icc.2 h)
#align finset.piecewise_mem_Icc Finset.piecewise_mem_Icc
-/
#print Finset.piecewise_mem_Icc' /-
/-- When `g ≤ f`, the piecewise combination lies between its branches, reversed. -/
theorem piecewise_mem_Icc' {δ : α → Type _} [∀ i, Preorder (δ i)] {f g : ∀ i, δ i} (h : g ≤ f) :
s.piecewise f g ∈ Set.Icc g f :=
piecewise_mem_Icc_of_mem_of_mem _ (Set.right_mem_Icc.2 h) (Set.left_mem_Icc.2 h)
#align finset.piecewise_mem_Icc' Finset.piecewise_mem_Icc'
-/
end Piecewise
section DecidablePiExists
-- `s` is an arbitrary finset; the decidability instances below quantify over `a ∈ s`.
variable {s : Finset α}
#print Finset.decidableDforallFinset /-
instance decidableDforallFinset {p : ∀ a ∈ s, Prop} [hp : ∀ (a) (h : a ∈ s), Decidable (p a h)] :
Decidable (∀ (a) (h : a ∈ s), p a h) :=
Multiset.decidableDforallMultiset
#align finset.decidable_dforall_finset Finset.decidableDforallFinset
-/
#print Finset.decidableEqPiFinset /-
/-- decidable equality for functions whose domain is bounded by finsets -/
instance decidableEqPiFinset {β : α → Type _} [h : ∀ a, DecidableEq (β a)] :
DecidableEq (∀ a ∈ s, β a) :=
Multiset.decidableEqPiMultiset
#align finset.decidable_eq_pi_finset Finset.decidableEqPiFinset
-/
#print Finset.decidableDexistsFinset /-
instance decidableDexistsFinset {p : ∀ a ∈ s, Prop} [hp : ∀ (a) (h : a ∈ s), Decidable (p a h)] :
Decidable (∃ (a : _)(h : a ∈ s), p a h) :=
Multiset.decidableDexistsMultiset
#align finset.decidable_dexists_finset Finset.decidableDexistsFinset
-/
end DecidablePiExists
/-! ### filter -/
section Filter
variable (p q : α → Prop) [DecidablePred p] [DecidablePred q]
#print Finset.filter /-
/-- `filter p s` is the set of elements of `s` that satisfy `p`. -/
def filter (s : Finset α) : Finset α :=
⟨_, s.2.filterₓ p⟩
#align finset.filter Finset.filter
-/
#print Finset.filter_val /-
@[simp]
theorem filter_val (s : Finset α) : (filter p s).1 = s.1.filterₓ p :=
rfl
#align finset.filter_val Finset.filter_val
-/
#print Finset.filter_subset /-
@[simp]
theorem filter_subset (s : Finset α) : s.filterₓ p ⊆ s :=
filter_subset _ _
#align finset.filter_subset Finset.filter_subset
-/
variable {p}
#print Finset.mem_filter /-
@[simp]
theorem mem_filter {s : Finset α} {a : α} : a ∈ s.filterₓ p ↔ a ∈ s ∧ p a :=
mem_filter
#align finset.mem_filter Finset.mem_filter
-/
#print Finset.mem_of_mem_filter /-
theorem mem_of_mem_filter {s : Finset α} (x : α) (h : x ∈ s.filterₓ p) : x ∈ s :=
mem_of_mem_filter h
#align finset.mem_of_mem_filter Finset.mem_of_mem_filter
-/
/- warning: finset.filter_ssubset -> Finset.filter_ssubset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {p : α -> Prop} [_inst_1 : DecidablePred.{succ u1} α p] {s : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.hasSsubset.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) s) (Exists.{succ u1} α (fun (x : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) => Not (p x))))
but is expected to have type
forall {α : Type.{u1}} {p : α -> Prop} [_inst_1 : DecidablePred.{succ u1} α p] {s : Finset.{u1} α}, Iff (HasSSubset.SSubset.{u1} (Finset.{u1} α) (Finset.instHasSSubsetFinset.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) s) (Exists.{succ u1} α (fun (x : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s) (Not (p x))))
Case conversion may be inaccurate. Consider using '#align finset.filter_ssubset Finset.filter_ssubsetₓ'. -/
-- The filter is a strict subset of `s` exactly when some element of `s` fails `p`.
theorem filter_ssubset {s : Finset α} : s.filterₓ p ⊂ s ↔ ∃ x ∈ s, ¬p x :=
  ⟨fun h =>
    -- a strict subset misses some element, which therefore cannot satisfy `p`
    let ⟨x, hs, hp⟩ := Set.exists_of_ssubset h
    ⟨x, hs, mt (fun hp => mem_filter.2 ⟨hs, hp⟩) hp⟩,
    fun ⟨x, hs, hp⟩ => ⟨s.filter_subset _, fun h => hp (mem_filter.1 (h hs)).2⟩⟩
#align finset.filter_ssubset Finset.filter_ssubset
variable (p)
#print Finset.filter_filter /-
-- Successive filters collapse into one filter by the conjunction of the predicates.
theorem filter_filter (s : Finset α) : (s.filterₓ p).filterₓ q = s.filterₓ fun a => p a ∧ q a :=
  ext fun a => by simp only [mem_filter, and_comm', and_left_comm]
#align finset.filter_filter Finset.filter_filter
-/
/- warning: finset.filter_true -> Finset.filter_True is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} [h : DecidablePred.{succ u1} α (fun (_x : α) => True)], Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (_x : α) => True) h s) s
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α}, Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (x._@.Mathlib.Data.Finset.Basic._hyg.25509 : α) => True) (fun (a : α) => instDecidableTrue) s) s
Case conversion may be inaccurate. Consider using '#align finset.filter_true Finset.filter_Trueₓ'. -/
-- Filtering by the always-true predicate is the identity (any decidability instance).
theorem filter_True {s : Finset α} [h : DecidablePred fun _ => True] :
    @Finset.filter α (fun _ => True) h s = s := by ext <;> simp
#align finset.filter_true Finset.filter_True
/- warning: finset.filter_false -> Finset.filter_False is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {h : DecidablePred.{succ u1} α (fun (a : α) => False)} (s : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => False) h s) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α))
but is expected to have type
forall {α : Type.{u1}} (h : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (x._@.Mathlib.Data.Finset.Basic._hyg.25547 : α) => False) (fun (a : α) => instDecidableFalse) h) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α))
Case conversion may be inaccurate. Consider using '#align finset.filter_false Finset.filter_Falseₓ'. -/
-- Filtering by the always-false predicate yields the empty finset.
@[simp]
theorem filter_False {h} (s : Finset α) : @filter α (fun a => False) h s = ∅ :=
  ext fun a => by simp only [mem_filter, and_false_iff] <;> rfl
#align finset.filter_false Finset.filter_False
variable {p q}
#print Finset.filter_eq_self /-
-- `filter p` fixes `s` iff every element of `s` satisfies `p`.
theorem filter_eq_self (s : Finset α) : s.filterₓ p = s ↔ ∀ x ∈ s, p x := by simp [Finset.ext_iff]
#align finset.filter_eq_self Finset.filter_eq_self
-/

#print Finset.filter_true_of_mem /-
/-- If all elements of a `finset` satisfy the predicate `p`, `s.filter p` is `s`. -/
@[simp]
theorem filter_true_of_mem {s : Finset α} (h : ∀ x ∈ s, p x) : s.filterₓ p = s :=
  (filter_eq_self s).mpr h
#align finset.filter_true_of_mem Finset.filter_true_of_mem
-/

#print Finset.filter_false_of_mem /-
/-- If all elements of a `finset` fail to satisfy the predicate `p`, `s.filter p` is `∅`. -/
theorem filter_false_of_mem {s : Finset α} (h : ∀ x ∈ s, ¬p x) : s.filterₓ p = ∅ :=
  eq_empty_of_forall_not_mem (by simpa)
#align finset.filter_false_of_mem Finset.filter_false_of_mem
-/

#print Finset.filter_eq_empty_iff /-
-- The filter is empty iff no element of `s` satisfies `p`.
theorem filter_eq_empty_iff (s : Finset α) : s.filterₓ p = ∅ ↔ ∀ x ∈ s, ¬p x :=
  by
  refine' ⟨_, filter_false_of_mem⟩
  intro hs
  -- peel off the structure layer and reduce to the underlying multiset equality
  injection hs with hs'
  rwa [filter_eq_nil] at hs'
#align finset.filter_eq_empty_iff Finset.filter_eq_empty_iff
-/
/- warning: finset.filter_nonempty_iff -> Finset.filter_nonempty_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {p : α -> Prop} [_inst_1 : DecidablePred.{succ u1} α p] {s : Finset.{u1} α}, Iff (Finset.Nonempty.{u1} α (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => p a)))
but is expected to have type
forall {α : Type.{u1}} {p : α -> Prop} [_inst_1 : DecidablePred.{succ u1} α p] {s : Finset.{u1} α}, Iff (Finset.Nonempty.{u1} α (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s)) (Exists.{succ u1} α (fun (a : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (p a)))
Case conversion may be inaccurate. Consider using '#align finset.filter_nonempty_iff Finset.filter_nonempty_iffₓ'. -/
-- The filtered finset is nonempty iff some element of `s` satisfies `p`.
theorem filter_nonempty_iff {s : Finset α} : (s.filterₓ p).Nonempty ↔ ∃ a ∈ s, p a := by
  simp only [nonempty_iff_ne_empty, Ne.def, filter_eq_empty_iff, Classical.not_not, not_forall]
#align finset.filter_nonempty_iff Finset.filter_nonempty_iff
#print Finset.filter_congr /-
-- Predicates that agree on `s` produce the same filter.
theorem filter_congr {s : Finset α} (H : ∀ x ∈ s, p x ↔ q x) : filter p s = filter q s :=
  eq_of_veq <| filter_congr H
#align finset.filter_congr Finset.filter_congr
-/

variable (p q)

#print Finset.filter_empty /-
-- Filtering the empty finset yields the empty finset.
theorem filter_empty : filter p ∅ = ∅ :=
  subset_empty.1 <| filter_subset _ _
#align finset.filter_empty Finset.filter_empty
-/

#print Finset.filter_subset_filter /-
-- `filter p` is monotone with respect to `⊆`.
theorem filter_subset_filter {s t : Finset α} (h : s ⊆ t) : s.filterₓ p ⊆ t.filterₓ p := fun a ha =>
  mem_filter.2 ⟨h (mem_filter.1 ha).1, (mem_filter.1 ha).2⟩
#align finset.filter_subset_filter Finset.filter_subset_filter
-/

#print Finset.monotone_filter_left /-
-- Restatement of `filter_subset_filter` as a `Monotone` fact.
theorem monotone_filter_left : Monotone (filter p) := fun _ _ => filter_subset_filter p
#align finset.monotone_filter_left Finset.monotone_filter_left
-/
#print Finset.monotone_filter_right /-
-- Weakening the predicate pointwise (`p ≤ q`) enlarges the filter (`≤` on finsets).
theorem monotone_filter_right (s : Finset α) ⦃p q : α → Prop⦄ [DecidablePred p] [DecidablePred q]
    (h : p ≤ q) : s.filterₓ p ≤ s.filterₓ q :=
  Multiset.subset_of_le (Multiset.monotone_filter_right s.val h)
#align finset.monotone_filter_right Finset.monotone_filter_right
-/

#print Finset.coe_filter /-
-- Coercion to `Set` turns `filter` into set-builder separation.
@[simp, norm_cast]
theorem coe_filter (s : Finset α) : ↑(s.filterₓ p) = ({ x ∈ ↑s | p x } : Set α) :=
  Set.ext fun _ => mem_filter
#align finset.coe_filter Finset.coe_filter
-/

#print Finset.subset_coe_filter_of_subset_forall /-
-- A set contained in `↑s` whose elements all satisfy `p` lies inside the filter.
theorem subset_coe_filter_of_subset_forall (s : Finset α) {t : Set α} (h₁ : t ⊆ s)
    (h₂ : ∀ x ∈ t, p x) : t ⊆ s.filterₓ p := fun x hx => (s.coe_filter p).symm ▸ ⟨h₁ hx, h₂ x hx⟩
#align finset.subset_coe_filter_of_subset_forall Finset.subset_coe_filter_of_subset_forall
-/

#print Finset.filter_singleton /-
-- Filtering a singleton keeps it exactly when the predicate holds at its element.
theorem filter_singleton (a : α) : filter p (singleton a) = if p a then singleton a else ∅ := by
  classical
    ext x
    simp
    split_ifs with h <;> by_cases h' : x = a <;> simp [h, h']
#align finset.filter_singleton Finset.filter_singleton
-/
#print Finset.filter_cons_of_pos /-
-- When `p a` holds, `filter` commutes with `cons`, keeping `a`.
theorem filter_cons_of_pos (a : α) (s : Finset α) (ha : a ∉ s) (hp : p a) :
    filter p (cons a s ha) = cons a (filter p s) (mem_filter.Not.mpr <| mt And.left ha) :=
  eq_of_veq <| Multiset.filter_cons_of_pos s.val hp
#align finset.filter_cons_of_pos Finset.filter_cons_of_pos
-/

#print Finset.filter_cons_of_neg /-
-- When `p a` fails, the `cons a` disappears under the filter.
theorem filter_cons_of_neg (a : α) (s : Finset α) (ha : a ∉ s) (hp : ¬p a) :
    filter p (cons a s ha) = filter p s :=
  eq_of_veq <| Multiset.filter_cons_of_neg s.val hp
#align finset.filter_cons_of_neg Finset.filter_cons_of_neg
-/
/- warning: finset.disjoint_filter -> Finset.disjoint_filter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) s)) (forall (x : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) -> (p x) -> (Not (q x)))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) s)) (forall (x : α), (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s) -> (p x) -> (Not (q x)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_filter Finset.disjoint_filterₓ'. -/
-- Two filters of the same finset are disjoint iff the predicates never both hold on `s`.
theorem disjoint_filter {s : Finset α} {p q : α → Prop} [DecidablePred p] [DecidablePred q] :
    Disjoint (s.filterₓ p) (s.filterₓ q) ↔ ∀ x ∈ s, p x → ¬q x := by
  constructor <;> simp (config := { contextual := true }) [disjoint_left]
#align finset.disjoint_filter Finset.disjoint_filter
/- warning: finset.disjoint_filter_filter -> Finset.disjoint_filter_filter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) t))
but is expected to have type
forall {α : Type.{u1}} {s : Finset.{u1} α} {t : Finset.{u1} α} {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_filter_filter Finset.disjoint_filter_filterₓ'. -/
-- Filtering preserves disjointness of the underlying finsets.
theorem disjoint_filter_filter {s t : Finset α} {p q : α → Prop} [DecidablePred p]
    [DecidablePred q] : Disjoint s t → Disjoint (s.filterₓ p) (t.filterₓ q) :=
  Disjoint.mono (filter_subset _ _) (filter_subset _ _)
#align finset.disjoint_filter_filter Finset.disjoint_filter_filter
/- warning: finset.disjoint_filter_filter' -> Finset.disjoint_filter_filter' is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], (Disjoint.{u1} (α -> Prop) (Pi.partialOrder.{u1, 0} α (fun (ᾰ : α) => Prop) (fun (i : α) => Prop.partialOrder)) (Pi.orderBot.{u1, 0} α (fun (ᾰ : α) => Prop) (fun (i : α) => Preorder.toLE.{0} ((fun (i : α) => (fun (i : α) => (fun (ᾰ : α) => Prop) i) i) i) ((fun (i : α) => PartialOrder.toPreorder.{0} ((fun (ᾰ : α) => Prop) i) ((fun (i : α) => Prop.partialOrder) i)) i)) (fun (i : α) => BoundedOrder.toOrderBot.{0} Prop (Preorder.toLE.{0} ((fun (i : α) => (fun (i : α) => (fun (ᾰ : α) => Prop) i) i) i) ((fun (i : α) => PartialOrder.toPreorder.{0} ((fun (ᾰ : α) => Prop) i) ((fun (i : α) => Prop.partialOrder) i)) i)) Prop.boundedOrder)) p q) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) t))
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) {p : α -> Prop} {q : α -> Prop} [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α q], (Disjoint.{u1} (α -> Prop) (Pi.partialOrder.{u1, 0} α (fun (ᾰ : α) => Prop) (fun (i : α) => Prop.partialOrder)) (Pi.orderBot.{u1, 0} α (fun (ᾰ : α) => Prop) (fun (i : α) => Preorder.toLE.{0} ((fun (i : α) => (fun (i : α) => Prop) i) i) ((fun (i : α) => PartialOrder.toPreorder.{0} ((fun (ᾰ : α) => Prop) i) ((fun (i : α) => Prop.partialOrder) i)) i)) (fun (i : α) => BoundedOrder.toOrderBot.{0} Prop Prop.le Prop.boundedOrder)) p q) -> (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_4 a) t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_filter_filter' Finset.disjoint_filter_filter'ₓ'. -/
-- Disjoint predicates give disjoint filters, over any two finsets.
theorem disjoint_filter_filter' (s t : Finset α) {p q : α → Prop} [DecidablePred p]
    [DecidablePred q] (h : Disjoint p q) : Disjoint (s.filterₓ p) (t.filterₓ q) :=
  by
  simp_rw [disjoint_left, mem_filter]
  rintro a ⟨hs, hp⟩ ⟨ht, hq⟩
  -- `h.le_bot` refutes a common witness of `p` and `q`
  exact h.le_bot _ ⟨hp, hq⟩
#align finset.disjoint_filter_filter' Finset.disjoint_filter_filter'
/- warning: finset.disjoint_filter_filter_neg -> Finset.disjoint_filter_filter_neg is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) (p : α -> Prop) [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : DecidablePred.{succ u1} α (fun (a : α) => Not (p a))], Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => _inst_4 a) t)
but is expected to have type
forall {α : Type.{u1}} (s : Finset.{u1} α) (t : Finset.{u1} α) (p : α -> Prop) [_inst_3 : DecidablePred.{succ u1} α p] [_inst_4 : forall (x : α), Decidable (Not (p x))], Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_3 a) s) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => _inst_4 a) t)
Case conversion may be inaccurate. Consider using '#align finset.disjoint_filter_filter_neg Finset.disjoint_filter_filter_negₓ'. -/
-- A filter by `p` and a filter by `¬p` are always disjoint.
theorem disjoint_filter_filter_neg (s t : Finset α) (p : α → Prop) [DecidablePred p]
    [DecidablePred fun a => ¬p a] : Disjoint (s.filterₓ p) (t.filterₓ fun a => ¬p a) :=
  disjoint_filter_filter' s t disjoint_compl_right
#align finset.disjoint_filter_filter_neg Finset.disjoint_filter_filter_neg
/- warning: finset.filter_disj_union -> Finset.filter_disj_union is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) (Finset.disjUnion.{u1} α s t h)) (Finset.disjUnion.{u1} α (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) t) (Finset.disjoint_filter_filter.{u1} α s t p p (fun (a : α) => _inst_1 a) (fun (a : α) => _inst_1 a) h))
but is expected to have type
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] (s : Finset.{u1} α) (t : Finset.{u1} α) (h : Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) s t), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) (Finset.disjUnion.{u1} α s t h)) (Finset.disjUnion.{u1} α (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) t) (Finset.disjoint_filter_filter.{u1} α s t p p (fun (a : α) => _inst_1 a) (fun (a : α) => _inst_1 a) h))
Case conversion may be inaccurate. Consider using '#align finset.filter_disj_union Finset.filter_disj_unionₓ'. -/
-- `filter` distributes over the disjoint union of two finsets.
theorem filter_disj_union (s : Finset α) (t : Finset α) (h : Disjoint s t) :
    filter p (disjUnion s t h) = (filter p s).disjUnion (filter p t) (disjoint_filter_filter h) :=
  eq_of_veq <| Multiset.filter_add _ _ _
#align finset.filter_disj_union Finset.filter_disj_union
#print Finset.filter_cons /-
-- Unconditional form of filtering a `cons`: case split on whether `p a` holds.
theorem filter_cons {a : α} (s : Finset α) (ha : a ∉ s) :
    filter p (cons a s ha) =
      (if p a then {a} else ∅ : Finset α).disjUnion (filter p s)
        (by
          -- the `if` branch is disjoint from `filter p s` in either case
          split_ifs
          · rw [disjoint_singleton_left]
            exact mem_filter.not.mpr <| mt And.left ha
          · exact disjoint_empty_left _) :=
  by
  split_ifs with h
  · rw [filter_cons_of_pos _ _ _ ha h, singleton_disj_union]
  · rw [filter_cons_of_neg _ _ _ ha h, empty_disj_union]
#align finset.filter_cons Finset.filter_cons
-/
variable [DecidableEq α]

#print Finset.filter_union /-
-- `filter` distributes over union.
theorem filter_union (s₁ s₂ : Finset α) : (s₁ ∪ s₂).filterₓ p = s₁.filterₓ p ∪ s₂.filterₓ p :=
  ext fun _ => by simp only [mem_filter, mem_union, or_and_right]
#align finset.filter_union Finset.filter_union
-/

#print Finset.filter_union_right /-
-- The union of two filters of one finset is the filter by the disjunction.
theorem filter_union_right (s : Finset α) :
    s.filterₓ p ∪ s.filterₓ q = s.filterₓ fun x => p x ∨ q x :=
  ext fun x => by simp only [mem_filter, mem_union, and_or_distrib_left.symm]
#align finset.filter_union_right Finset.filter_union_right
-/

#print Finset.filter_mem_eq_inter /-
-- Filtering by membership in `t` is intersection with `t`.
theorem filter_mem_eq_inter {s t : Finset α} [∀ i, Decidable (i ∈ t)] :
    (s.filterₓ fun i => i ∈ t) = s ∩ t :=
  ext fun i => by rw [mem_filter, mem_inter]
#align finset.filter_mem_eq_inter Finset.filter_mem_eq_inter
-/

#print Finset.filter_inter_distrib /-
-- `filter` distributes over intersection.
theorem filter_inter_distrib (s t : Finset α) : (s ∩ t).filterₓ p = s.filterₓ p ∩ t.filterₓ p :=
  by
  ext
  simp only [mem_filter, mem_inter]
  exact and_and_right _ _ _
#align finset.filter_inter_distrib Finset.filter_inter_distrib
-/
#print Finset.filter_inter /-
-- Intersecting after filtering equals filtering the intersection.
theorem filter_inter (s t : Finset α) : filter p s ∩ t = filter p (s ∩ t) :=
  by
  ext
  simp only [mem_inter, mem_filter, and_right_comm]
#align finset.filter_inter Finset.filter_inter
-/

#print Finset.inter_filter /-
-- Symmetric variant of `filter_inter`, proved by commuting `∩`.
theorem inter_filter (s t : Finset α) : s ∩ filter p t = filter p (s ∩ t) := by
  rw [inter_comm, filter_inter, inter_comm]
#align finset.inter_filter Finset.inter_filter
-/

#print Finset.filter_insert /-
-- Filtering an `insert` keeps the new element exactly when it satisfies `p`.
theorem filter_insert (a : α) (s : Finset α) :
    filter p (insert a s) = if p a then insert a (filter p s) else filter p s :=
  by
  ext x
  simp
  split_ifs with h <;> by_cases h' : x = a <;> simp [h, h']
#align finset.filter_insert Finset.filter_insert
-/

#print Finset.filter_erase /-
-- `filter` commutes with `erase`.
theorem filter_erase (a : α) (s : Finset α) : filter p (erase s a) = erase (filter p s) a :=
  by
  ext x
  simp only [and_assoc', mem_filter, iff_self_iff, mem_erase]
#align finset.filter_erase Finset.filter_erase
-/
/- warning: finset.filter_or -> Finset.filter_or is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (p : α -> Prop) (q : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_2 : DecidablePred.{succ u1} α q] [_inst_3 : DecidableEq.{succ u1} α] [_inst_4 : DecidablePred.{succ u1} α (fun (a : α) => Or (p a) (q a))] (s : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => Or (p a) (q a)) (fun (a : α) => _inst_4 a) s) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_2 a) s))
but is expected to have type
forall {α : Type.{u1}} (p : α -> Prop) (q : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_2 : DecidablePred.{succ u1} α q] [_inst_3 : DecidableEq.{succ u1} α] (_inst_4 : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => Or (p a) (q a)) (fun (a : α) => instDecidableOr (p a) (q a) (_inst_1 a) (_inst_2 a)) _inst_4) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) _inst_4) (Finset.filter.{u1} α q (fun (a : α) => _inst_2 a) _inst_4))
Case conversion may be inaccurate. Consider using '#align finset.filter_or Finset.filter_orₓ'. -/
-- Filtering by a disjunction is the union of the individual filters.
theorem filter_or [DecidablePred fun a => p a ∨ q a] (s : Finset α) :
    (s.filterₓ fun a => p a ∨ q a) = s.filterₓ p ∪ s.filterₓ q :=
  ext fun _ => by simp only [mem_filter, mem_union, and_or_left]
#align finset.filter_or Finset.filter_or
/- warning: finset.filter_and -> Finset.filter_and is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (p : α -> Prop) (q : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_2 : DecidablePred.{succ u1} α q] [_inst_3 : DecidableEq.{succ u1} α] [_inst_4 : DecidablePred.{succ u1} α (fun (a : α) => And (p a) (q a))] (s : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => And (p a) (q a)) (fun (a : α) => _inst_4 a) s) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) (Finset.filter.{u1} α q (fun (a : α) => _inst_2 a) s))
but is expected to have type
forall {α : Type.{u1}} (p : α -> Prop) (q : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_2 : DecidablePred.{succ u1} α q] [_inst_3 : DecidableEq.{succ u1} α] (_inst_4 : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => And (p a) (q a)) (fun (a : α) => instDecidableAnd (p a) (q a) (_inst_1 a) (_inst_2 a)) _inst_4) (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) _inst_4) (Finset.filter.{u1} α q (fun (a : α) => _inst_2 a) _inst_4))
Case conversion may be inaccurate. Consider using '#align finset.filter_and Finset.filter_andₓ'. -/
-- Filtering by a conjunction is the intersection of the individual filters.
theorem filter_and [DecidablePred fun a => p a ∧ q a] (s : Finset α) :
    (s.filterₓ fun a => p a ∧ q a) = s.filterₓ p ∩ s.filterₓ q :=
  ext fun _ => by simp only [mem_filter, mem_inter, and_comm', and_left_comm, and_self_iff]
#align finset.filter_and Finset.filter_and
/- warning: finset.filter_not -> Finset.filter_not is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_3 : DecidableEq.{succ u1} α] [_inst_4 : DecidablePred.{succ u1} α (fun (a : α) => Not (p a))] (s : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => _inst_4 a) s) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.hasSdiff.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) s (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s))
but is expected to have type
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_3 : DecidableEq.{succ u1} α] (_inst_4 : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => instDecidableNot (p a) (_inst_1 a)) _inst_4) (SDiff.sdiff.{u1} (Finset.{u1} α) (Finset.instSDiffFinset.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) _inst_4 (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) _inst_4))
Case conversion may be inaccurate. Consider using '#align finset.filter_not Finset.filter_notₓ'. -/
-- Filtering by the negated predicate is set difference with the positive filter.
theorem filter_not [DecidablePred fun a => ¬p a] (s : Finset α) :
    (s.filterₓ fun a => ¬p a) = s \ s.filterₓ p :=
  ext <| by
    simpa only [mem_filter, mem_sdiff, and_comm', not_and] using fun a =>
      and_congr_right fun h : a ∈ s => (imp_iff_right h).symm.trans imp_not_comm
#align finset.filter_not Finset.filter_not
#print Finset.sdiff_eq_filter /-
-- Set difference is filtering by non-membership.
theorem sdiff_eq_filter (s₁ s₂ : Finset α) : s₁ \ s₂ = filter (· ∉ s₂) s₁ :=
  ext fun _ => by simp only [mem_sdiff, mem_filter]
#align finset.sdiff_eq_filter Finset.sdiff_eq_filter
-/

#print Finset.sdiff_eq_self /-
-- `s₁ \ s₂ = s₁` exactly when the intersection of the two finsets is empty.
theorem sdiff_eq_self (s₁ s₂ : Finset α) : s₁ \ s₂ = s₁ ↔ s₁ ∩ s₂ ⊆ ∅ :=
  by
  simp [subset.antisymm_iff]
  constructor <;> intro h
  · trans s₁ \ s₂ ∩ s₂
    mono
    simp
  ·
    calc
      s₁ \ s₂ ⊇ s₁ \ (s₁ ∩ s₂) := by simp [(· ⊇ ·)]
      _ ⊇ s₁ \ ∅ := by mono using (· ⊇ ·)
      _ ⊇ s₁ := by simp [(· ⊇ ·)]
#align finset.sdiff_eq_self Finset.sdiff_eq_self
-/
/- warning: finset.subset_union_elim -> Finset.subset_union_elim is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_3 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t₁ : Set.{u1} α} {t₂ : Set.{u1} α}, (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (Union.union.{u1} (Set.{u1} α) (Set.hasUnion.{u1} α) t₁ t₂)) -> (Exists.{succ u1} (Finset.{u1} α) (fun (s₁ : Finset.{u1} α) => Exists.{succ u1} (Finset.{u1} α) (fun (s₂ : Finset.{u1} α) => And (Eq.{succ u1} (Finset.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.hasUnion.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) s₁ s₂) s) (And (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₁) t₁) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.hasSubset.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s₂) (SDiff.sdiff.{u1} (Set.{u1} α) (BooleanAlgebra.toHasSdiff.{u1} (Set.{u1} α) (Set.booleanAlgebra.{u1} α)) t₂ t₁))))))
but is expected to have type
forall {α : Type.{u1}} [_inst_3 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t₁ : Set.{u1} α} {t₂ : Set.{u1} α}, (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (Finset.toSet.{u1} α s) (Union.union.{u1} (Set.{u1} α) (Set.instUnionSet.{u1} α) t₁ t₂)) -> (Exists.{succ u1} (Finset.{u1} α) (fun (s₁ : Finset.{u1} α) => Exists.{succ u1} (Finset.{u1} α) (fun (s₂ : Finset.{u1} α) => And (Eq.{succ u1} (Finset.{u1} α) (Union.union.{u1} (Finset.{u1} α) (Finset.instUnionFinset.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) s₁ s₂) s) (And (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (Finset.toSet.{u1} α s₁) t₁) (HasSubset.Subset.{u1} (Set.{u1} α) (Set.instHasSubsetSet.{u1} α) (Finset.toSet.{u1} α s₂) (SDiff.sdiff.{u1} (Set.{u1} α) (Set.instSDiffSet.{u1} α) t₂ t₁))))))
Case conversion may be inaccurate. Consider using '#align finset.subset_union_elim Finset.subset_union_elimₓ'. -/
-- Split a finset covered by `t₁ ∪ t₂` into a part inside `t₁` and a part inside `t₂ \ t₁`.
theorem subset_union_elim {s : Finset α} {t₁ t₂ : Set α} (h : ↑s ⊆ t₁ ∪ t₂) :
    ∃ s₁ s₂ : Finset α, s₁ ∪ s₂ = s ∧ ↑s₁ ⊆ t₁ ∧ ↑s₂ ⊆ t₂ \ t₁ := by
  classical
    -- take the elements of `s` landing in `t₁` and the remaining elements
    refine' ⟨s.filter (· ∈ t₁), s.filter (· ∉ t₁), _, _, _⟩
    · simp [filter_union_right, em]
    · intro x
      simp
    · intro x
      simp
      intro hx hx₂
      -- `x ∉ t₁` forces `x ∈ t₂` via the covering hypothesis `h`
      refine' ⟨Or.resolve_left (h hx) hx₂, hx₂⟩
#align finset.subset_union_elim Finset.subset_union_elim
/- warning: finset.filter_congr_decidable clashes with [anonymous] -> [anonymous]
warning: finset.filter_congr_decidable -> [anonymous] is a dubious translation:
lean 3 declaration is
forall {α : Type.{u_1}} (s : Finset.{u_1} α) (p : α -> Prop) (h : DecidablePred.{succ u_1} α p) [_inst_4 : DecidablePred.{succ u_1} α p], Eq.{succ u_1} (Finset.{u_1} α) (Finset.filter.{u_1} α p h s) (Finset.filter.{u_1} α p (fun (a : α) => _inst_4 a) s)
but is expected to have type
forall {α : Type.{u}} {s : Type.{v}}, (Nat -> α -> s) -> Nat -> (List.{u} α) -> (List.{v} s)
Case conversion may be inaccurate. Consider using '#align finset.filter_congr_decidable [anonymous]ₓ'. -/
-- We can simplify an application of filter where the decidability is inferred in "the wrong way"
-- NOTE(mathport): the original name `finset.filter_congr_decidable` clashed during porting
-- (see the warning above), hence the `[anonymous]` placeholder identifier.
@[simp]
theorem [anonymous] {α} (s : Finset α) (p : α → Prop) (h : DecidablePred p) [DecidablePred p] :
    @filter α p h s = s.filterₓ p := by congr
#align finset.filter_congr_decidable [anonymous]
section Classical

open Classical

/-- The following instance allows us to write `{x ∈ s | p x}` for `finset.filter p s`.
Since the former notation requires us to define this for all propositions `p`, and `finset.filter`
only works for decidable propositions, the notation `{x ∈ s | p x}` is only compatible with
classical logic because it uses `classical.prop_decidable`.
We don't want to redo all lemmas of `finset.filter` for `has_sep.sep`, so we make sure that `simp`
unfolds the notation `{x ∈ s | p x}` to `finset.filter p s`. If `p` happens to be decidable, the
simp-lemma `finset.filter_congr_decidable` will make sure that `finset.filter` uses the right
instance for decidability.
-/
noncomputable instance {α : Type _} : Sep α (Finset α) :=
  ⟨fun p x => x.filterₓ p⟩

-- Unfold the separation notation to `filter` so the existing `filter` lemmas apply.
@[simp]
theorem sep_def {α : Type _} (s : Finset α) (p : α → Prop) : { x ∈ s | p x } = s.filterₓ p :=
  rfl
#align finset.sep_def Finset.sep_def

end Classical
#print Finset.filter_eq /-
-- This is not a good simp lemma, as it would prevent `finset.mem_filter` from firing
-- on, e.g. `x ∈ s.filter(eq b)`.
/-- After filtering out everything that does not equal a given value, at most that value remains.
This is equivalent to `filter_eq'` with the equality the other way.
-/
theorem filter_eq [DecidableEq β] (s : Finset β) (b : β) : s.filterₓ (Eq b) = ite (b ∈ s) {b} ∅ :=
  by
  -- case split on whether `b` belongs to `s`
  split_ifs
  · ext
    simp only [mem_filter, mem_singleton]
    exact
      ⟨fun h => h.2.symm, by
        rintro ⟨h⟩
        exact ⟨h, rfl⟩⟩
  · ext
    simp only [mem_filter, not_and, iff_false_iff, not_mem_empty]
    rintro m ⟨e⟩
    exact h m
#align finset.filter_eq Finset.filter_eq
-/

#print Finset.filter_eq' /-
/-- After filtering out everything that does not equal a given value, at most that value remains.
This is equivalent to `filter_eq` with the equality the other way.
-/
theorem filter_eq' [DecidableEq β] (s : Finset β) (b : β) :
    (s.filterₓ fun a => a = b) = ite (b ∈ s) {b} ∅ :=
  trans (filter_congr fun _ _ => ⟨Eq.symm, Eq.symm⟩) (filter_eq s b)
#align finset.filter_eq' Finset.filter_eq'
-/
#print Finset.filter_ne /-
theorem filter_ne [DecidableEq β] (s : Finset β) (b : β) :
(s.filterₓ fun a => b ≠ a) = s.eraseₓ b := by
ext
simp only [mem_filter, mem_erase, Ne.def]
tauto
#align finset.filter_ne Finset.filter_ne
-/
#print Finset.filter_ne' /-
theorem filter_ne' [DecidableEq β] (s : Finset β) (b : β) :
(s.filterₓ fun a => a ≠ b) = s.eraseₓ b :=
trans (filter_congr fun _ _ => ⟨Ne.symm, Ne.symm⟩) (filter_ne s b)
#align finset.filter_ne' Finset.filter_ne'
-/
/- warning: finset.filter_inter_filter_neg_eq -> Finset.filter_inter_filter_neg_eq is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_3 : DecidableEq.{succ u1} α] [_inst_4 : DecidablePred.{succ u1} α (fun (a : α) => Not (p a))] (s : Finset.{u1} α) (t : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Inter.inter.{u1} (Finset.{u1} α) (Finset.hasInter.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) s) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => _inst_4 a) t)) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.hasEmptyc.{u1} α))
but is expected to have type
forall {α : Type.{u1}} (p : α -> Prop) [_inst_1 : DecidablePred.{succ u1} α p] [_inst_3 : DecidableEq.{succ u1} α] (_inst_4 : Finset.{u1} α) (s : Finset.{u1} α), Eq.{succ u1} (Finset.{u1} α) (Inter.inter.{u1} (Finset.{u1} α) (Finset.instInterFinset.{u1} α (fun (a : α) (b : α) => _inst_3 a b)) (Finset.filter.{u1} α p (fun (a : α) => _inst_1 a) _inst_4) (Finset.filter.{u1} α (fun (a : α) => Not (p a)) (fun (a : α) => instDecidableNot (p a) (_inst_1 a)) s)) (EmptyCollection.emptyCollection.{u1} (Finset.{u1} α) (Finset.instEmptyCollectionFinset.{u1} α))
Case conversion may be inaccurate. Consider using '#align finset.filter_inter_filter_neg_eq Finset.filter_inter_filter_neg_eqₓ'. -/
/-- Filtering `s` by `p` and filtering `t` by `¬p` yield disjoint finsets: their
intersection is empty.  Immediate from `disjoint_filter_filter_neg`. -/
theorem filter_inter_filter_neg_eq [DecidablePred fun a => ¬p a] (s t : Finset α) :
(s.filterₓ p ∩ t.filterₓ fun a => ¬p a) = ∅ :=
(disjoint_filter_filter_neg s t p).eq_bot
#align finset.filter_inter_filter_neg_eq Finset.filter_inter_filter_neg_eq
#print Finset.filter_union_filter_of_codisjoint /-
theorem filter_union_filter_of_codisjoint (s : Finset α) (h : Codisjoint p q) :
s.filterₓ p ∪ s.filterₓ q = s :=
(filter_or _ _ _).symm.trans <| filter_true_of_mem fun x hx => h.top_le x trivial
#align finset.filter_union_filter_of_codisjoint Finset.filter_union_filter_of_codisjoint
-/
#print Finset.filter_union_filter_neg_eq /-
theorem filter_union_filter_neg_eq [DecidablePred fun a => ¬p a] (s : Finset α) :
(s.filterₓ p ∪ s.filterₓ fun a => ¬p a) = s :=
filter_union_filter_of_codisjoint _ _ _ codisjoint_hnot_right
#align finset.filter_union_filter_neg_eq Finset.filter_union_filter_neg_eq
-/
end Filter
/-! ### range -/
section Range
variable {n m l : ℕ}
#print Finset.range /-
/-- `range n` is the set of natural numbers less than `n`. -/
def range (n : ℕ) : Finset ℕ :=
⟨_, nodup_range n⟩
#align finset.range Finset.range
-/
#print Finset.range_val /-
@[simp]
theorem range_val (n : ℕ) : (range n).1 = Multiset.range n :=
rfl
#align finset.range_val Finset.range_val
-/
#print Finset.mem_range /-
@[simp]
theorem mem_range : m ∈ range n ↔ m < n :=
mem_range
#align finset.mem_range Finset.mem_range
-/
#print Finset.coe_range /-
@[simp, norm_cast]
theorem coe_range (n : ℕ) : (range n : Set ℕ) = Set.Iio n :=
Set.ext fun _ => mem_range
#align finset.coe_range Finset.coe_range
-/
#print Finset.range_zero /-
@[simp]
theorem range_zero : range 0 = ∅ :=
rfl
#align finset.range_zero Finset.range_zero
-/
#print Finset.range_one /-
@[simp]
theorem range_one : range 1 = {0} :=
rfl
#align finset.range_one Finset.range_one
-/
/- warning: finset.range_succ -> Finset.range_succ is a dubious translation:
lean 3 declaration is
forall {n : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.range (Nat.succ n)) (Insert.insert.{0, 0} Nat (Finset.{0} Nat) (Finset.hasInsert.{0} Nat (fun (a : Nat) (b : Nat) => Nat.decidableEq a b)) n (Finset.range n))
but is expected to have type
forall {n : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.range (Nat.succ n)) (Insert.insert.{0, 0} Nat (Finset.{0} Nat) (Finset.instInsertFinset.{0} Nat (fun (a : Nat) (b : Nat) => instDecidableEqNat a b)) n (Finset.range n))
Case conversion may be inaccurate. Consider using '#align finset.range_succ Finset.range_succₓ'. -/
/-- `range (succ n)` is obtained from `range n` by inserting `n` itself. -/
theorem range_succ : range (succ n) = insert n (range n) :=
eq_of_veq <| (range_succ n).trans <| (ndinsert_of_not_mem not_mem_range_self).symm
#align finset.range_succ Finset.range_succ
/- warning: finset.range_add_one -> Finset.range_add_one is a dubious translation:
lean 3 declaration is
forall {n : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.range (HAdd.hAdd.{0, 0, 0} Nat Nat Nat (instHAdd.{0} Nat Nat.hasAdd) n (OfNat.ofNat.{0} Nat 1 (OfNat.mk.{0} Nat 1 (One.one.{0} Nat Nat.hasOne))))) (Insert.insert.{0, 0} Nat (Finset.{0} Nat) (Finset.hasInsert.{0} Nat (fun (a : Nat) (b : Nat) => Nat.decidableEq a b)) n (Finset.range n))
but is expected to have type
forall {n : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.range (HAdd.hAdd.{0, 0, 0} Nat Nat Nat (instHAdd.{0} Nat instAddNat) n (OfNat.ofNat.{0} Nat 1 (instOfNatNat 1)))) (Insert.insert.{0, 0} Nat (Finset.{0} Nat) (Finset.instInsertFinset.{0} Nat (fun (a : Nat) (b : Nat) => instDecidableEqNat a b)) n (Finset.range n))
Case conversion may be inaccurate. Consider using '#align finset.range_add_one Finset.range_add_oneₓ'. -/
/-- Restatement of `range_succ` with the successor written as `n + 1`. -/
theorem range_add_one : range (n + 1) = insert n (range n) :=
range_succ
#align finset.range_add_one Finset.range_add_one
#print Finset.not_mem_range_self /-
@[simp]
theorem not_mem_range_self : n ∉ range n :=
not_mem_range_self
#align finset.not_mem_range_self Finset.not_mem_range_self
-/
#print Finset.self_mem_range_succ /-
@[simp]
theorem self_mem_range_succ (n : ℕ) : n ∈ range (n + 1) :=
Multiset.self_mem_range_succ n
#align finset.self_mem_range_succ Finset.self_mem_range_succ
-/
#print Finset.range_subset /-
@[simp]
theorem range_subset {n m} : range n ⊆ range m ↔ n ≤ m :=
range_subset
#align finset.range_subset Finset.range_subset
-/
#print Finset.range_mono /-
theorem range_mono : Monotone range := fun _ _ => range_subset.2
#align finset.range_mono Finset.range_mono
-/
#print Finset.mem_range_succ_iff /-
theorem mem_range_succ_iff {a b : ℕ} : a ∈ Finset.range b.succ ↔ a ≤ b :=
Finset.mem_range.trans Nat.lt_succ_iff
#align finset.mem_range_succ_iff Finset.mem_range_succ_iff
-/
#print Finset.mem_range_le /-
theorem mem_range_le {n x : ℕ} (hx : x ∈ range n) : x ≤ n :=
(mem_range.1 hx).le
#align finset.mem_range_le Finset.mem_range_le
-/
#print Finset.mem_range_sub_ne_zero /-
theorem mem_range_sub_ne_zero {n x : ℕ} (hx : x ∈ range n) : n - x ≠ 0 :=
ne_of_gt <| tsub_pos_of_lt <| mem_range.1 hx
#align finset.mem_range_sub_ne_zero Finset.mem_range_sub_ne_zero
-/
#print Finset.nonempty_range_iff /-
@[simp]
theorem nonempty_range_iff : (range n).Nonempty ↔ n ≠ 0 :=
⟨fun ⟨k, hk⟩ => ((zero_le k).trans_lt <| mem_range.1 hk).ne', fun h =>
⟨0, mem_range.2 <| pos_iff_ne_zero.2 h⟩⟩
#align finset.nonempty_range_iff Finset.nonempty_range_iff
-/
#print Finset.range_eq_empty_iff /-
@[simp]
theorem range_eq_empty_iff : range n = ∅ ↔ n = 0 := by
rw [← not_nonempty_iff_eq_empty, nonempty_range_iff, Classical.not_not]
#align finset.range_eq_empty_iff Finset.range_eq_empty_iff
-/
#print Finset.nonempty_range_succ /-
theorem nonempty_range_succ : (range <| n + 1).Nonempty :=
nonempty_range_iff.2 n.succ_ne_zero
#align finset.nonempty_range_succ Finset.nonempty_range_succ
-/
/- warning: finset.range_filter_eq -> Finset.range_filter_eq is a dubious translation:
lean 3 declaration is
forall {n : Nat} {m : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.filter.{0} Nat (fun (_x : Nat) => Eq.{1} Nat _x m) (fun (a : Nat) => Nat.decidableEq a m) (Finset.range n)) (ite.{1} (Finset.{0} Nat) (LT.lt.{0} Nat Nat.hasLt m n) (Nat.decidableLt m n) (Singleton.singleton.{0, 0} Nat (Finset.{0} Nat) (Finset.hasSingleton.{0} Nat) m) (EmptyCollection.emptyCollection.{0} (Finset.{0} Nat) (Finset.hasEmptyc.{0} Nat)))
but is expected to have type
forall {n : Nat} {m : Nat}, Eq.{1} (Finset.{0} Nat) (Finset.filter.{0} Nat (fun (_x : Nat) => Eq.{1} Nat _x m) (fun (a : Nat) => instDecidableEqNat a m) (Finset.range n)) (ite.{1} (Finset.{0} Nat) (LT.lt.{0} Nat instLTNat m n) (Nat.decLt m n) (Singleton.singleton.{0, 0} Nat (Finset.{0} Nat) (Finset.instSingletonFinset.{0} Nat) m) (EmptyCollection.emptyCollection.{0} (Finset.{0} Nat) (Finset.instEmptyCollectionFinset.{0} Nat)))
Case conversion may be inaccurate. Consider using '#align finset.range_filter_eq Finset.range_filter_eqₓ'. -/
/-- Filtering `range n` for the elements equal to `m` leaves `{m}` when `m < n`
and `∅` otherwise.  Variant of `filter_eq` with the equality flipped. -/
@[simp]
theorem range_filter_eq {n m : ℕ} : (range n).filterₓ (· = m) = if m < n then {m} else ∅ :=
by
-- reduce to `filter_eq`, which states the lemma with `eq m` rather than `(· = m)`
convert filter_eq (range n) m
· ext
exact comm
· simp
#align finset.range_filter_eq Finset.range_filter_eq
end Range
#print Finset.exists_mem_empty_iff /-
-- useful rules for calculations with quantifiers
theorem exists_mem_empty_iff (p : α → Prop) : (∃ x, x ∈ (∅ : Finset α) ∧ p x) ↔ False := by
simp only [not_mem_empty, false_and_iff, exists_false]
#align finset.exists_mem_empty_iff Finset.exists_mem_empty_iff
-/
#print Finset.exists_mem_insert /-
theorem exists_mem_insert [DecidableEq α] (a : α) (s : Finset α) (p : α → Prop) :
(∃ x, x ∈ insert a s ∧ p x) ↔ p a ∨ ∃ x, x ∈ s ∧ p x := by
simp only [mem_insert, or_and_right, exists_or, exists_eq_left]
#align finset.exists_mem_insert Finset.exists_mem_insert
-/
#print Finset.forall_mem_empty_iff /-
theorem forall_mem_empty_iff (p : α → Prop) : (∀ x, x ∈ (∅ : Finset α) → p x) ↔ True :=
iff_true_intro fun _ => False.elim
#align finset.forall_mem_empty_iff Finset.forall_mem_empty_iff
-/
#print Finset.forall_mem_insert /-
theorem forall_mem_insert [DecidableEq α] (a : α) (s : Finset α) (p : α → Prop) :
(∀ x, x ∈ insert a s → p x) ↔ p a ∧ ∀ x, x ∈ s → p x := by
simp only [mem_insert, or_imp, forall_and, forall_eq]
#align finset.forall_mem_insert Finset.forall_mem_insert
-/
end Finset
#print notMemRangeEquiv /-
/-- Equivalence between the set of natural numbers which are `≥ k` and `ℕ`, given by `n → n - k`. -/
def notMemRangeEquiv (k : ℕ) : { n // n ∉ range k } ≃ ℕ
where
toFun i := i.1 - k
invFun j := ⟨j + k, by simp⟩
left_inv j := by
rw [Subtype.ext_iff_val]
apply tsub_add_cancel_of_le
simpa using j.2
right_inv j := add_tsub_cancel_right _ _
#align not_mem_range_equiv notMemRangeEquiv
-/
#print coe_notMemRangeEquiv /-
@[simp]
theorem coe_notMemRangeEquiv (k : ℕ) :
(notMemRangeEquiv k : { n // n ∉ range k } → ℕ) = fun i => i - k :=
rfl
#align coe_not_mem_range_equiv coe_notMemRangeEquiv
-/
#print coe_notMemRangeEquiv_symm /-
@[simp]
theorem coe_notMemRangeEquiv_symm (k : ℕ) :
((notMemRangeEquiv k).symm : ℕ → { n // n ∉ range k }) = fun j => ⟨j + k, by simp⟩ :=
rfl
#align coe_not_mem_range_equiv_symm coe_notMemRangeEquiv_symm
-/
/-! ### dedup on list and multiset -/
namespace Multiset
variable [DecidableEq α] {s t : Multiset α}
#print Multiset.toFinset /-
/-- `to_finset s` removes duplicates from the multiset `s` to produce a finset. -/
def toFinset (s : Multiset α) : Finset α :=
⟨_, nodup_dedup s⟩
#align multiset.to_finset Multiset.toFinset
-/
#print Multiset.toFinset_val /-
@[simp]
theorem toFinset_val (s : Multiset α) : s.toFinset.1 = s.dedup :=
rfl
#align multiset.to_finset_val Multiset.toFinset_val
-/
#print Multiset.toFinset_eq /-
theorem toFinset_eq {s : Multiset α} (n : Nodup s) : Finset.mk s n = s.toFinset :=
Finset.val_inj.1 n.dedup.symm
#align multiset.to_finset_eq Multiset.toFinset_eq
-/
#print Multiset.Nodup.toFinset_inj /-
theorem Nodup.toFinset_inj {l l' : Multiset α} (hl : Nodup l) (hl' : Nodup l')
(h : l.toFinset = l'.toFinset) : l = l' := by
simpa [← to_finset_eq hl, ← to_finset_eq hl'] using h
#align multiset.nodup.to_finset_inj Multiset.Nodup.toFinset_inj
-/
#print Multiset.mem_toFinset /-
@[simp]
theorem mem_toFinset {a : α} {s : Multiset α} : a ∈ s.toFinset ↔ a ∈ s :=
mem_dedup
#align multiset.mem_to_finset Multiset.mem_toFinset
-/
#print Multiset.toFinset_zero /-
@[simp]
theorem toFinset_zero : toFinset (0 : Multiset α) = ∅ :=
rfl
#align multiset.to_finset_zero Multiset.toFinset_zero
-/
#print Multiset.toFinset_cons /-
@[simp]
theorem toFinset_cons (a : α) (s : Multiset α) : toFinset (a ::ₘ s) = insert a (toFinset s) :=
Finset.eq_of_veq dedup_cons
#align multiset.to_finset_cons Multiset.toFinset_cons
-/
#print Multiset.toFinset_singleton /-
@[simp]
theorem toFinset_singleton (a : α) : toFinset ({a} : Multiset α) = {a} := by
rw [← cons_zero, to_finset_cons, to_finset_zero, IsLawfulSingleton.insert_emptyCollection_eq]
#align multiset.to_finset_singleton Multiset.toFinset_singleton
-/
#print Multiset.toFinset_add /-
@[simp]
theorem toFinset_add (s t : Multiset α) : toFinset (s + t) = toFinset s ∪ toFinset t :=
Finset.ext <| by simp
#align multiset.to_finset_add Multiset.toFinset_add
-/
/- warning: multiset.to_finset_nsmul -> Multiset.toFinset_nsmul is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Multiset.{u1} α) (n : Nat), (Ne.{1} Nat n (OfNat.ofNat.{0} Nat 0 (OfNat.mk.{0} Nat 0 (Zero.zero.{0} Nat Nat.hasZero)))) -> (Eq.{succ u1} (Finset.{u1} α) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) (SMul.smul.{0, u1} Nat (Multiset.{u1} α) (AddMonoid.SMul.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.orderedCancelAddCommMonoid.{u1} α)))))) n s)) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) s))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] (s : Multiset.{u1} α) (n : Nat), (Ne.{1} Nat n (OfNat.ofNat.{0} Nat 0 (instOfNatNat 0))) -> (Eq.{succ u1} (Finset.{u1} α) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) (HSMul.hSMul.{0, u1, u1} Nat (Multiset.{u1} α) (Multiset.{u1} α) (instHSMul.{0, u1} Nat (Multiset.{u1} α) (AddMonoid.SMul.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) n s)) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) s))
Case conversion may be inaccurate. Consider using '#align multiset.to_finset_nsmul Multiset.toFinset_nsmulₓ'. -/
/-- Taking `n` copies of a multiset (for `n ≠ 0`) does not change its finset of
elements, since `to_finset` removes duplicates. -/
@[simp]
theorem toFinset_nsmul (s : Multiset α) : ∀ (n : ℕ) (hn : n ≠ 0), (n • s).toFinset = s.toFinset
-- `n = 0` contradicts the hypothesis `h : n ≠ 0`
| 0, h => by contradiction
| n + 1, h => by
-- either this is the last copy (`n = 0`) or we recurse and absorb the union
by_cases n = 0
· rw [h, zero_add, one_nsmul]
· rw [add_nsmul, to_finset_add, one_nsmul, to_finset_nsmul n h, Finset.union_idempotent]
#align multiset.to_finset_nsmul Multiset.toFinset_nsmul
#print Multiset.toFinset_inter /-
@[simp]
theorem toFinset_inter (s t : Multiset α) : toFinset (s ∩ t) = toFinset s ∩ toFinset t :=
Finset.ext <| by simp
#align multiset.to_finset_inter Multiset.toFinset_inter
-/
#print Multiset.toFinset_union /-
@[simp]
theorem toFinset_union (s t : Multiset α) : (s ∪ t).toFinset = s.toFinset ∪ t.toFinset := by
ext <;> simp
#align multiset.to_finset_union Multiset.toFinset_union
-/
#print Multiset.toFinset_eq_empty /-
@[simp]
theorem toFinset_eq_empty {m : Multiset α} : m.toFinset = ∅ ↔ m = 0 :=
Finset.val_inj.symm.trans Multiset.dedup_eq_zero
#align multiset.to_finset_eq_empty Multiset.toFinset_eq_empty
-/
#print Multiset.toFinset_subset /-
@[simp]
theorem toFinset_subset : s.toFinset ⊆ t.toFinset ↔ s ⊆ t := by
simp only [Finset.subset_iff, Multiset.subset_iff, Multiset.mem_toFinset]
#align multiset.to_finset_subset Multiset.toFinset_subset
-/
#print Multiset.toFinset_ssubset /-
@[simp]
theorem toFinset_ssubset : s.toFinset ⊂ t.toFinset ↔ s ⊂ t :=
by
simp_rw [Finset.ssubset_def, to_finset_subset]
rfl
#align multiset.to_finset_ssubset Multiset.toFinset_ssubset
-/
#print Multiset.toFinset_dedup /-
@[simp]
theorem toFinset_dedup (m : Multiset α) : m.dedup.toFinset = m.toFinset := by
simp_rw [to_finset, dedup_idempotent]
#align multiset.to_finset_dedup Multiset.toFinset_dedup
-/
#print Multiset.toFinset_bind_dedup /-
@[simp]
theorem toFinset_bind_dedup [DecidableEq β] (m : Multiset α) (f : α → Multiset β) :
(m.dedup.bind f).toFinset = (m.bind f).toFinset := by simp_rw [to_finset, dedup_bind_dedup]
#align multiset.to_finset_bind_dedup Multiset.toFinset_bind_dedup
-/
#print Multiset.isWellFounded_ssubset /-
instance isWellFounded_ssubset : IsWellFounded (Multiset β) (· ⊂ ·) :=
Subrelation.isWellFounded (InvImage _ _) fun _ _ => by classical exact to_finset_ssubset.2
#align multiset.is_well_founded_ssubset Multiset.isWellFounded_ssubset
-/
end Multiset
namespace Finset
#print Finset.val_toFinset /-
@[simp]
theorem val_toFinset [DecidableEq α] (s : Finset α) : s.val.toFinset = s :=
by
ext
rw [Multiset.mem_toFinset, ← mem_def]
#align finset.val_to_finset Finset.val_toFinset
-/
/- warning: finset.val_le_iff_val_subset -> Finset.val_le_iff_val_subset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {a : Finset.{u1} α} {b : Multiset.{u1} α}, Iff (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.partialOrder.{u1} α))) (Finset.val.{u1} α a) b) (HasSubset.Subset.{u1} (Multiset.{u1} α) (Multiset.hasSubset.{u1} α) (Finset.val.{u1} α a) b)
but is expected to have type
forall {α : Type.{u1}} {a : Finset.{u1} α} {b : Multiset.{u1} α}, Iff (LE.le.{u1} (Multiset.{u1} α) (Preorder.toLE.{u1} (Multiset.{u1} α) (PartialOrder.toPreorder.{u1} (Multiset.{u1} α) (Multiset.instPartialOrderMultiset.{u1} α))) (Finset.val.{u1} α a) b) (HasSubset.Subset.{u1} (Multiset.{u1} α) (Multiset.instHasSubsetMultiset.{u1} α) (Finset.val.{u1} α a) b)
Case conversion may be inaccurate. Consider using '#align finset.val_le_iff_val_subset Finset.val_le_iff_val_subsetₓ'. -/
/-- On the underlying multiset of a `finset`, the multiset order `≤` coincides
with subset `⊆`, because the multiset has no duplicates (`a.nodup`). -/
theorem val_le_iff_val_subset {a : Finset α} {b : Multiset α} : a.val ≤ b ↔ a.val ⊆ b :=
Multiset.le_iff_subset a.Nodup
#align finset.val_le_iff_val_subset Finset.val_le_iff_val_subset
end Finset
namespace List
variable [DecidableEq α] {l l' : List α} {a : α}
#print List.toFinset /-
/-- `to_finset l` removes duplicates from the list `l` to produce a finset. -/
def toFinset (l : List α) : Finset α :=
Multiset.toFinset l
#align list.to_finset List.toFinset
-/
#print List.toFinset_val /-
@[simp]
theorem toFinset_val (l : List α) : l.toFinset.1 = (l.dedup : Multiset α) :=
rfl
#align list.to_finset_val List.toFinset_val
-/
#print List.toFinset_coe /-
@[simp]
theorem toFinset_coe (l : List α) : (l : Multiset α).toFinset = l.toFinset :=
rfl
#align list.to_finset_coe List.toFinset_coe
-/
#print List.toFinset_eq /-
theorem toFinset_eq (n : Nodup l) : @Finset.mk α l n = l.toFinset :=
Multiset.toFinset_eq n
#align list.to_finset_eq List.toFinset_eq
-/
#print List.mem_toFinset /-
@[simp]
theorem mem_toFinset : a ∈ l.toFinset ↔ a ∈ l :=
mem_dedup
#align list.mem_to_finset List.mem_toFinset
-/
#print List.coe_toFinset /-
@[simp, norm_cast]
theorem coe_toFinset (l : List α) : (l.toFinset : Set α) = { a | a ∈ l } :=
Set.ext fun _ => List.mem_toFinset
#align list.coe_to_finset List.coe_toFinset
-/
#print List.toFinset_nil /-
@[simp]
theorem toFinset_nil : toFinset (@nil α) = ∅ :=
rfl
#align list.to_finset_nil List.toFinset_nil
-/
#print List.toFinset_cons /-
@[simp]
theorem toFinset_cons : toFinset (a :: l) = insert a (toFinset l) :=
Finset.eq_of_veq <| by by_cases h : a ∈ l <;> simp [Finset.insert_val', Multiset.dedup_cons, h]
#align list.to_finset_cons List.toFinset_cons
-/
#print List.toFinset_surj_on /-
theorem toFinset_surj_on : Set.SurjOn toFinset { l : List α | l.Nodup } Set.univ :=
by
rintro ⟨⟨l⟩, hl⟩ _
exact ⟨l, hl, (to_finset_eq hl).symm⟩
#align list.to_finset_surj_on List.toFinset_surj_on
-/
#print List.toFinset_surjective /-
theorem toFinset_surjective : Surjective (toFinset : List α → Finset α) := fun s =>
let ⟨l, _, hls⟩ := toFinset_surj_on (Set.mem_univ s)
⟨l, hls⟩
#align list.to_finset_surjective List.toFinset_surjective
-/
#print List.toFinset_eq_iff_perm_dedup /-
theorem toFinset_eq_iff_perm_dedup : l.toFinset = l'.toFinset ↔ l.dedup ~ l'.dedup := by
simp [Finset.ext_iff, perm_ext (nodup_dedup _) (nodup_dedup _)]
#align list.to_finset_eq_iff_perm_dedup List.toFinset_eq_iff_perm_dedup
-/
#print List.toFinset.ext_iff /-
theorem toFinset.ext_iff {a b : List α} : a.toFinset = b.toFinset ↔ ∀ x, x ∈ a ↔ x ∈ b := by
simp only [Finset.ext_iff, mem_to_finset]
#align list.to_finset.ext_iff List.toFinset.ext_iff
-/
#print List.toFinset.ext /-
theorem toFinset.ext : (∀ x, x ∈ l ↔ x ∈ l') → l.toFinset = l'.toFinset :=
toFinset.ext_iff.mpr
#align list.to_finset.ext List.toFinset.ext
-/
#print List.toFinset_eq_of_perm /-
theorem toFinset_eq_of_perm (l l' : List α) (h : l ~ l') : l.toFinset = l'.toFinset :=
toFinset_eq_iff_perm_dedup.mpr h.dedup
#align list.to_finset_eq_of_perm List.toFinset_eq_of_perm
-/
#print List.perm_of_nodup_nodup_toFinset_eq /-
theorem perm_of_nodup_nodup_toFinset_eq (hl : Nodup l) (hl' : Nodup l')
(h : l.toFinset = l'.toFinset) : l ~ l' :=
by
rw [← Multiset.coe_eq_coe]
exact Multiset.Nodup.toFinset_inj hl hl' h
#align list.perm_of_nodup_nodup_to_finset_eq List.perm_of_nodup_nodup_toFinset_eq
-/
#print List.toFinset_append /-
@[simp]
theorem toFinset_append : toFinset (l ++ l') = l.toFinset ∪ l'.toFinset :=
by
induction' l with hd tl hl
· simp
· simp [hl]
#align list.to_finset_append List.toFinset_append
-/
#print List.toFinset_reverse /-
@[simp]
theorem toFinset_reverse {l : List α} : toFinset l.reverse = l.toFinset :=
toFinset_eq_of_perm _ _ (reverse_perm l)
#align list.to_finset_reverse List.toFinset_reverse
-/
#print List.toFinset_replicate_of_ne_zero /-
theorem toFinset_replicate_of_ne_zero {n : ℕ} (hn : n ≠ 0) : (List.replicate n a).toFinset = {a} :=
by
ext x
simp [hn, List.mem_replicate]
#align list.to_finset_replicate_of_ne_zero List.toFinset_replicate_of_ne_zero
-/
#print List.toFinset_union /-
@[simp]
theorem toFinset_union (l l' : List α) : (l ∪ l').toFinset = l.toFinset ∪ l'.toFinset :=
by
ext
simp
#align list.to_finset_union List.toFinset_union
-/
#print List.toFinset_inter /-
@[simp]
theorem toFinset_inter (l l' : List α) : (l ∩ l').toFinset = l.toFinset ∩ l'.toFinset :=
by
ext
simp
#align list.to_finset_inter List.toFinset_inter
-/
#print List.toFinset_eq_empty_iff /-
@[simp]
theorem toFinset_eq_empty_iff (l : List α) : l.toFinset = ∅ ↔ l = nil := by cases l <;> simp
#align list.to_finset_eq_empty_iff List.toFinset_eq_empty_iff
-/
end List
namespace Finset
section ToList
#print Finset.toList /-
/-- Produce a list of the elements in the finite set using choice. -/
noncomputable def toList (s : Finset α) : List α :=
s.1.toList
#align finset.to_list Finset.toList
-/
#print Finset.nodup_toList /-
theorem nodup_toList (s : Finset α) : s.toList.Nodup :=
by
rw [to_list, ← Multiset.coe_nodup, Multiset.coe_toList]
exact s.nodup
#align finset.nodup_to_list Finset.nodup_toList
-/
#print Finset.mem_toList /-
@[simp]
theorem mem_toList {a : α} {s : Finset α} : a ∈ s.toList ↔ a ∈ s :=
mem_toList
#align finset.mem_to_list Finset.mem_toList
-/
#print Finset.toList_eq_nil /-
@[simp]
theorem toList_eq_nil {s : Finset α} : s.toList = [] ↔ s = ∅ :=
toList_eq_nil.trans val_eq_zero
#align finset.to_list_eq_nil Finset.toList_eq_nil
-/
#print Finset.empty_toList /-
@[simp]
theorem empty_toList {s : Finset α} : s.toList.Empty ↔ s = ∅ :=
List.isEmpty_iff_eq_nil.trans toList_eq_nil
#align finset.empty_to_list Finset.empty_toList
-/
#print Finset.toList_empty /-
@[simp]
theorem toList_empty : (∅ : Finset α).toList = [] :=
toList_eq_nil.mpr rfl
#align finset.to_list_empty Finset.toList_empty
-/
#print Finset.Nonempty.toList_ne_nil /-
theorem Nonempty.toList_ne_nil {s : Finset α} (hs : s.Nonempty) : s.toList ≠ [] :=
mt toList_eq_nil.mp hs.ne_empty
#align finset.nonempty.to_list_ne_nil Finset.Nonempty.toList_ne_nil
-/
#print Finset.Nonempty.not_empty_toList /-
theorem Nonempty.not_empty_toList {s : Finset α} (hs : s.Nonempty) : ¬s.toList.Empty :=
mt empty_toList.mp hs.ne_empty
#align finset.nonempty.not_empty_to_list Finset.Nonempty.not_empty_toList
-/
#print Finset.coe_toList /-
@[simp, norm_cast]
theorem coe_toList (s : Finset α) : (s.toList : Multiset α) = s.val :=
s.val.coe_toList
#align finset.coe_to_list Finset.coe_toList
-/
#print Finset.toList_toFinset /-
@[simp]
theorem toList_toFinset [DecidableEq α] (s : Finset α) : s.toList.toFinset = s :=
by
ext
simp
#align finset.to_list_to_finset Finset.toList_toFinset
-/
#print Finset.toList_eq_singleton_iff /-
@[simp]
theorem toList_eq_singleton_iff {a : α} {s : Finset α} : s.toList = [a] ↔ s = {a} := by
rw [to_list, to_list_eq_singleton_iff, val_eq_singleton_iff]
#align finset.to_list_eq_singleton_iff Finset.toList_eq_singleton_iff
-/
#print Finset.toList_singleton /-
@[simp]
theorem toList_singleton : ∀ a, ({a} : Finset α).toList = [a] :=
toList_singleton
#align finset.to_list_singleton Finset.toList_singleton
-/
#print Finset.exists_list_nodup_eq /-
theorem exists_list_nodup_eq [DecidableEq α] (s : Finset α) :
∃ l : List α, l.Nodup ∧ l.toFinset = s :=
⟨s.toList, s.nodup_toList, s.toList_toFinset⟩
#align finset.exists_list_nodup_eq Finset.exists_list_nodup_eq
-/
#print Finset.toList_cons /-
theorem toList_cons {a : α} {s : Finset α} (h : a ∉ s) : (cons a s h).toList ~ a :: s.toList :=
(List.perm_ext (nodup_toList _) (by simp [h, nodup_to_list s])).2 fun x => by
simp only [List.mem_cons, Finset.mem_toList, Finset.mem_cons]
#align finset.to_list_cons Finset.toList_cons
-/
#print Finset.toList_insert /-
theorem toList_insert [DecidableEq α] {a : α} {s : Finset α} (h : a ∉ s) :
(insert a s).toList ~ a :: s.toList :=
cons_eq_insert _ _ h ▸ toList_cons _
#align finset.to_list_insert Finset.toList_insert
-/
end ToList
/-!
### disj_Union
This section is about the bounded union of a disjoint indexed family `t : α → finset β` of finite
sets over a finite set `s : finset α`. In most cases `finset.bUnion` should be preferred.
-/
section DisjUnion
variable {s s₁ s₂ : Finset α} {t t₁ t₂ : α → Finset β}
/-- `disj_Union s f h` is the set such that `a ∈ disj_Union s f` iff `a ∈ f i` for some `i ∈ s`.
It is the same as `s.bUnion f`, but it does not require decidable equality on the type. The
hypothesis ensures that the sets are disjoint. -/
def disjUnion (s : Finset α) (t : α → Finset β) (hf : (s : Set α).PairwiseDisjoint t) : Finset β :=
⟨s.val.bind (Finset.val ∘ t),
-- `bind` of multisets is nodup when every part is nodup and the parts are
-- pairwise disjoint; the two components below supply exactly those facts,
-- the second derived from the `pairwise_disjoint` hypothesis `hf`.
Multiset.nodup_bind.mpr
⟨fun a ha => (t a).Nodup,
s.Nodup.Pairwise fun a ha b hb hab => disjoint_val.2 <| hf ha hb hab⟩⟩
#align finset.disj_Union Finset.disjUnionₓ
/- warning: finset.disj_Union_val -> Finset.disjUnionᵢ_val is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} (s : Finset.{u1} α) (t : α -> (Finset.{u2} β)) (h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) t), Eq.{succ u2} (Multiset.{u2} β) (Finset.val.{u2} β (Finset.disjUnionₓ.{u1, u2} α β s t h)) (Multiset.bind.{u1, u2} α β (Finset.val.{u1} α s) (fun (a : α) => Finset.val.{u2} β (t a)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} (s : Finset.{u2} α) (t : α -> (Finset.{u1} β)) (h : Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} β) α (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (Finset.toSet.{u2} α s) t), Eq.{succ u1} (Multiset.{u1} β) (Finset.val.{u1} β (Finset.disjUnionᵢ.{u2, u1} α β s t h)) (Multiset.bind.{u2, u1} α β (Finset.val.{u2} α s) (fun (a : α) => Finset.val.{u1} β (t a)))
Case conversion may be inaccurate. Consider using '#align finset.disj_Union_val Finset.disjUnionᵢ_valₓ'. -/
-- The underlying multiset of a `disj_Union` is the multiset `bind` of the
-- underlying multisets; true by definition (`rfl`).
@[simp]
theorem disjUnionᵢ_val (s : Finset α) (t : α → Finset β) (h) :
(s.disjUnionₓ t h).1 = s.1.bind fun a => (t a).1 :=
rfl
#align finset.disj_Union_val Finset.disjUnionᵢ_val
#print Finset.disjUnionᵢ_empty /-
@[simp]
theorem disjUnionᵢ_empty (t : α → Finset β) : disjUnion ∅ t (by simp) = ∅ :=
rfl
#align finset.disj_Union_empty Finset.disjUnionᵢ_empty
-/
/- warning: finset.mem_disj_Union -> Finset.mem_disjUnionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {b : β} {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) t}, Iff (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s t h)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (t a))))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {b : β} {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) (Finset.toSet.{u1} α s) t}, Iff (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u1, u2} α β s t h)) (Exists.{succ u1} α (fun (a : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (t a))))
Case conversion may be inaccurate. Consider using '#align finset.mem_disj_Union Finset.mem_disjUnionᵢₓ'. -/
/-- Membership in a `disj_Union`: `b ∈ s.disj_Union t h` iff `b ∈ t a` for some `a ∈ s`. -/
@[simp]
theorem mem_disjUnionᵢ {b : β} {h} : b ∈ s.disjUnionₓ t h ↔ ∃ a ∈ s, b ∈ t a := by
simp only [mem_def, disj_Union_val, mem_bind, exists_prop]
#align finset.mem_disj_Union Finset.mem_disjUnionᵢ
/- warning: finset.coe_disj_Union -> Finset.coe_disjUnionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) t}, Eq.{succ u2} (Set.{u2} β) ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (Finset.disjUnionₓ.{u1, u2} α β s t h)) (Set.unionᵢ.{u2, succ u1} β α (fun (x : α) => Set.unionᵢ.{u2, 0} β (Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (fun (H : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => (fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (t x))))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) (Finset.toSet.{u1} α s) t}, Eq.{succ u2} (Set.{u2} β) (Finset.toSet.{u2} β (Finset.disjUnionᵢ.{u1, u2} α β s t h)) (Set.unionᵢ.{u2, succ u1} β α (fun (x : α) => Set.unionᵢ.{u2, 0} β (Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Finset.toSet.{u1} α s)) (fun (H : Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Finset.toSet.{u1} α s)) => Finset.toSet.{u2} β (t x))))
Case conversion may be inaccurate. Consider using '#align finset.coe_disj_Union Finset.coe_disjUnionᵢₓ'. -/
/-- Coercion of a finset disjoint union to a set: `↑(s.disjUnion t h)` equals the indexed
set union `⋃ x ∈ ↑s, ↑(t x)`.  Tagged `norm_cast` so the cast-normalisation tactic can push
the `Finset → Set` coercion through `disjUnion`.  The proof is by set extensionality,
unfolding membership in the disjoint union (`mem_disj_Union`) on one side and membership in
the indexed union (`Set.mem_unionᵢ`) on the other. -/
@[simp, norm_cast]
theorem coe_disjUnionᵢ {h} : (s.disjUnionₓ t h : Set β) = ⋃ x ∈ (s : Set α), t x := by
simp only [Set.ext_iff, mem_disj_Union, Set.mem_unionᵢ, iff_self_iff, mem_coe, imp_true_iff]
#align finset.coe_disj_Union Finset.coe_disjUnionᵢ
/- warning: finset.disj_Union_cons -> Finset.disjUnionᵢ_cons is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} (a : α) (s : Finset.{u1} α) (ha : Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s)) (f : α -> (Finset.{u2} β)) (H : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (Finset.cons.{u1} α a s ha)) f), Eq.{succ u2} (Finset.{u2} β) (Finset.disjUnionₓ.{u1, u2} α β (Finset.cons.{u1} α a s ha) f H) (Finset.disjUnion.{u2} β (f a) (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc))) (Iff.mpr (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) (f a) (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) 
(CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc)))) (forall {{a_1 : β}}, (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f a)) -> (Not (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc)))))) (Finset.disjoint_left.{u2} β (f a) (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} 
(Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc)))) (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)) (h : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc)))) => (fun (_a : Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) => Exists.dcases_on.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) (fun (_a : Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) => False) _a (fun (w : α) (h_1 : Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f w))) => Exists.dcases_on.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f w)) (fun (h_1 : Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f w))) => False) h_1 (fun (h_1_w : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) w s) (h_1_h : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f w)) => id.{0} False (Iff.mp (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) (f a) (f w)) (forall {{a_1 : β}}, (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f a)) -> (Not (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f w)))) (Finset.disjoint_left.{u2} β (f a) (f w)) (H a (Finset.mem_cons_self.{u1} α a s ha) w (Multiset.mem_cons_of_mem.{u1} α w a (Finset.val.{u1} α s) h_1_w) (Ne.symm.{succ u1} α w a (ne_of_mem_of_not_mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) s w a h_1_w 
ha))) b hb h_1_h)))) (Iff.mp (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc)))) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b (fun (b : α) (hb : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) (c : α) (hc : Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) c ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s)) => 
H b (Multiset.mem_cons_of_mem.{u1} α b a (Finset.val.{u1} α s) hb) c (Multiset.mem_cons_of_mem.{u1} α c a (Finset.val.{u1} α s) hc))) h))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} (a : α) (s : Finset.{u2} α) (ha : Not (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s)) (f : α -> (Finset.{u1} β)) (H : Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} β) α (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (Finset.toSet.{u2} α (Finset.cons.{u2} α a s ha)) f), Eq.{succ u1} (Finset.{u1} β) (Finset.disjUnionᵢ.{u2, u1} α β (Finset.cons.{u2} α a s ha) f H) (Finset.disjUnion.{u1} β (f a) (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (b : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) b (Finset.toSet.{u2} α s)) (c : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) c (Finset.toSet.{u2} α s)) => H b (Multiset.mem_cons_of_mem.{u2} α b a (Finset.val.{u2} α s) hb) c (Multiset.mem_cons_of_mem.{u2} α c a (Finset.val.{u2} α s) hc))) (Iff.mpr (Disjoint.{u1} (Finset.{u1} β) (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (f a) (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (b : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) b (Finset.toSet.{u2} α s)) (c : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) c (Finset.toSet.{u2} α s)) => H b (Multiset.mem_cons_of_mem.{u2} α b a (Finset.val.{u2} α s) hb) c (Multiset.mem_cons_of_mem.{u2} α c a (Finset.val.{u2} α s) hc)))) (forall {{a_1 : β}}, (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) a_1 (f a)) -> (Not (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) a_1 (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (b : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) b (Finset.toSet.{u2} α s)) (c : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) c (Finset.toSet.{u2} α s)) => H b (Multiset.mem_cons_of_mem.{u2} α b a (Finset.val.{u2} α s) hb) c 
(Multiset.mem_cons_of_mem.{u2} α c a (Finset.val.{u2} α s) hc)))))) (Finset.disjoint_left.{u1} β (f a) (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (b : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) b (Finset.toSet.{u2} α s)) (c : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) c (Finset.toSet.{u2} α s)) => H b (Multiset.mem_cons_of_mem.{u2} α b a (Finset.val.{u2} α s) hb) c (Multiset.mem_cons_of_mem.{u2} α c a (Finset.val.{u2} α s) hc)))) (fun (b : β) (hb : Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (f a)) (h : Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (b : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) b (Finset.toSet.{u2} α s)) (c : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) c (Finset.toSet.{u2} α s)) => H b (Multiset.mem_cons_of_mem.{u2} α b a (Finset.val.{u2} α s) hb) c (Multiset.mem_cons_of_mem.{u2} α c a (Finset.val.{u2} α s) hc)))) => Finset.disjUnionᵢ_cons.match_1.{u2, u1} α β s f b (fun (x._@.Mathlib.Data.Finset.Basic._hyg.32716 : Exists.{succ u2} α (fun (a : α) => And (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s) (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (f a)))) => False) (Iff.mp (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (Finset.disjUnionᵢ.{u2, u1} α β s f (fun (x._@.Mathlib.Data.Finset.Basic._hyg.32681 : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x._@.Mathlib.Data.Finset.Basic._hyg.32681 (Finset.toSet.{u2} α s)) (x._@.Mathlib.Data.Finset.Basic._hyg.32684 : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x._@.Mathlib.Data.Finset.Basic._hyg.32684 (Finset.toSet.{u2} α s)) => H x._@.Mathlib.Data.Finset.Basic._hyg.32681 
(Multiset.mem_cons_of_mem.{u2} α x._@.Mathlib.Data.Finset.Basic._hyg.32681 a (Finset.val.{u2} α s) hb) x._@.Mathlib.Data.Finset.Basic._hyg.32684 (Multiset.mem_cons_of_mem.{u2} α x._@.Mathlib.Data.Finset.Basic._hyg.32684 a (Finset.val.{u2} α s) hc)))) (Exists.{succ u2} α (fun (a : α) => And (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s) (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u2, u1} α β s f b (fun (x._@.Mathlib.Data.Finset.Basic._hyg.32681 : α) (hb : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x._@.Mathlib.Data.Finset.Basic._hyg.32681 (Finset.toSet.{u2} α s)) (x._@.Mathlib.Data.Finset.Basic._hyg.32684 : α) (hc : Membership.mem.{u2, u2} α (Set.{u2} α) (Set.instMembershipSet.{u2} α) x._@.Mathlib.Data.Finset.Basic._hyg.32684 (Finset.toSet.{u2} α s)) => H x._@.Mathlib.Data.Finset.Basic._hyg.32681 (Multiset.mem_cons_of_mem.{u2} α x._@.Mathlib.Data.Finset.Basic._hyg.32681 a (Finset.val.{u2} α s) hb) x._@.Mathlib.Data.Finset.Basic._hyg.32684 (Multiset.mem_cons_of_mem.{u2} α x._@.Mathlib.Data.Finset.Basic._hyg.32684 a (Finset.val.{u2} α s) hc))) h) (fun (w._@.Mathlib.Data.Finset.Basic._hyg.32729 : α) (hc : Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) w._@.Mathlib.Data.Finset.Basic._hyg.32729 s) (h : Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) b (f w._@.Mathlib.Data.Finset.Basic._hyg.32729)) => Iff.mp (Disjoint.{u1} (Finset.{u1} β) (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (f a) (f w._@.Mathlib.Data.Finset.Basic._hyg.32729)) (forall {{a_1 : β}}, (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) a_1 (f a)) -> (Not (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) a_1 (f w._@.Mathlib.Data.Finset.Basic._hyg.32729)))) (Finset.disjoint_left.{u1} β (f a) (f 
w._@.Mathlib.Data.Finset.Basic._hyg.32729)) (H a (Finset.mem_cons_self.{u2} α a s ha) w._@.Mathlib.Data.Finset.Basic._hyg.32729 (Multiset.mem_cons_of_mem.{u2} α w._@.Mathlib.Data.Finset.Basic._hyg.32729 a (Finset.val.{u2} α s) hc) (Ne.symm.{succ u2} α w._@.Mathlib.Data.Finset.Basic._hyg.32729 a (ne_of_mem_of_not_mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) s w._@.Mathlib.Data.Finset.Basic._hyg.32729 a hc ha))) b hb h))))
Case conversion may be inaccurate. Consider using '#align finset.disj_Union_cons Finset.disjUnionᵢ_consₓ'. -/
/-- Peeling one element off a disjoint union: the disjoint union of `f` over `cons a s ha`
is `f a` disjointly unioned (binary `disjUnion`) with the disjoint union of `f` over `s`.

The two side conditions are discharged inline:
* the pairwise-disjointness hypothesis for the recursive call is obtained by restricting `H`
  along `mem_cons_of_mem`, i.e. every element of `s` is also in `cons a s ha`;
* disjointness of `f a` from the remaining union is proved via `disjoint_left`: any `b` in
  both would come from some `c ∈ s` with `b ∈ f c` (by `mem_disjUnionᵢ`), contradicting
  `H` applied to `a` and `c`, which are distinct since `a ∉ s` (`ne_of_mem_of_not_mem`).

The equality itself reduces to `Multiset.cons_bind` on the underlying multisets
(`eq_of_veq`). -/
@[simp]
theorem disjUnionᵢ_cons (a : α) (s : Finset α) (ha : a ∉ s) (f : α → Finset β) (H) :
disjUnion (cons a s ha) f H =
(f a).disjUnion (s.disjUnionₓ f fun b hb c hc => H (mem_cons_of_mem hb) (mem_cons_of_mem hc))
(disjoint_left.mpr fun b hb h =>
-- extract the witness `c ∈ s` with `b ∈ f c` from membership in the recursive union
let ⟨c, hc, h⟩ := mem_disjUnionᵢ.mp h
disjoint_left.mp
(H (mem_cons_self a s) (mem_cons_of_mem hc) (ne_of_mem_of_not_mem hc ha).symm) hb h) :=
eq_of_veq <| Multiset.cons_bind _ _ _
#align finset.disj_Union_cons Finset.disjUnionᵢ_cons
/- warning: finset.singleton_disj_Union -> Finset.singleton_disjUnionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {t : α -> (Finset.{u2} β)} (a : α) {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a)) t}, Eq.{succ u2} (Finset.{u2} β) (Finset.disjUnionₓ.{u1, u2} α β (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.hasSingleton.{u1} α) a) t h) (t a)
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} {t : α -> (Finset.{u2} β)} (a : α) {h : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) (Finset.toSet.{u1} α (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a)) t}, Eq.{succ u2} (Finset.{u2} β) (Finset.disjUnionᵢ.{u1, u2} α β (Singleton.singleton.{u1, u1} α (Finset.{u1} α) (Finset.instSingletonFinset.{u1} α) a) t h) (t a)
Case conversion may be inaccurate. Consider using '#align finset.singleton_disj_Union Finset.singleton_disjUnionᵢₓ'. -/
/-- The disjoint union of `t` over the singleton finset `{a}` is just `t a`.
Reduces to `Multiset.singleton_bind` on the underlying multisets via `eq_of_veq`. -/
@[simp]
theorem singleton_disjUnionᵢ (a : α) {h} : Finset.disjUnion {a} t h = t a :=
eq_of_veq <| Multiset.singleton_bind _ _
#align finset.singleton_disj_Union Finset.singleton_disjUnionᵢ
/- warning: finset.disj_Union_disj_Union -> Finset.disjUnionᵢ_disjUnionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (g : β -> (Finset.{u3} γ)) (h1 : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) f) (h2 : Set.PairwiseDisjoint.{u3, u2} (Finset.{u3} γ) β (Finset.partialOrder.{u3} γ) (Finset.orderBot.{u3} γ) ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (Finset.disjUnionₓ.{u1, u2} α β s f h1)) g), Eq.{succ u3} (Finset.{u3} γ) (Finset.disjUnionₓ.{u2, u3} β γ (Finset.disjUnionₓ.{u1, u2} α β s f h1) g h2) (Finset.disjUnionₓ.{u1, u3} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) γ (Finset.attach.{u1} α s) (fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ 
u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) (fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) (ha : Membership.Mem.{u1, u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.hasMem.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) a ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Finset.Set.hasCoeT.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) (Finset.attach.{u1} α s))) (b : Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) (hb : Membership.Mem.{u1, u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.hasMem.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) b ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Set.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))) (Finset.Set.hasCoeT.{u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) (Finset.attach.{u1} α s))) (hab : Ne.{succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) a b) => Iff.mpr (Disjoint.{u3} (Finset.{u3} γ) (Finset.partialOrder.{u3} γ) (Finset.orderBot.{u3} γ) ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : 
α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} 
(Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f 
((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β 
(Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) a) ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) 
(Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => 
h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} 
α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun 
(x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α 
(fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) b)) (forall {{a_1 : γ}}, (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) a_1 ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} 
β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => 
Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) a)) -> (Not (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) a_1 ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, 
u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ 
u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ 
u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) b)))) (Finset.disjoint_left.{u3} γ ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) 
(b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} 
(Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) 
α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) 
(Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} 
α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) a) ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun 
(a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x 
: α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ 
u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) b)) (fun (x : γ) (hxa : Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun 
(x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) a)) (hxb : Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x ((fun (a : Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) => Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x 
: α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, 
u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ 
u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) 
c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) 
(b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) b)) => Exists.dcases_on.{succ u2} β (fun (a_1 : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a_1))) (fun (_fresh.631.39471 : Exists.{succ u2} β (fun (a_1 : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a_1)))) => False) (Iff.mp (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α 
(fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc)))))) (Exists.{succ u2} β (fun (a_1 : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} 
α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a_1)))) (Finset.mem_disjUnionᵢ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) g x (fun (b : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) => h2 b (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) 
=> Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => 
self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hc))))) hxa) (fun (xa : β) (h : Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun 
(x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xa))) => Exists.dcases_on.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xa)) (fun (h : Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xa))) => False) h (fun (hfa : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (hga : Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xa)) => Exists.dcases_on.{succ u2} β (fun (a : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) 
x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a))) (fun (_fresh.631.39588 : Exists.{succ u2} β (fun (a : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a)))) => False) (Iff.mp (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (Finset.disjUnionₓ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b)) g (fun (b_1 : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b_1 ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) 
(Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x 
s))))) b)))) => h2 b_1 (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b_1 h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => 
Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (Subtype.prop.{succ u1} α (fun (x : 
α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, 
succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x 
s))))) b))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) hc)))))) (Exists.{succ u2} β (fun (a : β) => Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g a)))) (Finset.mem_disjUnionᵢ.{u2, u3} β γ (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, 
succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b)) g x (fun (b_1 : β) (hb : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) b_1 ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b)))) (c : β) (hc : Membership.Mem.{u2, u2} β (Set.{u2} β) (Set.hasMem.{u2} β) c ((fun (a : Type.{u2}) (b : Type.{u2}) [self : HasLiftT.{succ u2, succ u2} a b] => self.0) (Finset.{u2} β) (Set.{u2} β) (HasLiftT.mk.{succ u2, succ 
u2} (Finset.{u2} β) (Set.{u2} β) (CoeTCₓ.coe.{succ u2, succ u2} (Finset.{u2} β) (Set.{u2} β) (Finset.Set.hasCoeT.{u2} β))) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b)))) => h2 b_1 (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f b_1 h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} 
α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) => 
Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) hb))) c (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f c h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) 
(Finset.hasMem.{u1} α) x s))))) b) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) c (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) hc))))) hxb) (fun (xb : β) (h : Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xb))) => Exists.dcases_on.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xb)) (fun (h : Exists.{0} (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (fun (H : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} 
α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) => Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xb))) => False) h (fun (hfb : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (hgb : Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xb)) => Iff.mp (Disjoint.{u3} (Finset.{u3} γ) (Finset.partialOrder.{u3} γ) (Finset.orderBot.{u3} γ) (g xa) (g xb)) (forall {{a : γ}}, (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) a (g xa)) -> (Not (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) a (g xb)))) (Finset.disjoint_left.{u3} γ (g xa) (g xb)) (h2 xa (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f xa h1) (Exists.intro.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α 
(Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) hfa))) xb (Iff.mpr (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (Finset.disjUnionₓ.{u1, u2} α β s f h1)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f a)))) (Finset.mem_disjUnionᵢ.{u1, u2} α β s f xb h1) (Exists.intro.{succ u1} α 
(fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f a))) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Exists.intro.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ 
u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) hfb))) (id.{0} (Ne.{succ u2} β xa xb) (fun (ᾰ : Eq.{succ u2} β xa xb) => Eq.ndrec.{0, succ u2} β xa (fun (xb : β) => (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xb (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} 
(Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) -> (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xb)) -> False) (fun (hfb : Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) xa (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (hgb : Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x (g xa)) => Iff.mp (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun 
(x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (forall {{a_1 : β}}, (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a))) -> (Not (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) a_1 (f 
((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))))) (Finset.disjoint_left.{u2} β (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)) (f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α 
(coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b))) (h1 ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) a) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) b) (Subtype.prop.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) b) (Function.Injective.ne.{succ u1, 
succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (HasLiftT.mk.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (CoeTCₓ.coe.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeBase.{succ u1, succ u1} (Subtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)))))) (Subtype.coe_injective.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s)) a b hab)) xa hfa hfb) xb ᾰ hfb hgb))) x hga hgb)))))))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Type.{u1}} (s : Finset.{u3} α) (f : α -> (Finset.{u2} β)) (g : β -> (Finset.{u1} γ)) (h1 : Set.PairwiseDisjoint.{u2, u3} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) (Finset.toSet.{u3} α s) f) (h2 : Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} γ) β (Finset.partialOrder.{u1} γ) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} γ) (Finset.toSet.{u2} β (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) g), Eq.{succ u1} (Finset.{u1} γ) (Finset.disjUnionᵢ.{u2, u1} β γ (Finset.disjUnionᵢ.{u3, u2} α β s f h1) g h2) (Finset.disjUnionᵢ.{u3, u1} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) γ (Finset.attach.{u3} α s) (fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) 
(Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α 
(fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) (fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (ha : Membership.mem.{u3, u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (Set.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s))) (Set.instMembershipSet.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s))) a (Finset.toSet.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (Finset.attach.{u3} α s))) (b : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (hb : Membership.mem.{u3, u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (Set.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s))) (Set.instMembershipSet.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s))) b (Finset.toSet.{u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) (Finset.attach.{u3} α s))) (hab : Ne.{succ u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) a b) => Iff.mpr (Disjoint.{u1} (Finset.{u1} γ) (Finset.partialOrder.{u1} γ) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} γ) ((fun (a 
: Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x 
: α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) a) ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : 
Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And 
(Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) b)) (forall {{a_1 : γ}}, (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) a_1 ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) 
(Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, 
u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) a)) -> (Not (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) a_1 ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α 
(Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) b)))) (Finset.disjoint_left.{u1} γ ((fun (a : 
Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : 
α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) a) ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : 
Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And 
(Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) b)) (fun (x : γ) (hxa : Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) 
(Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, 
u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) a)) (hxb : Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x ((fun (a : Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α 
(Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) b)) => Exists.casesOn.{succ u2} β (fun (a_1 : 
β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a_1))) (fun (_fresh.631.39471 : Exists.{succ u2} β (fun (a_1 : β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a_1)))) => False) (Iff.mp (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) 
(Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α 
(fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc)))))) (Exists.{succ u2} β (fun (a_1 : β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a_1)))) (Finset.mem_disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) g x (fun (b : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)))) => h2 b (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : 
α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) hc))))) hxa) (fun (xa : β) (h : And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) 
x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xa))) => And.casesOn.{0} (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xa)) (fun (h : And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xa))) => False) h (fun (hfa : Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (hga : Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xa)) => Exists.casesOn.{succ u2} β (fun (a : β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a))) (fun (_fresh.631.39588 : Exists.{succ u2} β (fun (a : β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a)))) => False) (Iff.mp (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (Finset.disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => 
Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b)) g (fun (b_1 : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b_1 (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b)))) => h2 b_1 (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b_1 h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) 
(Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) hc)))))) (Exists.{succ u2} β (fun (a : β) => And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g a)))) (Finset.mem_disjUnionᵢ.{u2, u1} β γ (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b)) g x (fun (b_1 : β) (hb : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) b_1 (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) 
(Finset.instMembershipFinset.{u3} α) x s) b)))) (c : β) (hc : Membership.mem.{u2, u2} β (Set.{u2} β) (Set.instMembershipSet.{u2} β) c (Finset.toSet.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b)))) => h2 b_1 (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f b_1 h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) hb))) c (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a)))) 
(Finset.mem_disjUnionᵢ.{u3, u2} α β s f c h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) c (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) hc))))) hxb) (fun (xb : β) (h : And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xb))) => And.casesOn.{0} (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xb)) (fun (h : And (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xb))) => False) h (fun (hfb : Membership.mem.{u2, u2} β 
(Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (hgb : Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xb)) => Iff.mp (Disjoint.{u1} (Finset.{u1} γ) (Finset.partialOrder.{u1} γ) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} γ) (g xa) (g xb)) (forall {{a : γ}}, (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) a (g xa)) -> (Not (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) a (g xb)))) (Finset.disjoint_left.{u1} γ (g xa) (g xb)) (h2 xa (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f xa h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x 
s) a) hfa))) xb (Iff.mpr (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (Finset.disjUnionᵢ.{u3, u2} α β s f h1)) (Exists.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f a)))) (Finset.mem_disjUnionᵢ.{u3, u2} α β s f xb h1) (Exists.intro.{succ u3} α (fun (a : α) => And (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f a))) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (And.intro (Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) hfb))) (fun (a._@.Init.Prelude.139.Mathlib.Data.Finset.Basic._hyg.32958 : Eq.{succ u2} β xa xb) => Eq.ndrec.{0, succ u2} β xa (fun (xb : β) => (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xb (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) -> (Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xb)) -> False) (fun (hfb : Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) xa (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) 
(Finset.instMembershipFinset.{u3} α) x s) b))) (hgb : Membership.mem.{u1, u1} γ (Finset.{u1} γ) (Finset.instMembershipFinset.{u1} γ) x (g xa)) => Iff.mp (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (forall {{a_1 : β}}, (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a))) -> (Not (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) a_1 (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))))) (Finset.disjoint_left.{u2} β (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a)) (f (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b))) (h1 (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (Subtype.prop.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) b) (Function.Injective.ne.{succ u3, succ u3} (Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) α (fun (a : 
Subtype.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) => Subtype.val.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s) a) (Subtype.coe_injective.{succ u3} α (fun (x : α) => Membership.mem.{u3, u3} α (Finset.{u3} α) (Finset.instMembershipFinset.{u3} α) x s)) a b hab)) xa hfa hfb) xb a._@.Init.Prelude.139.Mathlib.Data.Finset.Basic._hyg.32958 hfb hgb)) x hga hgb)))))))
Case conversion may be inaccurate. Consider using '#align finset.disj_Union_disj_Union Finset.disjUnionᵢ_disjUnionᵢₓ'. -/
-- Nested `disjUnionₓ` collapses: taking the disjoint union of `g` over the disjoint union of
-- `f` over `s` equals a single disjoint union over `s.attach`. The disjointness side conditions
-- for the right-hand side are discharged by the supplied tactic block; the equality itself is
-- proved on underlying multisets via `Multiset.bind_assoc` and `Multiset.attach_bind_coe`.
theorem disjUnionᵢ_disjUnionᵢ (s : Finset α) (f : α → Finset β) (g : β → Finset γ) (h1 h2) :
(s.disjUnionₓ f h1).disjUnionₓ g h2 =
s.attach.disjUnionₓ
(fun a =>
(f a).disjUnionₓ g fun b hb c hc =>
h2 (mem_disjUnionᵢ.mpr ⟨_, a.Prop, hb⟩) (mem_disjUnionᵢ.mpr ⟨_, a.Prop, hc⟩))
fun a ha b hb hab =>
disjoint_left.mpr fun x hxa hxb =>
by
-- Unpack membership in the two inner disjoint unions, then derive a contradiction
-- from the pairwise-disjointness hypotheses `h2` (for distinct fibers) and `h1`
-- (for distinct base points, via injectivity of `Subtype.val`).
obtain ⟨xa, hfa, hga⟩ := mem_disj_Union.mp hxa
obtain ⟨xb, hfb, hgb⟩ := mem_disj_Union.mp hxb
refine'
disjoint_left.mp
(h2 (mem_disj_Union.mpr ⟨_, a.prop, hfa⟩) (mem_disj_Union.mpr ⟨_, b.prop, hfb⟩) _) hga
hgb
rintro rfl
exact disjoint_left.mp (h1 a.prop b.prop <| subtype.coe_injective.ne hab) hfa hfb :=
eq_of_veq <| Multiset.bind_assoc.trans (Multiset.attach_bind_coe _ _).symm
#align finset.disj_Union_disj_Union Finset.disjUnionᵢ_disjUnionᵢ
#print Finset.disjUnionᵢ_filter_eq_of_maps_to /-
-- Partitioning `s` into the fibers of `f` (as a disjoint union over `t`) recovers `s`,
-- given that `f` maps every element of `s` into `t`. Distinct fibers are disjoint
-- because an element cannot satisfy `f c = x'` and `f c = y'` for `x' ≠ y'`.
theorem disjUnionᵢ_filter_eq_of_maps_to [DecidableEq β] {s : Finset α} {t : Finset β} {f : α → β}
(h : ∀ x ∈ s, f x ∈ t) :
(t.disjUnionₓ (fun a => s.filterₓ fun c => f c = a) fun x' hx y' hy hne =>
disjoint_filter_filter' _ _
(by
simp_rw [Pi.disjoint_iff, Prop.disjoint_iff]
rintro i ⟨rfl, rfl⟩
exact hne rfl)) =
s :=
ext fun b => by simpa using h b
#align finset.disj_Union_filter_eq_of_maps_to Finset.disjUnionᵢ_filter_eq_of_maps_to
-/
end DisjUnion
section BUnion
/-!
### bUnion
This section is about the bounded union of an indexed family `t : α → finset β` of finite sets
over a finite set `s : finset α`.
-/
variable [DecidableEq β] {s s₁ s₂ : Finset α} {t t₁ t₂ : α → Finset β}
#print Finset.bunionᵢ /-
/-- `bUnion s t` is the union of `t x` over `x ∈ s`.
(This was formerly `bind` due to the monad structure on types with `decidable_eq`.) -/
-- Implemented on the underlying multisets: `bind` concatenates all the `t a`,
-- and `toFinset` removes duplicates (which is why `DecidableEq β` is required).
protected def bunionᵢ (s : Finset α) (t : α → Finset β) : Finset β :=
(s.1.bind fun a => (t a).1).toFinset
#align finset.bUnion Finset.bunionᵢ
-/
/- warning: finset.bUnion_val -> Finset.bunionᵢ_val is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u1} α) (t : α -> (Finset.{u2} β)), Eq.{succ u2} (Multiset.{u2} β) (Finset.val.{u2} β (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Multiset.dedup.{u2} β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.bind.{u1, u2} α β (Finset.val.{u1} α s) (fun (a : α) => Finset.val.{u2} β (t a))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] (s : Finset.{u2} α) (t : α -> (Finset.{u1} β)), Eq.{succ u1} (Multiset.{u1} β) (Finset.val.{u1} β (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Multiset.dedup.{u1} β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.bind.{u2, u1} α β (Finset.val.{u2} α s) (fun (a : α) => Finset.val.{u1} β (t a))))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_val Finset.bunionᵢ_valₓ'. -/
@[simp]
-- The underlying multiset of `s.bunionᵢ t` is the deduplicated multiset `bind`.
-- Holds definitionally (`rfl`), unfolding the definition of `bunionᵢ` above.
theorem bunionᵢ_val (s : Finset α) (t : α → Finset β) :
(s.bunionᵢ t).1 = (s.1.bind fun a => (t a).1).dedup :=
rfl
#align finset.bUnion_val Finset.bunionᵢ_val
#print Finset.bunionᵢ_empty /-
-- The bounded union over the empty index set is empty; definitional (`rfl`).
@[simp]
theorem bunionᵢ_empty : Finset.bunionᵢ ∅ t = ∅ :=
rfl
#align finset.bUnion_empty Finset.bunionᵢ_empty
-/
/- warning: finset.mem_bUnion -> Finset.mem_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {b : β}, Iff (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Exists.{succ u1} α (fun (a : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) => Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) b (t a))))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} {b : β}, Iff (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Exists.{succ u1} α (fun (a : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) a s) (Membership.mem.{u2, u2} β (Finset.{u2} β) (Finset.instMembershipFinset.{u2} β) b (t a))))
Case conversion may be inaccurate. Consider using '#align finset.mem_bUnion Finset.mem_bunionᵢₓ'. -/
@[simp]
-- Membership characterization: `b ∈ s.bunionᵢ t` iff some index `a ∈ s` has `b ∈ t a`.
-- Reduced to multiset membership lemmas (`mem_dedup`, `mem_bind`) via `simp only`.
theorem mem_bunionᵢ {b : β} : b ∈ s.bunionᵢ t ↔ ∃ a ∈ s, b ∈ t a := by
simp only [mem_def, bUnion_val, mem_dedup, mem_bind, exists_prop]
#align finset.mem_bUnion Finset.mem_bunionᵢ
#print Finset.coe_bunionᵢ /-
-- Coercion to `Set` commutes with bounded union: the coercion of `s.bunionᵢ t`
-- is the set-level indexed union `⋃ x ∈ ↑s, t x`. Proved pointwise by `simp`.
@[simp, norm_cast]
theorem coe_bunionᵢ : (s.bunionᵢ t : Set β) = ⋃ x ∈ (s : Set α), t x := by
simp only [Set.ext_iff, mem_bUnion, Set.mem_unionᵢ, iff_self_iff, mem_coe, imp_true_iff]
#align finset.coe_bUnion Finset.coe_bunionᵢ
-/
/- warning: finset.bUnion_insert -> Finset.bunionᵢ_insert is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)} [_inst_2 : DecidableEq.{succ u1} α] {a : α}, Eq.{succ u2} (Finset.{u2} β) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) (Insert.insert.{u1, u1} α (Finset.{u1} α) (Finset.hasInsert.{u1} α (fun (a : α) (b : α) => _inst_2 a b)) a s) t) (Union.union.{u2} (Finset.{u2} β) (Finset.hasUnion.{u2} β (fun (a : β) (b : β) => _inst_1 a b)) (t a) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] {s : Finset.{u2} α} {t : α -> (Finset.{u1} β)} [_inst_2 : DecidableEq.{succ u2} α] {a : α}, Eq.{succ u1} (Finset.{u1} β) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) (Insert.insert.{u2, u2} α (Finset.{u2} α) (Finset.instInsertFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b)) a s) t) (Union.union.{u1} (Finset.{u1} β) (Finset.instUnionFinset.{u1} β (fun (a : β) (b : β) => _inst_1 a b)) (t a) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s t))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_insert Finset.bunionᵢ_insertₓ'. -/
@[simp]
-- Peeling off one index: the union over `insert a s` splits as `t a ∪ s.bunionᵢ t`.
-- Proved element-wise via `ext` and simp lemmas for membership in insert/union.
theorem bunionᵢ_insert [DecidableEq α] {a : α} : (insert a s).bunionᵢ t = t a ∪ s.bunionᵢ t :=
ext fun x => by
simp only [mem_bUnion, exists_prop, mem_union, mem_insert, or_and_right, exists_or,
exists_eq_left]
#align finset.bUnion_insert Finset.bunionᵢ_insert
/- warning: finset.bUnion_congr -> Finset.bunionᵢ_congr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s₁ : Finset.{u1} α} {s₂ : Finset.{u1} α} {t₁ : α -> (Finset.{u2} β)} {t₂ : α -> (Finset.{u2} β)}, (Eq.{succ u1} (Finset.{u1} α) s₁ s₂) -> (forall (a : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s₁) -> (Eq.{succ u2} (Finset.{u2} β) (t₁ a) (t₂ a))) -> (Eq.{succ u2} (Finset.{u2} β) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s₁ t₁) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s₂ t₂))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] {s₁ : Finset.{u2} α} {s₂ : Finset.{u2} α} {t₁ : α -> (Finset.{u1} β)} {t₂ : α -> (Finset.{u1} β)}, (Eq.{succ u2} (Finset.{u2} α) s₁ s₂) -> (forall (a : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s₁) -> (Eq.{succ u1} (Finset.{u1} β) (t₁ a) (t₂ a))) -> (Eq.{succ u1} (Finset.{u1} β) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s₁ t₁) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s₂ t₂))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_congr Finset.bunionᵢ_congrₓ'. -/
-- ext $ λ x, by simp [or_and_distrib_right, exists_or_distrib]
-- Congruence lemma: equal index sets and pointwise-equal (on `s₁`) families give
-- equal bounded unions. `contextual := true` lets simp use `hs`/`ht` under binders.
theorem bunionᵢ_congr (hs : s₁ = s₂) (ht : ∀ a ∈ s₁, t₁ a = t₂ a) : s₁.bunionᵢ t₁ = s₂.bunionᵢ t₂ :=
ext fun x => by simp (config := { contextual := true }) [hs, ht]
#align finset.bUnion_congr Finset.bunionᵢ_congr
/- warning: finset.disj_Union_eq_bUnion -> Finset.disjUnionᵢ_eq_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (hf : Set.PairwiseDisjoint.{u2, u1} (Finset.{u2} β) α (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) f), Eq.{succ u2} (Finset.{u2} β) (Finset.disjUnionₓ.{u1, u2} α β s f hf) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s f)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] (s : Finset.{u2} α) (f : α -> (Finset.{u1} β)) (hf : Set.PairwiseDisjoint.{u1, u2} (Finset.{u1} β) α (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (Finset.toSet.{u2} α s) f), Eq.{succ u1} (Finset.{u1} β) (Finset.disjUnionᵢ.{u2, u1} α β s f hf) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s f)
Case conversion may be inaccurate. Consider using '#align finset.disj_Union_eq_bUnion Finset.disjUnionᵢ_eq_bunionᵢₓ'. -/
@[simp]
-- When the family is pairwise disjoint, `disjUnionₓ` agrees with `bunionᵢ`:
-- the dedup performed by `bunionᵢ` is a no-op on an already-disjoint bind.
theorem disjUnionᵢ_eq_bunionᵢ (s : Finset α) (f : α → Finset β) (hf) :
s.disjUnionₓ f hf = s.bunionᵢ f :=
by
dsimp [disj_Union, Finset.bunionᵢ, Function.comp]
-- Name the nodup proof produced by unfolding so its `dedup` lemma can be applied.
generalize_proofs h
exact eq_of_veq h.dedup.symm
#align finset.disj_Union_eq_bUnion Finset.disjUnionᵢ_eq_bunionᵢ
#print Finset.bunionᵢ_subset /-
-- A bounded union is contained in `s'` iff every member family `t x` (for `x ∈ s`) is.
theorem bunionᵢ_subset {s' : Finset β} : s.bunionᵢ t ⊆ s' ↔ ∀ x ∈ s, t x ⊆ s' := by
simp only [subset_iff, mem_bUnion] <;>
exact ⟨fun H a ha b hb => H ⟨a, ha, hb⟩, fun H b ⟨a, ha, hb⟩ => H a ha hb⟩
#align finset.bUnion_subset Finset.bunionᵢ_subset
-/
#print Finset.singleton_bunionᵢ /-
-- Union over a singleton index set is just the single family member `t a`.
-- `classical` supplies the `DecidableEq α` needed to rewrite `{a}` as `insert a ∅`.
@[simp]
theorem singleton_bunionᵢ {a : α} : Finset.bunionᵢ {a} t = t a := by
classical rw [← insert_emptyc_eq, bUnion_insert, bUnion_empty, union_empty]
#align finset.singleton_bUnion Finset.singleton_bunionᵢ
-/
/- warning: finset.bUnion_inter -> Finset.bunionᵢ_inter is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (t : Finset.{u2} β), Eq.{succ u2} (Finset.{u2} β) (Inter.inter.{u2} (Finset.{u2} β) (Finset.hasInter.{u2} β (fun (a : β) (b : β) => _inst_1 a b)) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s f) t) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s (fun (x : α) => Inter.inter.{u2} (Finset.{u2} β) (Finset.hasInter.{u2} β (fun (a : β) (b : β) => _inst_1 a b)) (f x) t))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] (s : Finset.{u2} α) (f : α -> (Finset.{u1} β)) (t : Finset.{u1} β), Eq.{succ u1} (Finset.{u1} β) (Inter.inter.{u1} (Finset.{u1} β) (Finset.instInterFinset.{u1} β (fun (a : β) (b : β) => _inst_1 a b)) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s f) t) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s (fun (x : α) => Inter.inter.{u1} (Finset.{u1} β) (Finset.instInterFinset.{u1} β (fun (a : β) (b : β) => _inst_1 a b)) (f x) t))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_inter Finset.bunionᵢ_interₓ'. -/
-- Intersection distributes over a bounded union on the right:
-- `(⋃ x ∈ s, f x) ∩ t = ⋃ x ∈ s, (f x ∩ t)`. Proved element-wise, closed by `tauto`.
theorem bunionᵢ_inter (s : Finset α) (f : α → Finset β) (t : Finset β) :
s.bunionᵢ f ∩ t = s.bunionᵢ fun x => f x ∩ t :=
by
ext x
simp only [mem_bUnion, mem_inter]
tauto
#align finset.bUnion_inter Finset.bunionᵢ_inter
#print Finset.inter_bunionᵢ /-
-- Left-handed companion of `bunionᵢ_inter`, obtained by commutativity of `∩`.
theorem inter_bunionᵢ (t : Finset β) (s : Finset α) (f : α → Finset β) :
t ∩ s.bunionᵢ f = s.bunionᵢ fun x => t ∩ f x := by
rw [inter_comm, bUnion_inter] <;> simp [inter_comm]
#align finset.inter_bUnion Finset.inter_bunionᵢ
-/
/- warning: finset.bUnion_bUnion -> Finset.bunionᵢ_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{succ u3} γ] (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (g : β -> (Finset.{u3} γ)), Eq.{succ u3} (Finset.{u3} γ) (Finset.bunionᵢ.{u2, u3} β γ (fun (a : γ) (b : γ) => _inst_2 a b) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s f) g) (Finset.bunionᵢ.{u1, u3} α γ (fun (a : γ) (b : γ) => _inst_2 a b) s (fun (a : α) => Finset.bunionᵢ.{u2, u3} β γ (fun (a : γ) (b : γ) => _inst_2 a b) (f a) g))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} β] [_inst_2 : DecidableEq.{succ u3} γ] (s : Finset.{u2} α) (f : α -> (Finset.{u1} β)) (g : β -> (Finset.{u3} γ)), Eq.{succ u3} (Finset.{u3} γ) (Finset.bunionᵢ.{u1, u3} β γ (fun (a : γ) (b : γ) => _inst_2 a b) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s f) g) (Finset.bunionᵢ.{u2, u3} α γ (fun (a : γ) (b : γ) => _inst_2 a b) s (fun (a : α) => Finset.bunionᵢ.{u1, u3} β γ (fun (a : γ) (b : γ) => _inst_2 a b) (f a) g))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_bUnion Finset.bunionᵢ_bunionᵢₓ'. -/
-- Associativity of bounded union: unioning `g` over `s.bunionᵢ f` equals unioning
-- the inner unions `(f a).bunionᵢ g` over `s`. Proved by shuffling the two
-- existential quantifiers in the membership statement.
theorem bunionᵢ_bunionᵢ [DecidableEq γ] (s : Finset α) (f : α → Finset β) (g : β → Finset γ) :
(s.bunionᵢ f).bunionᵢ g = s.bunionᵢ fun a => (f a).bunionᵢ g :=
by
ext
simp only [Finset.mem_bunionᵢ, exists_prop]
-- Regroup the conjunctions so the two `∃` binders can be swapped by `exists_comm`.
simp_rw [← exists_and_right, ← exists_and_left, and_assoc']
rw [exists_comm]
#align finset.bUnion_bUnion Finset.bunionᵢ_bunionᵢ
/- warning: finset.bind_to_finset -> Finset.bind_toFinset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{succ u1} α] (s : Multiset.{u1} α) (t : α -> (Multiset.{u2} β)), Eq.{succ u2} (Finset.{u2} β) (Multiset.toFinset.{u2} β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.bind.{u1, u2} α β s t)) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_2 a b) s) (fun (a : α) => Multiset.toFinset.{u2} β (fun (a : β) (b : β) => _inst_1 a b) (t a)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] [_inst_2 : DecidableEq.{succ u2} α] (s : Multiset.{u2} α) (t : α -> (Multiset.{u1} β)), Eq.{succ u1} (Finset.{u1} β) (Multiset.toFinset.{u1} β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.bind.{u2, u1} α β s t)) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) (Multiset.toFinset.{u2} α (fun (a : α) (b : α) => _inst_2 a b) s) (fun (a : α) => Multiset.toFinset.{u1} β (fun (a : β) (b : β) => _inst_1 a b) (t a)))
Case conversion may be inaccurate. Consider using '#align finset.bind_to_finset Finset.bind_toFinsetₓ'. -/
-- `toFinset` commutes with multiset `bind`: converting the bind of multisets to a
-- finset equals the bounded union of the pointwise conversions. Proved element-wise.
theorem bind_toFinset [DecidableEq α] (s : Multiset α) (t : α → Multiset β) :
(s.bind t).toFinset = s.toFinset.bunionᵢ fun a => (t a).toFinset :=
ext fun x => by simp only [Multiset.mem_toFinset, mem_bUnion, Multiset.mem_bind, exists_prop]
#align finset.bind_to_finset Finset.bind_toFinset
/- warning: finset.bUnion_mono -> Finset.bunionᵢ_mono is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t₁ : α -> (Finset.{u2} β)} {t₂ : α -> (Finset.{u2} β)}, (forall (a : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s) -> (HasSubset.Subset.{u2} (Finset.{u2} β) (Finset.hasSubset.{u2} β) (t₁ a) (t₂ a))) -> (HasSubset.Subset.{u2} (Finset.{u2} β) (Finset.hasSubset.{u2} β) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t₁) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t₂))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] {s : Finset.{u2} α} {t₁ : α -> (Finset.{u1} β)} {t₂ : α -> (Finset.{u1} β)}, (forall (a : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s) -> (HasSubset.Subset.{u1} (Finset.{u1} β) (Finset.instHasSubsetFinset.{u1} β) (t₁ a) (t₂ a))) -> (HasSubset.Subset.{u1} (Finset.{u1} β) (Finset.instHasSubsetFinset.{u1} β) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s t₁) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s t₂))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_mono Finset.bunionᵢ_monoₓ'. -/
-- Monotonicity in the family: pointwise inclusion `t₁ a ⊆ t₂ a` on `s` gives
-- inclusion of the bounded unions over the same index set `s`.
theorem bunionᵢ_mono (h : ∀ a ∈ s, t₁ a ⊆ t₂ a) : s.bunionᵢ t₁ ⊆ s.bunionᵢ t₂ :=
by
-- Witness transfer: any `b ∈ t₁ a` also lies in `t₂ a` by `h`, keeping the same index.
have : ∀ b a, a ∈ s → b ∈ t₁ a → ∃ a : α, a ∈ s ∧ b ∈ t₂ a := fun b a ha hb =>
⟨a, ha, Finset.mem_of_subset (h a ha) hb⟩
simpa only [subset_iff, mem_bUnion, exists_imp, and_imp, exists_prop]
#align finset.bUnion_mono Finset.bunionᵢ_mono
#print Finset.bunionᵢ_subset_bunionᵢ_of_subset_left /-
-- Monotonicity in the index set: enlarging `s₁` to `s₂` enlarges the bounded union,
-- with the family `t` held fixed.
theorem bunionᵢ_subset_bunionᵢ_of_subset_left (t : α → Finset β) (h : s₁ ⊆ s₂) :
s₁.bunionᵢ t ⊆ s₂.bunionᵢ t := by
intro x
simp only [and_imp, mem_bUnion, exists_prop]
-- Keep the same witness `a`; only its membership proof is upgraded via `h`.
exact Exists.imp fun a ha => ⟨h ha.1, ha.2⟩
#align finset.bUnion_subset_bUnion_of_subset_left Finset.bunionᵢ_subset_bunionᵢ_of_subset_left
-/
#print Finset.subset_bunionᵢ_of_mem /-
-- Each family member `u x` (for `x ∈ s`) is contained in the bounded union,
-- by factoring through the singleton union `{x}.bunionᵢ u = u x`.
theorem subset_bunionᵢ_of_mem (u : α → Finset β) {x : α} (xs : x ∈ s) : u x ⊆ s.bunionᵢ u :=
singleton_bunionᵢ.Superset.trans <|
bunionᵢ_subset_bunionᵢ_of_subset_left u <| singleton_subset_iff.2 xs
#align finset.subset_bUnion_of_mem Finset.subset_bunionᵢ_of_mem
-/
/- warning: finset.bUnion_subset_iff_forall_subset -> Finset.bunionᵢ_subset_iff_forall_subset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_2 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : Finset.{u2} β} {f : α -> (Finset.{u2} β)}, Iff (HasSubset.Subset.{u2} (Finset.{u2} β) (Finset.hasSubset.{u2} β) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_2 a b) s f) t) (forall (x : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) -> (HasSubset.Subset.{u2} (Finset.{u2} β) (Finset.hasSubset.{u2} β) (f x) t))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_2 : DecidableEq.{succ u1} β] {s : Finset.{u2} α} {t : Finset.{u1} β} {f : α -> (Finset.{u1} β)}, Iff (HasSubset.Subset.{u1} (Finset.{u1} β) (Finset.instHasSubsetFinset.{u1} β) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_2 a b) s f) t) (forall (x : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s) -> (HasSubset.Subset.{u1} (Finset.{u1} β) (Finset.instHasSubsetFinset.{u1} β) (f x) t))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_subset_iff_forall_subset Finset.bunionᵢ_subset_iff_forall_subsetₓ'. -/
-- A `bUnion` is contained in `t` iff every fibre over `s` is contained in `t`.
@[simp]
theorem bunionᵢ_subset_iff_forall_subset {α β : Type _} [DecidableEq β] {s : Finset α}
    {t : Finset β} {f : α → Finset β} : s.bunionᵢ f ⊆ t ↔ ∀ x ∈ s, f x ⊆ t :=
  ⟨fun h x hx => (subset_bunionᵢ_of_mem f hx).trans h, fun h x hx =>
    let ⟨a, ha₁, ha₂⟩ := mem_bunionᵢ.mp hx
    h _ ha₁ ha₂⟩
#align finset.bUnion_subset_iff_forall_subset Finset.bunionᵢ_subset_iff_forall_subset
-- Taking the union of the singletons of a finset's elements recovers the finset.
#print Finset.bunionᵢ_singleton_eq_self /-
@[simp]
theorem bunionᵢ_singleton_eq_self [DecidableEq α] : s.bunionᵢ (singleton : α → Finset α) = s :=
  ext fun x => by simp only [mem_bUnion, mem_singleton, exists_prop, exists_eq_right']
#align finset.bUnion_singleton_eq_self Finset.bunionᵢ_singleton_eq_self
-/
/- warning: finset.filter_bUnion -> Finset.filter_bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (p : β -> Prop) [_inst_2 : DecidablePred.{succ u2} β p], Eq.{succ u2} (Finset.{u2} β) (Finset.filter.{u2} β p (fun (a : β) => _inst_2 a) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s f)) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s (fun (a : α) => Finset.filter.{u2} β p (fun (a : β) => _inst_2 a) (f a)))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] (s : Finset.{u2} α) (f : α -> (Finset.{u1} β)) (p : β -> Prop) [_inst_2 : DecidablePred.{succ u1} β p], Eq.{succ u1} (Finset.{u1} β) (Finset.filter.{u1} β p (fun (a : β) => _inst_2 a) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s f)) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s (fun (a : α) => Finset.filter.{u1} β p (fun (a : β) => _inst_2 a) (f a)))
Case conversion may be inaccurate. Consider using '#align finset.filter_bUnion Finset.filter_bunionᵢₓ'. -/
-- `filter` commutes with `bUnion`: filtering the union equals
-- the union of the filtered fibres.
theorem filter_bunionᵢ (s : Finset α) (f : α → Finset β) (p : β → Prop) [DecidablePred p] :
    (s.bunionᵢ f).filterₓ p = s.bunionᵢ fun a => (f a).filterₓ p :=
  by
  ext b
  simp only [mem_bUnion, exists_prop, mem_filter]
  constructor
  · rintro ⟨⟨a, ha, hba⟩, hb⟩
    exact ⟨a, ha, hba, hb⟩
  · rintro ⟨a, ha, hba, hb⟩
    exact ⟨⟨a, ha, hba⟩, hb⟩
#align finset.filter_bUnion Finset.filter_bunionᵢ
/- warning: finset.bUnion_filter_eq_of_maps_to -> Finset.bunionᵢ_filter_eq_of_maps_to is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{succ u1} α] {s : Finset.{u1} α} {t : Finset.{u2} β} {f : α -> β}, (forall (x : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) -> (Membership.Mem.{u2, u2} β (Finset.{u2} β) (Finset.hasMem.{u2} β) (f x) t)) -> (Eq.{succ u1} (Finset.{u1} α) (Finset.bunionᵢ.{u2, u1} β α (fun (a : α) (b : α) => _inst_2 a b) t (fun (a : β) => Finset.filter.{u1} α (fun (c : α) => Eq.{succ u2} β (f c) a) (fun (a_1 : α) => _inst_1 (f a_1) a) s)) s)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] [_inst_2 : DecidableEq.{succ u2} α] {s : Finset.{u2} α} {t : Finset.{u1} β} {f : α -> β}, (forall (x : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s) -> (Membership.mem.{u1, u1} β (Finset.{u1} β) (Finset.instMembershipFinset.{u1} β) (f x) t)) -> (Eq.{succ u2} (Finset.{u2} α) (Finset.bunionᵢ.{u1, u2} β α (fun (a : α) (b : α) => _inst_2 a b) t (fun (a : β) => Finset.filter.{u2} α (fun (c : α) => Eq.{succ u1} β (f c) a) (fun (a_1 : α) => _inst_1 (f a_1) a) s)) s)
Case conversion may be inaccurate. Consider using '#align finset.bUnion_filter_eq_of_maps_to Finset.bunionᵢ_filter_eq_of_maps_toₓ'. -/
-- Partitioning `s` into fibres of `f` (indexed over a codomain finset `t`
-- that `f` maps into) and re-uniting them recovers `s`.
theorem bunionᵢ_filter_eq_of_maps_to [DecidableEq α] {s : Finset α} {t : Finset β} {f : α → β}
    (h : ∀ x ∈ s, f x ∈ t) : (t.bunionᵢ fun a => s.filterₓ fun c => f c = a) = s := by
  simpa only [disj_Union_eq_bUnion] using disj_Union_filter_eq_of_maps_to h
#align finset.bUnion_filter_eq_of_maps_to Finset.bunionᵢ_filter_eq_of_maps_to
-- `erase` commutes with `bUnion`: erasing `b` from the union equals
-- the union of the fibres with `b` erased.
#print Finset.erase_bunionᵢ /-
theorem erase_bunionᵢ (f : α → Finset β) (s : Finset α) (b : β) :
    (s.bunionᵢ f).eraseₓ b = s.bunionᵢ fun x => (f x).eraseₓ b :=
  by
  ext
  simp only [Finset.mem_bunionᵢ, iff_self_iff, exists_and_left, Finset.mem_erase]
#align finset.erase_bUnion Finset.erase_bunionᵢ
-/
/- warning: finset.bUnion_nonempty -> Finset.bunionᵢ_nonempty is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)}, Iff (Finset.Nonempty.{u2} β (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Exists.{succ u1} α (fun (x : α) => Exists.{0} (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) (fun (H : Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) => Finset.Nonempty.{u2} β (t x))))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)}, Iff (Finset.Nonempty.{u2} β (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t)) (Exists.{succ u1} α (fun (x : α) => And (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) x s) (Finset.Nonempty.{u2} β (t x))))
Case conversion may be inaccurate. Consider using '#align finset.bUnion_nonempty Finset.bunionᵢ_nonemptyₓ'. -/
-- A `bUnion` is nonempty iff some index in `s` has a nonempty fibre.
@[simp]
theorem bunionᵢ_nonempty : (s.bunionᵢ t).Nonempty ↔ ∃ x ∈ s, (t x).Nonempty := by
  simp [Finset.Nonempty, ← exists_and_left, @exists_swap α]
#align finset.bUnion_nonempty Finset.bunionᵢ_nonempty
/- warning: finset.nonempty.bUnion -> Finset.Nonempty.bunionᵢ is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] {s : Finset.{u1} α} {t : α -> (Finset.{u2} β)}, (Finset.Nonempty.{u1} α s) -> (forall (x : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s) -> (Finset.Nonempty.{u2} β (t x))) -> (Finset.Nonempty.{u2} β (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s t))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] {s : Finset.{u2} α} {t : α -> (Finset.{u1} β)}, (Finset.Nonempty.{u2} α s) -> (forall (x : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s) -> (Finset.Nonempty.{u1} β (t x))) -> (Finset.Nonempty.{u1} β (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s t))
Case conversion may be inaccurate. Consider using '#align finset.nonempty.bUnion Finset.Nonempty.bunionᵢₓ'. -/
-- A nonempty index set with everywhere-nonempty fibres yields a nonempty union.
theorem Nonempty.bunionᵢ (hs : s.Nonempty) (ht : ∀ x ∈ s, (t x).Nonempty) :
    (s.bunionᵢ t).Nonempty :=
  bunionᵢ_nonempty.2 <| hs.imp fun x hx => ⟨hx, ht x hx⟩
#align finset.nonempty.bUnion Finset.Nonempty.bunionᵢ
/- warning: finset.disjoint_bUnion_left -> Finset.disjoint_bunionᵢ_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u1} α) (f : α -> (Finset.{u2} β)) (t : Finset.{u2} β), Iff (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) s f) t) (forall (i : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i s) -> (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) (f i) t))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} β] (s : Finset.{u2} α) (f : α -> (Finset.{u1} β)) (t : Finset.{u1} β), Iff (Disjoint.{u1} (Finset.{u1} β) (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (Finset.bunionᵢ.{u2, u1} α β (fun (a : β) (b : β) => _inst_1 a b) s f) t) (forall (i : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) i s) -> (Disjoint.{u1} (Finset.{u1} β) (Finset.partialOrder.{u1} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} β) (f i) t))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_bUnion_left Finset.disjoint_bunionᵢ_leftₓ'. -/
-- A union is disjoint from `t` iff every fibre is; proved by induction on `s`.
theorem disjoint_bunionᵢ_left (s : Finset α) (f : α → Finset β) (t : Finset β) :
    Disjoint (s.bunionᵢ f) t ↔ ∀ i ∈ s, Disjoint (f i) t := by
  classical
    refine' s.induction _ _
    · simp only [forall_mem_empty_iff, bUnion_empty, disjoint_empty_left]
    · intro i s his ih
      simp only [disjoint_union_left, bUnion_insert, his, forall_mem_insert, ih]
#align finset.disjoint_bUnion_left Finset.disjoint_bunionᵢ_left
/- warning: finset.disjoint_bUnion_right -> Finset.disjoint_bunionᵢ_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u2} β) (t : Finset.{u1} α) (f : α -> (Finset.{u2} β)), Iff (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) s (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) t f)) (forall (i : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) i t) -> (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.orderBot.{u2} β) s (f i)))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} [_inst_1 : DecidableEq.{succ u2} β] (s : Finset.{u2} β) (t : Finset.{u1} α) (f : α -> (Finset.{u2} β)), Iff (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) s (Finset.bunionᵢ.{u1, u2} α β (fun (a : β) (b : β) => _inst_1 a b) t f)) (forall (i : α), (Membership.mem.{u1, u1} α (Finset.{u1} α) (Finset.instMembershipFinset.{u1} α) i t) -> (Disjoint.{u2} (Finset.{u2} β) (Finset.partialOrder.{u2} β) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u2} β) s (f i)))
Case conversion may be inaccurate. Consider using '#align finset.disjoint_bUnion_right Finset.disjoint_bunionᵢ_rightₓ'. -/
-- Right-hand counterpart of `disjoint_bunionᵢ_left`, by symmetry of `Disjoint`.
theorem disjoint_bunionᵢ_right (s : Finset β) (t : Finset α) (f : α → Finset β) :
    Disjoint s (t.bunionᵢ f) ↔ ∀ i ∈ t, Disjoint s (f i) := by
  simpa only [disjoint_comm] using disjoint_bUnion_left t f s
#align finset.disjoint_bUnion_right Finset.disjoint_bunionᵢ_right
end BUnion
/-! ### choose -/
-- Extracting the unique element of a finset satisfying a decidable predicate,
-- together with its defining properties.
section Choose
variable (p : α → Prop) [DecidablePred p] (l : Finset α)
#print Finset.chooseX /-
/-- Given a finset `l` and a predicate `p`, associate to a proof that there is a unique element of
`l` satisfying `p` this unique element, as an element of the corresponding subtype. -/
def chooseX (hp : ∃! a, a ∈ l ∧ p a) : { a // a ∈ l ∧ p a } :=
  Multiset.chooseX p l.val hp
#align finset.choose_x Finset.chooseX
-/
#print Finset.choose /-
/-- Given a finset `l` and a predicate `p`, associate to a proof that there is a unique element of
`l` satisfying `p` this unique element, as an element of the ambient type. -/
def choose (hp : ∃! a, a ∈ l ∧ p a) : α :=
  chooseX p l hp
#align finset.choose Finset.choose
-/
#print Finset.choose_spec /-
-- `choose` lands in `l` and satisfies `p`.
theorem choose_spec (hp : ∃! a, a ∈ l ∧ p a) : choose p l hp ∈ l ∧ p (choose p l hp) :=
  (chooseX p l hp).property
#align finset.choose_spec Finset.choose_spec
-/
#print Finset.choose_mem /-
theorem choose_mem (hp : ∃! a, a ∈ l ∧ p a) : choose p l hp ∈ l :=
  (choose_spec _ _ _).1
#align finset.choose_mem Finset.choose_mem
-/
#print Finset.choose_property /-
theorem choose_property (hp : ∃! a, a ∈ l ∧ p a) : p (choose p l hp) :=
  (choose_spec _ _ _).2
#align finset.choose_property Finset.choose_property
-/
end Choose
section Pairwise
variable {s : Finset α}
/- warning: finset.pairwise_subtype_iff_pairwise_finset' -> Finset.pairwise_subtype_iff_pairwise_finset' is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} (r : β -> β -> Prop) (f : α -> β), Iff (Pairwise.{u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) (Function.onFun.{succ u1, succ u2, 1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) β Prop r (fun (x : coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) => f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) x)))) (Set.Pairwise.{u1} α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (Finset.{u1} α) (Set.{u1} α) (HasLiftT.mk.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (CoeTCₓ.coe.{succ u1, succ u1} (Finset.{u1} α) (Set.{u1} α) (Finset.Set.hasCoeT.{u1} α))) s) (Function.onFun.{succ u1, succ u2, 1} α β Prop r f))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {s : Finset.{u2} α} (r : β -> β -> Prop) (f : α -> β), Iff (Pairwise.{u2} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) (Function.onFun.{succ u2, succ u1, 1} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) β Prop r (fun (x : Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) => f (Subtype.val.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s) x)))) (Set.Pairwise.{u2} α (Finset.toSet.{u2} α s) (Function.onFun.{succ u2, succ u1, 1} α β Prop r f))
Case conversion may be inaccurate. Consider using '#align finset.pairwise_subtype_iff_pairwise_finset' Finset.pairwise_subtype_iff_pairwise_finset'ₓ'. -/
-- `Pairwise` of `r` on `f`-images over the coercion-to-subtype of `s`
-- coincides with `Set.Pairwise` on the coercion of `s` to a set.
theorem pairwise_subtype_iff_pairwise_finset' (r : β → β → Prop) (f : α → β) :
    Pairwise (r on fun x : s => f x) ↔ (s : Set α).Pairwise (r on f) :=
  pairwise_subtype_iff_pairwise_set (s : Set α) (r on f)
#align finset.pairwise_subtype_iff_pairwise_finset' Finset.pairwise_subtype_iff_pairwise_finset'
-- The primed lemma specialised to `f = id`.
#print Finset.pairwise_subtype_iff_pairwise_finset /-
theorem pairwise_subtype_iff_pairwise_finset (r : α → α → Prop) :
    Pairwise (r on fun x : s => x) ↔ (s : Set α).Pairwise r :=
  pairwise_subtype_iff_pairwise_finset' r id
#align finset.pairwise_subtype_iff_pairwise_finset Finset.pairwise_subtype_iff_pairwise_finset
-/
/- warning: finset.pairwise_cons' -> Finset.pairwise_cons' is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {s : Finset.{u1} α} {a : α} (ha : Not (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) a s)) (r : β -> β -> Prop) (f : α -> β), Iff (Pairwise.{u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) (Function.onFun.{succ u1, succ u2, 1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) β Prop r (fun (a_1 : coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) => f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) (Finset.cons.{u1} α a s ha)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x (Finset.cons.{u1} α a s ha)))))) a_1)))) (And (Pairwise.{u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) (Function.onFun.{succ u1, succ u2, 1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) β Prop r (fun (a : coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) => f ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (HasLiftT.mk.{succ 
u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Finset.{u1} α) Type.{u1} (Finset.hasCoeToSort.{u1} α) s) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) x s))))) a)))) (forall (b : α), (Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) b s) -> (And (r (f a) (f b)) (r (f b) (f a)))))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}} {s : Finset.{u2} α} {a : α} (ha : Not (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) a s)) (r : β -> β -> Prop) (f : α -> β), Iff (Pairwise.{u2} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x (Finset.cons.{u2} α a s ha))) (Function.onFun.{succ u2, succ u1, 1} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x (Finset.cons.{u2} α a s ha))) β Prop r (fun (a_1 : Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x (Finset.cons.{u2} α a s ha))) => f (Subtype.val.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x (Finset.cons.{u2} α a s ha)) a_1)))) (And (Pairwise.{u2} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) (Function.onFun.{succ u2, succ u1, 1} (Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) β Prop r (fun (a : Subtype.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s)) => f (Subtype.val.{succ u2} α (fun (x : α) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) x s) a)))) (forall (b : α), (Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) b s) -> (And (r (f a) (f b)) (r (f b) (f a)))))
Case conversion may be inaccurate. Consider using '#align finset.pairwise_cons' Finset.pairwise_cons'ₓ'. -/
-- Unfolds `Pairwise` over `s.cons a ha`: it is pairwise over `s` together with
-- `r` holding both ways between `f a` and `f b` for every `b ∈ s`.
theorem pairwise_cons' {a : α} (ha : a ∉ s) (r : β → β → Prop) (f : α → β) :
    Pairwise (r on fun a : s.cons a ha => f a) ↔
      Pairwise (r on fun a : s => f a) ∧ ∀ b ∈ s, r (f a) (f b) ∧ r (f b) (f a) :=
  by
  simp only [pairwise_subtype_iff_pairwise_finset', Finset.coe_cons, Set.pairwise_insert,
    Finset.mem_coe, and_congr_right_iff]
  exact fun hsr =>
    ⟨fun h b hb =>
      h b hb <| by
        rintro rfl
        contradiction,
      fun h b hb _ => h b hb⟩
#align finset.pairwise_cons' Finset.pairwise_cons'
-- `pairwise_cons'` specialised to `f = id`.
#print Finset.pairwise_cons /-
theorem pairwise_cons {a : α} (ha : a ∉ s) (r : α → α → Prop) :
    Pairwise (r on fun a : s.cons a ha => a) ↔
      Pairwise (r on fun a : s => a) ∧ ∀ b ∈ s, r a b ∧ r b a :=
  pairwise_cons' ha r id
#align finset.pairwise_cons Finset.pairwise_cons
-/
end Pairwise
end Finset
namespace Equiv
-- Peels off the default element: `α ≃ Option {x // x ≠ default}`.
#print Equiv.sigmaEquivOptionOfInhabited /-
/--
Inhabited types are equivalent to `option β` for some `β` by identifying `default α` with `none`.
-/
def sigmaEquivOptionOfInhabited (α : Type u) [Inhabited α] [DecidableEq α] :
    Σβ : Type u, α ≃ Option β :=
  ⟨{ x : α // x ≠ default },
    { -- `default` goes to `none`; anything else to `some` of itself with a proof
      toFun := fun x : α => if h : x = default then none else some ⟨x, h⟩
      invFun := Option.elim' default coe
      left_inv := fun x => by
        dsimp only
        split_ifs <;> simp [*]
      right_inv := by
        rintro (_ | ⟨x, h⟩)
        · simp
        · dsimp only
          split_ifs with hi
          · simpa [h] using hi
          · simp }⟩
#align equiv.sigma_equiv_option_of_inhabited Equiv.sigmaEquivOptionOfInhabited
-/
end Equiv
namespace Multiset
variable [DecidableEq α]
/- warning: multiset.disjoint_to_finset -> Multiset.disjoint_toFinset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {m1 : Multiset.{u1} α} {m2 : Multiset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) m1) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) m2)) (Multiset.Disjoint.{u1} α m1 m2)
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {m1 : Multiset.{u1} α} {m2 : Multiset.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) m1) (Multiset.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) m2)) (Multiset.Disjoint.{u1} α m1 m2)
Case conversion may be inaccurate. Consider using '#align multiset.disjoint_to_finset Multiset.disjoint_toFinsetₓ'. -/
-- Disjointness of the underlying finsets coincides with multiset disjointness.
theorem disjoint_toFinset {m1 m2 : Multiset α} :
    Disjoint m1.toFinset m2.toFinset ↔ m1.Disjoint m2 :=
  by
  rw [Finset.disjoint_iff_ne]
  refine' ⟨fun h a ha1 ha2 => _, _⟩
  · rw [← Multiset.mem_toFinset] at ha1 ha2
    exact h _ ha1 _ ha2 rfl
  · rintro h a ha b hb rfl
    rw [Multiset.mem_toFinset] at ha hb
    exact h ha hb
#align multiset.disjoint_to_finset Multiset.disjoint_toFinset
end Multiset
namespace List
variable [DecidableEq α] {l l' : List α}
/- warning: list.disjoint_to_finset_iff_disjoint -> List.disjoint_toFinset_iff_disjoint is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {l : List.{u1} α} {l' : List.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.orderBot.{u1} α) (List.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) l) (List.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) l')) (List.Disjoint.{u1} α l l')
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : DecidableEq.{succ u1} α] {l : List.{u1} α} {l' : List.{u1} α}, Iff (Disjoint.{u1} (Finset.{u1} α) (Finset.partialOrder.{u1} α) (Finset.instOrderBotFinsetToLEToPreorderPartialOrder.{u1} α) (List.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) l) (List.toFinset.{u1} α (fun (a : α) (b : α) => _inst_1 a b) l')) (List.Disjoint.{u1} α l l')
Case conversion may be inaccurate. Consider using '#align list.disjoint_to_finset_iff_disjoint List.disjoint_toFinset_iff_disjointₓ'. -/
-- List version, reduced directly to the multiset statement.
theorem disjoint_toFinset_iff_disjoint : Disjoint l.toFinset l'.toFinset ↔ l.Disjoint l' :=
  Multiset.disjoint_toFinset
#align list.disjoint_to_finset_iff_disjoint List.disjoint_toFinset_iff_disjoint
end List
-- Assert that we define `finset` without the material on `list.sublists`.
-- Note that we cannot use `list.sublists` itself as that is defined very early.
assert_not_exists List.sublistsLen
assert_not_exists Multiset.powerset
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/Data/Finset/Basic.lean"}
|
import collections
import itertools
import jax.numpy as np
from jax import jit
import numpy.random as rnd
import numpy
import mdp.search_spaces as search_spaces
def onehot(x, N):
    """Return the one-hot encoding of index `x` as a length-`N` vector.

    Row `x` of the N x N identity matrix is exactly that one-hot vector.
    """
    identity = np.eye(N)
    return identity[x]
def entropy(p):
    """Shannon entropy (in nats) of the distribution `p`.

    The 1e-8 inside the log guards against log(0) on zero-probability entries.
    """
    eps = 1e-8
    return -(p * np.log(p + eps)).sum()
def sigmoid(x):
    """Logistic function: 1 / (1 + e^{-x}), elementwise."""
    one = 1.0
    return one / (one + np.exp(-x))
def softmax(x, axis=-1):
    """Softmax of `x` along `axis`.

    Bug fix: the `axis` argument was previously ignored — the normalising sum
    was hard-coded to the last axis, so e.g. `softmax(x, axis=0)` silently
    returned values normalised along the wrong axis. The maximum is also
    subtracted before exponentiating for numerical stability; softmax is
    shift-invariant along the reduced axis, so results are unchanged.

    Args:
        x: array of logits.
        axis (int): axis along which the output sums to 1 (default last).

    Returns:
        Array of the same shape as `x`, normalised along `axis`.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def normalize(x):
    """Scale every row of `x` to unit Euclidean norm."""
    row_norms = np.linalg.norm(x, axis=1, keepdims=True)
    return x / row_norms
def clip_by_norm(x, norm):
    """Softly clip `x` toward magnitude `norm`.

    A sigmoid gate on how far ||x|| exceeds `norm` blends the rescaled
    vector with the raw one, giving a smooth (differentiable) clip rather
    than a hard cutoff.
    """
    magnitude = np.linalg.norm(x)
    gate = sigmoid(magnitude - norm)
    # gate weights the rescaled vector, (1 - gate) the unclipped one
    return gate * x * norm / magnitude + (1 - gate) * x
# A finite MDP bundled as a flat record:
#   S: number of states, A: number of actions,
#   P: transition tensor, normalised over axis 0 by the builders below
#      (i.e. P[s', s, a] — presumably P(s' | s, a); confirm against callers),
#   r: reward matrix [n_states x n_actions],
#   discount: temporal discount factor,
#   d0: initial state distribution [n_states x 1].
MDP = collections.namedtuple('mdp', ['S', 'A', 'P', 'r', 'discount', 'd0'])
def build_random_mdp(n_states, n_actions, discount):
    """Sample a dense random MDP.

    Transitions are normalised over axis 0 (next-state axis) and the initial
    distribution over its rows, so both are valid probability distributions.
    """
    transitions = rnd.random((n_states, n_states, n_actions))
    rewards = rnd.standard_normal((n_states, n_actions))
    init = rnd.random((n_states, 1))
    transitions = transitions / transitions.sum(axis=0, keepdims=True)
    init = init / init.sum(axis=0, keepdims=True)
    return MDP(n_states, n_actions, transitions, rewards, discount, init)
def build_random_sparse_mdp(n_states, n_actions, discount):
    """Sample a random MDP whose transitions and rewards are ~50% zeroed out."""
    transitions = sparsify(rnd.random((n_states, n_states, n_actions)))
    rewards = sparsify(rnd.standard_normal((n_states, n_actions)))
    init = rnd.random((n_states, 1))
    return MDP(
        n_states,
        n_actions,
        transitions / transitions.sum(axis=0, keepdims=True),
        rewards,
        discount,
        init / init.sum(axis=0, keepdims=True),
    )
def sparsify(x):
    """Zero out roughly half of `x`'s entries with an iid Bernoulli(0.5) mask."""
    keep = (rnd.random(x.shape) > 0.5).astype(x.dtype)
    return x * keep
######################
def gen_grid_policies(N):
    """Enumerate an N x N grid of 2-state / 2-action policies.

    Special-cased to the 2x2 setting: the two grid coordinates are the
    probabilities on the diagonal of the policy matrix.
    """
    grid = np.linspace(0, 1, N)
    return [
        np.array([[p1, 1 - p1], [1 - p2, p2]])
        for p1 in grid
        for p2 in grid
    ]
# TODO! need this!
# def gen_grid_policies(N, n_states=2, n_actions=2):
# # special case for 2 x 2
# p1s, p2s = np.linspace(0,1,N), np.linspace(0,1,N)
# p1s = p1s.ravel()
# p2s = p2s.ravel()
# return [np.array([[p1, 1-p1],[1-p2, p2]]) for p1 in p1s for p2 in p2s]
def get_deterministic_policies(n_states, n_actions):
    """Enumerate all deterministic policies (one one-hot action row per state)."""
    corners = [np.eye(n_actions)[a] for a in range(n_actions)]
    combos = itertools.product(*(corners for _ in range(n_states)))
    return [np.stack(combo) for combo in combos]
def get_random_policy_2x2():
    """A random 2-state / 2-action policy (rows drawn independently)."""
    a0 = rnd.random()
    a1 = rnd.random()
    return np.array([[a0, 1 - a0], [a1, 1 - a1]])
def rnd_simplex(d):
    """Uniform sample from the (d-1)-simplex via sorted uniform spacings.

    Differences between consecutive sorted cut points in [0, 1] give `d`
    nonnegative coordinates summing to one.
    """
    cuts = sorted(rnd.uniform(0, 1, d - 1))
    return numpy.diff([0] + cuts + [1]).astype(np.float64)
def random_policy(n_states, n_actions):
    """Stack one uniformly-random simplex point per state into a policy."""
    rows = [rnd_simplex(n_actions) for _ in range(n_states)]
    return np.vstack(rows)
def random_det_policy(n_states, n_actions):
    """A deterministic policy with a uniformly random action per state."""
    rows = [onehot(rnd.randint(0, n_actions), n_actions) for _ in range(n_states)]
    return np.vstack(rows)
# @jit  (left disabled in the original)
def polytope(P, r, discount, pis):
    """Evaluate every policy in `pis` and stack the resulting value vectors.

    Returns an array with one row per policy (the value polytope's vertices
    and/or interior points, depending on the policies supplied).
    """
    values = [np.sum(value_functional(P, r, pi, discount), axis=1) for pi in pis]
    return np.vstack(values)
"""
Some useful functions that will be repeately used.
- `value_functional`: evaluates a policy within a mdp
- `bellman_optimality_operator`: calculates a step of the bellman operator
"""
@jit
def value_functional(P, r, pi, discount):
    """Evaluate policy `pi` on an MDP by solving the Bellman linear system.

    V = r_{\pi} + \gamma P_{\pi} V
      = (I-\gamma P_{\pi})^{-1}r_{\pi}

    Args:
        P (np.ndarray): [n_states x n_states x n_actions]
        r (np.ndarray): [n_states x n_actions]
        pi (np.ndarray): [n_states x n_actions]
        discount (float): the temporal discount value

    Returns:
        np.ndarray: [n_states x 1] state-value vector of `pi`.
    """
    n = P.shape[0]
    # P_{\pi}(s_t+1 | s_t) = sum_{a_t} P(s_{t+1} | s_t, a_t)\pi(a_t | s_t)
    P_pi = np.einsum('ijk,jk->ij', P, pi)
    # r_pi[s]: expected immediate reward under pi, kept as a column vector.
    r_pi = np.expand_dims(np.einsum('ij,ij->i', pi, r), 1)
    # assert np.isclose(pi/pi.sum(axis=1, keepdims=True), pi).all()
    # assert np.isclose(P_pi/P_pi.sum(axis=0, keepdims=True), P_pi, atol=1e-4).all()
    # BUG why transpose here?!?!
    # NOTE(review): the builders normalise P over axis 0, so P_pi[s', s] holds
    # P(s' | s); the .T makes rows index the source state before inverting.
    # Looks intentional under that convention — confirm before "fixing".
    vs = np.dot(np.linalg.inv(np.eye(n) - discount*P_pi.T), r_pi)
    # print(vs.shape, P_pi.shape)
    return vs
def bellman_optimality_operator(P, r, Q, discount):
    """One application of the Bellman optimality operator to Q.

    Q'(s, a) = r(s, a) + \gamma max_{a'} E_{s'~P(s' | s, a)} Q(s', a')

    Args:
        P (np.ndarray): [n_states x n_states x n_actions]
        r (np.ndarray): [n_states x n_actions]
        Q (np.ndarray): [n_states x n_actions]
        discount (float): the temporal discount value

    Returns:
        (np.ndarray): [n_states, n_actions]
    """
    assert len(Q.shape) == 2
    assert Q.shape[1] != 1
    # expected_Q[s, a, a'] = E over next states of Q(s', a')
    expected_Q = np.einsum('ijk,il->jkl', P, Q)
    greedy = np.max(expected_Q, axis=-1)
    return r + discount * greedy
def bellman_operator(P, r, V, discount):
    """One application of the Bellman (evaluation) operator to V.

    B(V)(s, a) = r(s, a) + \gamma E_{s'~P(s' | s, a)} V(s')

    Args:
        P (np.ndarray): [n_states x n_states x n_actions]
        r (np.ndarray): [n_states x n_actions]
        V (np.ndarray): [n_states x 1]
        discount (float): the temporal discount value

    Returns:
        (np.ndarray): [n_states, n_actions]
    """
    # accept a flat V by promoting it to a column vector
    V_col = np.expand_dims(V, 1) if len(V.shape) == 1 else V
    expected_V = np.einsum('ijk,il->jk', P, V_col)
    return r + discount * expected_V
"""
Tools for simulating dyanmical systems.
"""
def isclose(x, y, atol=1e-8):
    """Approximate equality across the iterate formats used by `solve`.

    Supports raw arrays, lists of parameter collections (compared after
    `search_spaces.build`), and tuples whose first element is either.
    Raises ValueError on anything else.
    """
    if isinstance(x, np.ndarray):
        return np.isclose(x, y, atol=atol).all()
    if isinstance(x, list):
        return np.isclose(search_spaces.build(x), search_spaces.build(y), atol=atol).all()
    if isinstance(x, tuple):
        if isinstance(x[0], np.ndarray):
            return np.isclose(x[0], y[0], atol=atol).all()
        if isinstance(x[0], list):
            return np.isclose(search_spaces.build(x[0]), search_spaces.build(y[0]), atol=atol).all()
    raise ValueError('wrong format')
def converged(l):
    """Heuristic convergence test on a trajectory of iterates.

    The tolerance loosens as the trajectory grows (1e-6 after 5000 steps,
    1e-4 after 10000), and a ValueError is raised past 20000 steps.
    """
    if len(l) <= 1:
        return False
    steps = len(l)
    last, prev = l[-1], l[-2]
    if steps > 5000 and isclose(last, prev, 1e-6):
        return True
    if steps > 10000 and isclose(last, prev, 1e-4):
        return True
    if isclose(last, prev, 1e-8):
        return True
    if steps > 20000:
        print(l[-5:-1])
        raise ValueError('not converged...')
    # if np.isnan(l[-1]).any():
    #     raise ValueError('NaNs')
    return False
def solve(update_fn, init):
    """Iterate `update_fn` from `init` until `converged`; return all iterates."""
    trajectory = [init]
    current = init
    while not converged(trajectory):
        current = update_fn(current)
        trajectory.append(current)
        print('\rStep: {}'.format(len(trajectory)), end='', flush=True)
    return trajectory
@jit
def discounted_rewards(rs, discount):
    """Normalised discounted return: (1 - gamma) * sum_t gamma^t r_t."""
    weights = discount ** np.arange(len(rs))
    total = np.sum(weights * np.array(rs))
    return (1 - discount) * total
def sample(p, temperature=1.0):
    """Sample an index from (unnormalised) probabilities `p` via Gumbel-max.

    Bug fix: this was decorated with `@jit`, but it draws its noise with the
    stateful `numpy.random` module. Under jit the `rnd.random(p.shape)` call
    runs once at trace time and the noise is baked into the compiled function
    as a constant, so every subsequent call with a same-shaped `p` returned
    the identical "sample". The decorator is removed so each call draws fresh
    noise.

    Args:
        p: array of (unnormalised) probabilities; categorical over last axis.
        temperature: scales the Gumbel noise (0 degenerates to pure argmax).

    Returns:
        Index array of the sampled categories (last axis reduced).
    """
    gumbel = -np.log(-np.log(rnd.random(p.shape))) * temperature
    return np.argmax(np.log(p) + gumbel, axis=-1)
def rollout(transition_fn, reward_fn, d0, pi, T):
    """Simulate `T` steps of an MDP under policy `pi`.

    `transition_fn` and `reward_fn` are indexed arrays (P[s', s, a] and
    r[s, a]); `d0` is the initial state distribution as a column vector.
    Returns the trajectory as a list of (state, action, reward) triples.
    """
    trajectory = []
    state = sample(d0[:, 0])
    for _ in range(T):
        action = sample(pi[state, :])
        reward = reward_fn[state, action]
        trajectory.append((state, action, reward))
        state = sample(transition_fn[:, state, action])
    return trajectory
|
{"hexsha": "cf28613d94b294f2ea778de9e2ea1df9921fecde", "size": 6974, "ext": "py", "lang": "Python", "max_stars_repo_path": "mdp/utils.py", "max_stars_repo_name": "act65/mdps", "max_stars_repo_head_hexsha": "59f35467baa83b953ccdac5290acfcc31f33fd28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mdp/utils.py", "max_issues_repo_name": "act65/mdps", "max_issues_repo_head_hexsha": "59f35467baa83b953ccdac5290acfcc31f33fd28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mdp/utils.py", "max_forks_repo_name": "act65/mdps", "max_forks_repo_head_hexsha": "59f35467baa83b953ccdac5290acfcc31f33fd28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2735426009, "max_line_length": 114, "alphanum_fraction": 0.6055348437, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 2203}
|
!Author Huang Yihan
!Main driver of this homework. It runs a fixed pipeline:
!  1. read the problem inputs (read_inputs, from input_mod)
!  2. initialise the resonance table and the U8 / H1 cross-section data
!     (presumably U-238 and H-1 - confirm against table_mod)
!  3. compute sigma_e from the radius, then the effective cross sections,
!     and finally release the work arrays
!Note: radius and sigmae are module variables supplied by the used modules.
Program main
use input_mod
use table_mod
use calculate_mod
implicit none
call read_inputs
call res_table_init
call U8_xs_init
call H1_xs_init
call calculate_sigmae(radius,sigmae)
call calculate_xs
call deallocate_array
end program main
|
{"hexsha": "21af554bd53690dc8dfc72713fcb91744a46b348", "size": 411, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/main.f90", "max_stars_repo_name": "Huang-Yihan/Recentyl-Physicals-homework", "max_stars_repo_head_hexsha": "5569fa99987bf97982dbc4ac10faf45d5f3ce62b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main.f90", "max_issues_repo_name": "Huang-Yihan/Recentyl-Physicals-homework", "max_issues_repo_head_hexsha": "5569fa99987bf97982dbc4ac10faf45d5f3ce62b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.f90", "max_forks_repo_name": "Huang-Yihan/Recentyl-Physicals-homework", "max_forks_repo_head_hexsha": "5569fa99987bf97982dbc4ac10faf45d5f3ce62b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8333333333, "max_line_length": 43, "alphanum_fraction": 0.7201946472, "num_tokens": 102}
|
-- Elementary ring/field identities (FOIL expansions, difference of squares,
-- sign lemmas) used by the surrounding complex-analysis development.
lemma {u} FOIL {R : Type u} [ring R] : ∀ a b c d : R, (a + b) * (c + d) = a*c + a*d + b*c + b*d :=
by { intros, rw [left_distrib, right_distrib, right_distrib], ac_refl }
-- (a - b)^2 with the cross terms kept explicitly negated.
lemma {u} FOIL_neg_square {R : Type u} [comm_ring R] : ∀ a b : R, (a - b) * (a - b) = a*a + (-(a*b)+ -(a*b)) + b*b :=
by { intros, rw sub_eq_add_neg, rw FOIL,
rw neg_mul_neg, repeat { rw add_assoc }, congr,
rw neg_mul_eq_mul_neg, rw [mul_comm a b, neg_mul_eq_neg_mul] }
-- FOIL for differences.
lemma {u} FOIL_sub {R : Type u} [ring R] : ∀ a b c d : R, (a - b) * (c - d) = a*c - a*d - b*c + b*d :=
by { intros, repeat { rw sub_eq_add_neg }, rw FOIL, rw neg_mul_eq_mul_neg, rw neg_mul_eq_neg_mul,
apply congr_arg, rw [← neg_mul_eq_mul_neg, ← neg_mul_eq_neg_mul, neg_neg] }
-- x² - y² = (x - y)(x + y), proved by expanding the right-hand side.
lemma difference_of_squares {R} [comm_ring R] : ∀ x y : R, x * x - y*y = (x - y)*(x + y) :=
begin
intros, rw sub_eq_add_neg, rw sub_eq_add_neg,
rw FOIL, rw add_assoc (x*x), rw mul_comm (-y) x,
rw ← neg_mul_eq_mul_neg, rw ← sub_eq_add_neg (x*y),
rw sub_self, rw add_zero, congr, rw neg_mul_eq_mul_neg,
rw mul_comm
end
-- x / (x / y) = y for nonzero x, y in a field.
lemma div_of_div_eq_self {F} [field F] : ∀ x y : F, x ≠ 0 → y ≠ 0 → x/(x/y) = y :=
begin
intros, rw div_eq_mul_one_div x y,
rw field.div_mul_eq_div_mul_one_div _ a,
rw div_self a, rw one_mul,
rw [one_div_eq_inv, one_div_eq_inv],
rw division_ring.inv_inv a_1,
apply one_div_ne_zero a_1,
end
-- (-1) * (-1) = 1 in any ring, via (-1)*(-1) + (-1)*1 = 0.
lemma square_neg_one {R} [ring R] : (-1)*(-1) = (1 : R) :=
begin
apply eq_of_sub_eq_zero, rw sub_eq_add_neg,
transitivity (-1)*(-1) + (-1)*(1:R), rw mul_one,
rw ← left_distrib, rw neg_add_self, rw mul_zero
end
-- Rewrites ac as a telescoping sum; useful for bilinearity estimates.
lemma weird_analysis_trick {R} [ring R]
: ∀ a b c d : R, a * c = (a - b) * (c - d) + a * d + b * c - b * d :=
begin
intros, rw [sub_eq_add_neg a, sub_eq_add_neg c],
rw FOIL, rw [← neg_mul_eq_neg_mul b (-d), mul_neg_eq_neg_mul_symm b d, neg_neg],
rw add_right_comm _ (b*d), rw add_right_comm _ (b*d), rw add_sub_assoc,
rw [sub_self, add_zero], rw add_right_comm _ (-b*c),
rw [← neg_mul_eq_neg_mul, add_assoc, neg_add_self, add_zero],
rw [add_assoc, mul_neg_eq_neg_mul_symm, neg_add_self, add_zero]
end
-- In a division ring, x² = 0 forces x = 0 (no zero divisors).
lemma eq_zero_of_sqr_eq_zero {F} [division_ring F] [decidable_eq F]
: ∀ {x : F}, x * x = 0 → x = 0 :=
begin
intros x h, by_contra h', refine (_ : x * x ≠ 0) h,
apply division_ring.mul_ne_zero h' h'
end
-- Classical version: a product is zero only if a factor is.
lemma classical.l_or_r_eq_zero_of_mul_eq_zero {F} [division_ring F]
: ∀ {x y : F}, x * y = 0 → x = 0 ∨ y = 0 :=
begin
intros x y h, apply classical.by_contradiction,
rw @decidable.not_or_iff_and_not _ _ (classical.prop_decidable _) (classical.prop_decidable _),
intro h', apply division_ring.mul_ne_zero h'.left h'.right, assumption
end
-- Cancellation in additive groups: x = x - y implies y = 0.
lemma eq_sub_implies_sub_zero {G} [add_comm_group G]
: ∀ x y : G, x = x - y → y = 0 :=
begin
intros,
transitivity x - (x - y), { rw sub_sub_self x y },
rw ← a, apply sub_self
end
|
{"author": "Shamrock-Frost", "repo": "FormalComplexAnalysis", "sha": "3cac79d9b603b1edf7df1bc7e948c74eb86a2cc0", "save_path": "github-repos/lean/Shamrock-Frost-FormalComplexAnalysis", "path": "github-repos/lean/Shamrock-Frost-FormalComplexAnalysis/FormalComplexAnalysis-3cac79d9b603b1edf7df1bc7e948c74eb86a2cc0/algebra.lean"}
|
(* Title: HOL/HOLCF/IOA/Seq.thy
Author: Olaf Müller
*)
section \<open>Partial, Finite and Infinite Sequences (lazy lists), modeled as domain\<close>
theory Seq
imports HOLCF
begin
default_sort pcpo
(* Lazy lists over pointed cpos: nil, and x ## xs, strict in the head and
   lazy in the tail. *)
domain (unsafe) 'a seq = nil ("nil") | cons (HD :: 'a) (lazy TL :: "'a seq") (infixr "##" 65)
(* A sequence is Finite when it is built from nil by finitely many conses
   with defined heads. *)
inductive Finite :: "'a seq \<Rightarrow> bool"
where
  sfinite_0: "Finite nil"
| sfinite_n: "Finite tr \<Longrightarrow> a \<noteq> UU \<Longrightarrow> Finite (a ## tr)"
declare Finite.intros [simp]
(* Partial: chain-finite but not ending in nil; Infinite: not chain-finite. *)
definition Partial :: "'a seq \<Rightarrow> bool"
where "Partial x \<longleftrightarrow> seq_finite x \<and> \<not> Finite x"
definition Infinite :: "'a seq \<Rightarrow> bool"
where "Infinite x \<longleftrightarrow> \<not> seq_finite x"
subsection \<open>Recursive equations of operators\<close>
subsubsection \<open>\<open>smap\<close>\<close>
fixrec smap :: "('a \<rightarrow> 'b) \<rightarrow> 'a seq \<rightarrow> 'b seq"
where
  smap_nil: "smap \<cdot> f \<cdot> nil = nil"
| smap_cons: "x \<noteq> UU \<Longrightarrow> smap \<cdot> f \<cdot> (x ## xs) = (f \<cdot> x) ## smap \<cdot> f \<cdot> xs"
lemma smap_UU [simp]: "smap \<cdot> f \<cdot> UU = UU"
by fixrec_simp
subsubsection \<open>\<open>sfilter\<close>\<close>
fixrec sfilter :: "('a \<rightarrow> tr) \<rightarrow> 'a seq \<rightarrow> 'a seq"
where
  sfilter_nil: "sfilter \<cdot> P \<cdot> nil = nil"
| sfilter_cons:
    "x \<noteq> UU \<Longrightarrow>
      sfilter \<cdot> P \<cdot> (x ## xs) =
      (If P \<cdot> x then x ## (sfilter \<cdot> P \<cdot> xs) else sfilter \<cdot> P \<cdot> xs)"
lemma sfilter_UU [simp]: "sfilter \<cdot> P \<cdot> UU = UU"
by fixrec_simp
subsubsection \<open>\<open>sforall2\<close>\<close>
fixrec sforall2 :: "('a \<rightarrow> tr) \<rightarrow> 'a seq \<rightarrow> tr"
where
  sforall2_nil: "sforall2 \<cdot> P \<cdot> nil = TT"
| sforall2_cons: "x \<noteq> UU \<Longrightarrow> sforall2 \<cdot> P \<cdot> (x ## xs) = ((P \<cdot> x) andalso sforall2 \<cdot> P \<cdot> xs)"
lemma sforall2_UU [simp]: "sforall2 \<cdot> P \<cdot> UU = UU"
by fixrec_simp
(* sforall is the "never refuted" predicate: the tr-valued check is not FF. *)
definition "sforall P t \<longleftrightarrow> sforall2 \<cdot> P \<cdot> t \<noteq> FF"
subsubsection \<open>\<open>stakewhile\<close>\<close>
fixrec stakewhile :: "('a \<rightarrow> tr) \<rightarrow> 'a seq \<rightarrow> 'a seq"
where
  stakewhile_nil: "stakewhile \<cdot> P \<cdot> nil = nil"
| stakewhile_cons:
    "x \<noteq> UU \<Longrightarrow> stakewhile \<cdot> P \<cdot> (x ## xs) = (If P \<cdot> x then x ## (stakewhile \<cdot> P \<cdot> xs) else nil)"
lemma stakewhile_UU [simp]: "stakewhile \<cdot> P \<cdot> UU = UU"
by fixrec_simp
subsubsection \<open>\<open>sdropwhile\<close>\<close>
fixrec sdropwhile :: "('a \<rightarrow> tr) \<rightarrow> 'a seq \<rightarrow> 'a seq"
where
  sdropwhile_nil: "sdropwhile \<cdot> P \<cdot> nil = nil"
| sdropwhile_cons:
    "x \<noteq> UU \<Longrightarrow> sdropwhile \<cdot> P \<cdot> (x ## xs) = (If P \<cdot> x then sdropwhile \<cdot> P \<cdot> xs else x ## xs)"
lemma sdropwhile_UU [simp]: "sdropwhile \<cdot> P \<cdot> UU = UU"
by fixrec_simp
subsubsection \<open>\<open>slast\<close>\<close>
fixrec slast :: "'a seq \<rightarrow> 'a"
where
  slast_nil: "slast \<cdot> nil = UU"
| slast_cons: "x \<noteq> UU \<Longrightarrow> slast \<cdot> (x ## xs) = (If is_nil \<cdot> xs then x else slast \<cdot> xs)"
lemma slast_UU [simp]: "slast \<cdot> UU = UU"
by fixrec_simp
subsubsection \<open>\<open>sconc\<close>\<close>
fixrec sconc :: "'a seq \<rightarrow> 'a seq \<rightarrow> 'a seq"
where
  sconc_nil: "sconc \<cdot> nil \<cdot> y = y"
| sconc_cons': "x \<noteq> UU \<Longrightarrow> sconc \<cdot> (x ## xs) \<cdot> y = x ## (sconc \<cdot> xs \<cdot> y)"
abbreviation sconc_syn :: "'a seq \<Rightarrow> 'a seq \<Rightarrow> 'a seq" (infixr "@@" 65)
where "xs @@ ys \<equiv> sconc \<cdot> xs \<cdot> ys"
lemma sconc_UU [simp]: "UU @@ y = UU"
by fixrec_simp
(* Unconditional cons rule (the side condition x \<noteq> UU is discharged by
   a case split), which then replaces the primed rule in the simpset. *)
lemma sconc_cons [simp]: "(x ## xs) @@ y = x ## (xs @@ y)"
by (cases "x = UU") simp_all
declare sconc_cons' [simp del]
subsubsection \<open>\<open>sflat\<close>\<close>
fixrec sflat :: "'a seq seq \<rightarrow> 'a seq"
where
  sflat_nil: "sflat \<cdot> nil = nil"
| sflat_cons': "x \<noteq> UU \<Longrightarrow> sflat \<cdot> (x ## xs) = x @@ (sflat \<cdot> xs)"
lemma sflat_UU [simp]: "sflat \<cdot> UU = UU"
by fixrec_simp
lemma sflat_cons [simp]: "sflat \<cdot> (x ## xs) = x @@ (sflat \<cdot> xs)"
by (cases "x = UU") simp_all
declare sflat_cons' [simp del]
subsubsection \<open>\<open>szip\<close>\<close>
fixrec szip :: "'a seq \<rightarrow> 'b seq \<rightarrow> ('a \<times> 'b) seq"
where
  szip_nil: "szip \<cdot> nil \<cdot> y = nil"
| szip_cons_nil: "x \<noteq> UU \<Longrightarrow> szip \<cdot> (x ## xs) \<cdot> nil = UU"
| szip_cons: "x \<noteq> UU \<Longrightarrow> y \<noteq> UU \<Longrightarrow> szip \<cdot> (x ## xs) \<cdot> (y ## ys) = (x, y) ## szip \<cdot> xs \<cdot> ys"
lemma szip_UU1 [simp]: "szip \<cdot> UU \<cdot> y = UU"
by fixrec_simp
lemma szip_UU2 [simp]: "x \<noteq> nil \<Longrightarrow> szip \<cdot> x \<cdot> UU = UU"
by (cases x) (simp_all, fixrec_simp)
subsection \<open>\<open>scons\<close>, \<open>nil\<close>\<close>
lemma scons_inject_eq: "x \<noteq> UU \<Longrightarrow> y \<noteq> UU \<Longrightarrow> x ## xs = y ## ys \<longleftrightarrow> x = y \<and> xs = ys"
by simp
lemma nil_less_is_nil: "nil \<sqsubseteq> x \<Longrightarrow> nil = x"
by (cases x) simp_all
subsection \<open>\<open>sfilter\<close>, \<open>sforall\<close>, \<open>sconc\<close>\<close>
lemma if_and_sconc [simp]:
  "(if b then tr1 else tr2) @@ tr = (if b then tr1 @@ tr else tr2 @@ tr)"
by simp
(* sfilter distributes over concatenation. *)
lemma sfiltersconc: "sfilter \<cdot> P \<cdot> (x @@ y) = (sfilter \<cdot> P \<cdot> x @@ sfilter \<cdot> P \<cdot> y)"
apply (induct x)
text \<open>adm\<close>
apply simp
text \<open>base cases\<close>
apply simp
apply simp
text \<open>main case\<close>
apply (rule_tac p = "P\<cdot>a" in trE)
apply simp
apply simp
apply simp
done
lemma sforallPstakewhileP: "sforall P (stakewhile \<cdot> P \<cdot> x)"
apply (simp add: sforall_def)
apply (induct x)
text \<open>adm\<close>
apply simp
text \<open>base cases\<close>
apply simp
apply simp
text \<open>main case\<close>
apply (rule_tac p = "P\<cdot>a" in trE)
apply simp
apply simp
apply simp
done
lemma forallPsfilterP: "sforall P (sfilter \<cdot> P \<cdot> x)"
apply (simp add: sforall_def)
apply (induct x)
text \<open>adm\<close>
apply simp
text \<open>base cases\<close>
apply simp
apply simp
text \<open>main case\<close>
apply (rule_tac p="P\<cdot>a" in trE)
apply simp
apply simp
apply simp
done
subsection \<open>Finite\<close>
(*
Proofs of rewrite rules for Finite:
1. Finite nil (by definition)
2. \<not> Finite UU
3. a \<noteq> UU \<Longrightarrow> Finite (a ## x) = Finite x
*)
lemma Finite_UU_a: "Finite x \<longrightarrow> x \<noteq> UU"
apply (rule impI)
apply (erule Finite.induct)
apply simp
apply simp
done
lemma Finite_UU [simp]: "\<not> Finite UU"
using Finite_UU_a [where x = UU] by fast
lemma Finite_cons_a: "Finite x \<longrightarrow> a \<noteq> UU \<longrightarrow> x = a ## xs \<longrightarrow> Finite xs"
apply (intro strip)
apply (erule Finite.cases)
apply fastforce
apply simp
done
lemma Finite_cons: "a \<noteq> UU \<Longrightarrow> Finite (a##x) \<longleftrightarrow> Finite x"
apply (rule iffI)
apply (erule (1) Finite_cons_a [rule_format])
apply fast
apply simp
done
(* Finiteness is preserved upward in the approximation order, hence adm. *)
lemma Finite_upward: "Finite x \<Longrightarrow> x \<sqsubseteq> y \<Longrightarrow> Finite y"
apply (induct arbitrary: y set: Finite)
apply (case_tac y, simp, simp, simp)
apply (case_tac y, simp, simp)
apply simp
done
lemma adm_Finite [simp]: "adm Finite"
by (rule adm_upward) (rule Finite_upward)
subsection \<open>Induction\<close>
text \<open>Extensions to Induction Theorems.\<close>
lemma seq_finite_ind_lemma:
  assumes "\<And>n. P (seq_take n \<cdot> s)"
  shows "seq_finite s \<longrightarrow> P s"
apply (unfold seq.finite_def)
apply (intro strip)
apply (erule exE)
apply (erule subst)
apply (rule assms)
done
lemma seq_finite_ind:
  assumes "P UU"
    and "P nil"
    and "\<And>x s1. x \<noteq> UU \<Longrightarrow> P s1 \<Longrightarrow> P (x ## s1)"
  shows "seq_finite s \<longrightarrow> P s"
apply (insert assms)
apply (rule seq_finite_ind_lemma)
apply (erule seq.finite_induct)
apply assumption
apply simp
done
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/HOLCF/IOA/Seq.thy"}
|
#Author : Zoumpekas Athanasios
#codename : thzou
import os
import numpy as np
import pandas as pd
import pickle
import quandl
import datetime
import time
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import sys
import io
from itertools import product
import warnings
from plotly import tools
import plotly.offline as py
import plotly.graph_objs as go
import plotly.figure_factory as ff
import plotly.io as pio
def get_quandl_data(quandl_id):
    """Return the Quandl dataseries `quandl_id` as a pandas DataFrame.

    Results are cached as a pickle under data/ (slashes in the id become
    dashes); on a cache hit no network access happens.
    """
    cache_path = 'data/' + '{}.pkl'.format(quandl_id).replace('/', '-')
    try:
        # `with` guarantees the handle is closed (the original leaked it).
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(quandl_id))
    except (OSError, IOError):
        print('Downloading {} from Quandl'.format(quandl_id))
        df = quandl.get(quandl_id, returns="pandas")
        df.to_pickle(cache_path)
        print('Cached {} at {}'.format(quandl_id, cache_path))
    return df
def merge_dfs_on_column(dataframes, labels, col):
    '''Merge a single column of each dataframe into a new combined dataframe'''
    columns = {label: frame[col] for label, frame in zip(labels, dataframes)}
    return pd.DataFrame(columns)
def get_json_data(json_url, cache_path):
    '''Download and cache JSON data, return as a dataframe.

    On a cache hit the pickle at `cache_path` is loaded and no network
    access happens; otherwise the URL is fetched and the result cached.
    '''
    try:
        # `with` guarantees the cache file handle is closed (was leaked).
        with open(cache_path, 'rb') as f:
            df = pickle.load(f)
        print('Loaded {} from cache'.format(json_url))
    except (OSError, IOError):
        print('Downloading {}'.format(json_url))
        df = pd.read_json(json_url)
        df.to_pickle(cache_path)
        print('Cached {} at {}'.format(json_url, cache_path))
    return df
def get_poloniex_data(poloniex_pair):
    """Return daily Poloniex chart data for `poloniex_pair` since 2014-01-01,
    indexed by date.  Results are cached via get_json_data under data/.
    """
    # BUG FIX: 'currencyPair' had been mangled to '¤cyPair' by an HTML-entity
    # round-trip ('&curren' -> ¤), which silently broke the query string.
    base_polo_url = 'https://poloniex.com/public?command=returnChartData&currencyPair={}&start={}&end={}&period={}'
    start_date = datetime.datetime.strptime('2014-01-01', '%Y-%m-%d')  # get data from the start of 2014
    end_date = datetime.datetime.now()  # up until today
    period = 86400  # pull daily data (86,400 seconds per day)
    json_url = base_polo_url.format(poloniex_pair, start_date.timestamp(), end_date.timestamp(), period)
    data_df = get_json_data(json_url, 'data/' + poloniex_pair)
    data_df = data_df.set_index('date')
    return data_df
def df_scatter(df, title, seperate_y_axis=False, y_axis_label='', scale='linear', initial_hide=False):
    """Plot every column of `df` as a plotly time series.

    Writes images/cryptos.html and images/cryptos.png as side effects.
    With seperate_y_axis=True each series gets its own overlaid y-axis;
    with initial_hide=True traces start hidden ('legendonly').
    """
    #Generate a scatter plot of the entire dataframe#
    label_arr = list(df)
    series_arr = list(map(lambda col: df[col], label_arr))
    layout = go.Layout(
        title=title,
        legend=dict(orientation="h"),
        xaxis=dict(type='date'),
        yaxis=dict(
            title=y_axis_label,
            showticklabels= not seperate_y_axis,
            type=scale
        )
    )
    # Shared config reused for every extra overlaid y-axis.
    y_axis_config = dict(
        overlaying='y',
        showticklabels=False,
        type=scale )
    visibility = True
    if initial_hide:
        visibility = 'legendonly'
    # Form Trace For Each Series
    trace_arr = []
    for index, series in enumerate(series_arr):
        trace = go.Scatter(
            x=series.index,
            y=series,
            name=label_arr[index],
            visible=visibility
        )
        # Add seperate axis for the series (axes are 1-based: y1, y2, ...)
        if seperate_y_axis:
            trace['yaxis'] = 'y{}'.format(index + 1)
            layout['yaxis{}'.format(index + 1)] = y_axis_config
        trace_arr.append(trace)
    fig = go.Figure(data=trace_arr, layout=layout)
    py.plot(fig, filename = 'images/cryptos.html', auto_open=False)
    pio.write_image(fig, 'images/cryptos.png')
    #py.iplot(fig)
def btc_average_data():
    """Fetch BTC/USD 'Weighted Price' series from four exchanges and return
    them as one dataframe with an extra 'avg_btc_price_usd' mean column.
    Zero prices are treated as missing data (NaN) before averaging.
    """
    # Kraken first, then the other exchanges, exactly as before.
    exchange_data = {'KRAKEN': get_quandl_data('BCHARTS/KRAKENUSD')}
    for exchange in ('COINBASE', 'BITSTAMP', 'ITBIT'):
        exchange_data[exchange] = get_quandl_data('BCHARTS/{}USD'.format(exchange))
    btc_usd_datasets = merge_dfs_on_column(
        list(exchange_data.values()), list(exchange_data.keys()), 'Weighted Price')
    # Zeros are data gaps, not real quotes -- exclude them from the mean.
    btc_usd_datasets.replace(0, np.nan, inplace=True)
    btc_usd_datasets['avg_btc_price_usd'] = btc_usd_datasets.mean(axis=1)
    return btc_usd_datasets
def get_altcoins_data():
    """Fetch daily BTC-denominated price history for a fixed altcoin list.

    Returns a dict mapping altcoin symbol -> dataframe from Poloniex.
    """
    altcoins = ('XRP', 'ETH', 'XMR', 'STR', 'LTC', 'DGB', 'BTS', 'DOGE',
                'BCH', 'BCN', 'ZRX', 'DASH', 'ZEC', 'MAID', 'ETC')
    return {coin: get_poloniex_data('BTC_{}'.format(coin)) for coin in altcoins}
def convert_and_combine(altcoin_data, btc_usd_datasets):
    """Convert BTC-denominated altcoin prices to USD, merge them (plus the
    average BTC price) into one dataframe, plot it, and return it.

    Mutates each altcoin dataframe in place by adding a 'price_usd' column.
    """
    btc_usd = btc_usd_datasets['avg_btc_price_usd']
    for frame in altcoin_data.values():
        frame['price_usd'] = frame['weightedAverage'] * btc_usd
    combined_df = merge_dfs_on_column(
        list(altcoin_data.values()), list(altcoin_data.keys()), 'price_usd')
    combined_df['BTC'] = btc_usd
    df_scatter(combined_df, 'Cryptocurrency Prices (USD)',
               seperate_y_axis=False, y_axis_label='Coin Value (USD)', scale='log')
    return combined_df
|
{"hexsha": "3bb36b49c612f1750363628f6edd4b7cc219e495", "size": 5753, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_scripts/data_acquisition.py", "max_stars_repo_name": "thzou/crypto_analysis", "max_stars_repo_head_hexsha": "7eb4f4c988e4d5a94d36ed61a002041e44d72c10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python_scripts/data_acquisition.py", "max_issues_repo_name": "thzou/crypto_analysis", "max_issues_repo_head_hexsha": "7eb4f4c988e4d5a94d36ed61a002041e44d72c10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_scripts/data_acquisition.py", "max_forks_repo_name": "thzou/crypto_analysis", "max_forks_repo_head_hexsha": "7eb4f4c988e4d5a94d36ed61a002041e44d72c10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8742857143, "max_line_length": 124, "alphanum_fraction": 0.6867721189, "include": true, "reason": "import numpy", "num_tokens": 1494}
|
import argparse, random, sys, os
# import librosa
import numpy as np
import soundfile
def main(args):
    """Cut `args.numslices` random `args.duration`-sample slices from a
    sound file and write each next to the input as <name>-slice-<offset>.wav
    (16-bit PCM WAV).  Multichannel input is downmixed to mono.

    Raises ValueError if the file is too short for one slice.
    """
    # Honour the --seed CLI argument (it was parsed but never applied).
    np.random.seed(args.seed)
    y, sr = soundfile.read(args.filename, always_2d=True)
    print(f"File loaded with y {y.shape}, sr = {sr}")
    # multichannel: downmix to a single channel
    if y.shape[1] > 1:
        y = y.mean(axis=1, keepdims=True)
        print(f"File downmix y {y.shape}, sr = {sr}")
    # randint(0, high) needs high >= 1, i.e. at least duration + 2 samples.
    if y.shape[0] <= args.duration + 1:
        raise ValueError(
            f"Input too short: {y.shape[0]} samples for slice duration {args.duration}")
    for i in range(args.numslices):
        offset = np.random.randint(0, y.shape[0] - args.duration - 1)
        print(f"Sampling offset {offset}")
        y_new = y[offset:offset + args.duration]
        print(f"Sampling slice at offset {y_new.shape}")
        # write slice
        filename_new = args.filename[:-4] + f"-slice-{offset}.wav"
        print(f"Writing file to {filename_new}")
        soundfile.write(filename_new, y_new, sr, 'PCM_16', format='WAV')
if __name__ == '__main__':
    # Echo the raw command line for reproducibility of runs.
    print(sys.argv)
    parser = argparse.ArgumentParser()
    # parser.add_argument('-c', '--conf', help='Config key to load from autovoice configuration module [sco_2]', default='sco_2', type=str)
    parser.add_argument('-d', '--duration', help='Output duration (samples) to select from input file [4096]',
                        default=4096, type=int)
    parser.add_argument('-f', '--filename', help='Sound file to process', default=None, type=str)
    parser.add_argument('-n', '--numslices', help='Number of slices [1]', default=1, type=int)
    parser.add_argument('-s', '--seed', help='Random seed [0]', default=0, type=int)
    args = parser.parse_args()
    # Echo the parsed namespace, then hand everything to main().
    print(args)
    main(args)
|
{"hexsha": "8cd26954f74e2432d71f478e52c6c3f1b3207b0b", "size": 1552, "ext": "py", "lang": "Python", "max_stars_repo_path": "playground/audio_slicer.py", "max_stars_repo_name": "x75/smp_audio", "max_stars_repo_head_hexsha": "6e293d6419132e34e38efde90efb58cabb9c623b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-30T01:47:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-10T11:11:16.000Z", "max_issues_repo_path": "playground/audio_slicer.py", "max_issues_repo_name": "x75/smp_audio", "max_issues_repo_head_hexsha": "6e293d6419132e34e38efde90efb58cabb9c623b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "playground/audio_slicer.py", "max_forks_repo_name": "x75/smp_audio", "max_forks_repo_head_hexsha": "6e293d6419132e34e38efde90efb58cabb9c623b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8, "max_line_length": 139, "alphanum_fraction": 0.6327319588, "include": true, "reason": "import numpy", "num_tokens": 411}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2010 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
Require Import List.
Require Import Setoid.
Require Import BinPos.
Require Import BinList.
Require Import Znumtheory.
Require Export Morphisms Setoid Bool.
Require Import ZArith.
Open Scope Z_scope.
Require Import Algebra_syntax.
Require Export Ring2.
Require Import Ring2_polynom.
Require Import Ring2_initial.
Set Implicit Arguments.
(* Reification with Type Classes, inspired from B.Grégoire and A.Spiewack *)
(* is_in_list_at t l i holds when t occurs at position i of l; instance
   search performs the lookup, preferring the head (Ifind0) over the
   tail (IfindS, priority 1). *)
Class is_in_list_at (R:Type) (t:R) (l:list R) (i:nat) := {}.
Instance Ifind0 (R:Type) (t:R) l
 : is_in_list_at t (t::l) 0.
Instance IfindS (R:Type) (t2 t1:R) l i
 `{is_in_list_at R t1 l i}
 : is_in_list_at t1 (t2::l) (S i) | 1.
(* reifyPE e lvar t: the polynomial expression e over variables lvar
   denotes the ring term t.  One instance per ring constructor; variables
   are resolved last (priority 100) via is_in_list_at. *)
Class reifyPE (R:Type) (e:PExpr Z) (lvar:list R) (t:R) := {}.
Instance reify_zero (R:Type) (RR:Ring R) lvar
 : reifyPE (PEc 0%Z) lvar ring0.
Instance reify_one (R:Type) (RR:Ring R) lvar
 : reifyPE (PEc 1%Z) lvar ring1.
Instance reify_plus (R:Type) (RR:Ring R)
  e1 lvar t1 e2 t2
 `{reifyPE R e1 lvar t1}
 `{reifyPE R e2 lvar t2}
 : reifyPE (PEadd e1 e2) lvar (ring_plus t1 t2).
Instance reify_mult (R:Type) (RR:Ring R)
  e1 lvar t1 e2 t2
 `{reifyPE R e1 lvar t1}
 `{reifyPE R e2 lvar t2}
 : reifyPE (PEmul e1 e2) lvar (ring_mult t1 t2).
Instance reify_sub (R:Type) (RR:Ring R)
  e1 lvar t1 e2 t2
 `{reifyPE R e1 lvar t1}
 `{reifyPE R e2 lvar t2}
 : reifyPE (PEsub e1 e2) lvar (ring_sub t1 t2).
Instance reify_opp (R:Type) (RR:Ring R)
  e1 lvar t1
 `{reifyPE R e1 lvar t1}
 : reifyPE (PEopp e1) lvar (ring_opp t1).
Instance reify_var (R:Type) t lvar i
 `{is_in_list_at R t lvar i}
 : reifyPE (PEX Z (P_of_succ_nat i)) lvar t
 | 100.
(* Pointwise reification of a list of terms. *)
Class reifyPElist (R:Type) (lexpr:list (PExpr Z)) (lvar:list R)
  (lterm:list R) := {}.
Instance reifyPE_nil (R:Type) lvar
 : @reifyPElist R nil lvar (@nil R).
Instance reifyPE_cons (R:Type) e1 lvar t1 lexpr2 lterm2
 `{reifyPE R e1 lvar t1} `{reifyPElist R lexpr2 lvar lterm2}
 : reifyPElist (e1::lexpr2) lvar (t1::lterm2).
(* Forces instance search to fully instantiate the variable list. *)
Class is_closed_list T (l:list T) := {}.
Instance Iclosed_nil T
 : is_closed_list (T:=T) nil.
Instance Iclosed_cons T t l
 `{is_closed_list (T:=T) l}
 : is_closed_list (T:=T) (t::l).
(* Entry point: returns the inferred (variable list, reified expressions). *)
Definition list_reifyl (R:Type) lexpr lvar lterm
 `{reifyPElist R lexpr lvar lterm}
 `{is_closed_list (T:=R) lvar} := (lvar,lexpr).
Unset Implicit Arguments.
(* [c] * x notation: integer constants are injected through gen_phiZ. *)
Instance multiplication_phi_ring{R:Type}{Rr:Ring R} : Multiplication :=
  {multiplication x y := ring_mult (gen_phiZ Rr x) y}.
(*
Print HintDb typeclass_instances.
*)
(* Reification *)
(* Collect the terms appearing in an (iterated) ring equation goal. *)
Ltac lterm_goal g :=
  match g with
ring_eq ?t1 ?t2 => constr:(t1::t2::nil)
| ring_eq ?t1 ?t2 -> ?g => let lvar :=
lterm_goal g in constr:(t1::t2::lvar)
  end.
(* Rewrite each goal term into PEeval of its reified expression. *)
Ltac reify_goal lvar lexpr lterm:=
(* idtac lvar; idtac lexpr; idtac lterm;*)
  match lexpr with
nil => idtac
| ?e::?lexpr1 =>
  match lterm with
?t::?lterm1 => (* idtac "t="; idtac t;*)
let x := fresh "T" in
set (x:= t);
change x with
(@PEeval Z Zr _ _ (@gen_phiZ_morph _ _) N
(fun n:N => n) (@Ring_theory.pow_N _ ring1 ring_mult)
lvar e);
clear x;
reify_goal lvar lexpr1 lterm1
  end
  end.
Existing Instance gen_phiZ_morph.
Existing Instance Zr.
(* Injected integer constants commute with every ring element. *)
Lemma comm: forall (R:Type)(Rr:Ring R)(c : Z) (x : R),
  x * [c] == [c] * x.
induction c. intros. ring_simpl. gen_ring_rewrite. simpl. intros.
ring_rewrite_rev same_gen.
induction p. simpl. gen_ring_rewrite. ring_rewrite IHp. rrefl.
simpl. gen_ring_rewrite. ring_rewrite IHp. rrefl.
simpl. gen_ring_rewrite.
simpl. intros. ring_rewrite_rev same_gen.
induction p. simpl. generalize IHp. clear IHp.
gen_ring_rewrite. intro IHp. ring_rewrite IHp. rrefl.
simpl. generalize IHp. clear IHp.
gen_ring_rewrite. intro IHp. ring_rewrite IHp. rrefl.
simpl. gen_ring_rewrite. Qed.
Lemma Zeqb_ok: forall x y : Z, Zeq_bool x y = true -> x == y.
intros x y H. rewrite (Zeq_bool_eq x y H). rrefl. Qed.
(* Full pipeline: collect terms, reify, then apply ring_correct and
   decide the polynomial equality by vm_compute. *)
Ltac ring_gen :=
  match goal with
|- ?g => let lterm := lterm_goal g in (* les variables *)
match eval red in (list_reifyl (lterm:=lterm)) with
| (?fv, ?lexpr) =>
(* idtac "variables:";idtac fv;
idtac "terms:"; idtac lterm;
idtac "reifications:"; idtac lexpr;
*)
reify_goal fv lexpr lterm;
match goal with
|- ?g =>
set_ring_notations;
apply (@ring_correct Z Zr _ _ (@gen_phiZ_morph _ _)
(@comm _ _) Zeq_bool Zeqb_ok N (fun n:N => n)
(@Ring_theory.pow_N _ 1 multiplication));
[apply mkpow_th; rrefl
|vm_compute; reflexivity]
end
end
  end.
(* Pierre L: these tests should be done in a section, otherwise
global axioms are generated. Ideally such tests should go in
the test-suite directory *)
Section Tests.
Ltac ring2:=
  unset_ring_notations; intros;
  match goal with
|- (@ring_eq ?r ?rd _ _ ) =>
simpl; ring_gen
  end.
Variable R: Type.
Variable Rr: Ring R.
Existing Instance Rr.
Goal forall x y z:R, x == x .
ring2.
Qed.
Goal forall x y z:R, x * y * z == x * (y * z).
ring2.
Qed.
Goal forall x y z:R, [3]* x *([2]* y * z) == [6] * (x * y) * z.
ring2.
Qed.
Goal forall x y z:R, 3 * x * (2 * y * z) == 6 * (x * y) * z.
ring2.
Qed.
(* Fails with Multiplication: A -> B -> C.
Goal forall x:R, 2%Z * (x * x) == 3%Z * x.
Admitted.
*)
End Tests.
|
{"author": "mattam82", "repo": "Coq-misc", "sha": "60bc3cbe72083f4fa1aa759914936e4fa3d6b42e", "save_path": "github-repos/coq/mattam82-Coq-misc", "path": "github-repos/coq/mattam82-Coq-misc/Coq-misc-60bc3cbe72083f4fa1aa759914936e4fa3d6b42e/plugins/setoid_ring/Ring2_tac.v"}
|
import logging
import numpy as np
import os
try:
import matplotlib.pyplot as plt
from matplotlib import gridspec
is_matplotlib = True
except:
is_matplotlib = False
from pystella.util.phys_var import phys
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = 'bakl'
# Elements tracked in STELLA "eve" composition files, in display order.
eve_elements = ("Ni56", "H", "He", "C", "N", "O", "Ne", "Na", "Mg", "Al"
                , "Si", "S", "Ar", "Ca", "Fe", "Ni")
# Plot colour per element ('Z' is the aggregate metals curve).
eve_colors = dict(Ni56="red", H="blue", He="cyan", C="darkorange", N="coral",
                  O="violet", Ne="green", Na="sandybrown",
                  Mg="skyblue", Si="olive", Al="lime",
                  S="indigo", Ar="brown", Ca="purple",
                  Fe='maroon', Ni='magenta',
                  Fe52='blue', Cr48='cyan',
                  Z='black', )
# Line style per element: dashed by default, solid for the most-plotted ones.
eve_lntypes = dict((k, '--') for k, v in eve_colors.items())  # no y-shift
eve_lntypes['H'] = '-'
eve_lntypes['He'] = '-'
eve_lntypes['O'] = '-'
eve_lntypes['C'] = '-'
eve_lntypes['Ni56'] = '-'
eve_lntypes['Z'] = '-'  # metals
# Atomic weights [amu] per element; Ni56 reuses the Ni entry below.
eve_el_m = {'H': 1.008, 'He': 4.003, 'C': 12.011, 'N': 14.007, 'O': 15.999,
            'F': 18.998, 'Ne': 20.180, 'Na': 22.990, 'Mg': 24.305,
            'Al': 26.982, 'Si': 28.086, 'P': 30.974, 'S': 32.066,
            'Cl': 35.453, 'Ar': 39.948, 'K': 39.098, 'Ca': 40.078,
            'Sc': 44.956, 'Ti': 47.867, 'V': 50.942, 'Cr': 51.996,
            'Mn': 54.938, 'Fe': 55.845, 'Co': 58.933, 'Ni': 58.693
            }
eve_el_m['Ni56'] = eve_el_m['Ni']
class PreSN(object):
    """
    A class that holds data of presupernova: a per-zone hydrodynamic
    profile (mass, radius, temperature, density, velocity) plus per-zone
    chemical abundances, and a dict of free-form scalar parameters.
    """
    # Column names of the hydro record array.
    sRho = 'Rho'
    sM = 'M'
    sMcore = 'm_core'
    sT = 'T'
    sR = 'R'
    sV = 'V'
    presn_hydro = (sM, sR, sT, sRho, sV)
    # Default element sets for the chemical-composition record array.
    stl_elements = ("H", "He", "C", "N", "O", "Ne", "Na", "Mg", "Al"
                    , "Si", "S", "Ar", "Ca", "Fe", "Ni", "Ni56")
    # Same set extended with the Fe52/Cr48 isotopes.
    stl_elements_iso = ("H", "He", "C", "N", "O", "Ne", "Na", "Mg", "Al"
                        , "Si", "S", "Ar", "Ca", "Fe", "Ni", "Ni56", 'Fe52', 'Cr48')
    def __init__(self, name, nzon, elements=stl_elements):
        """Creates a PreSN model instance. Required parameters: name, nzon"""
        self._name = name
        self._nzon = nzon
        self._elements = elements  # element names tracked in the chem table
        # Per-zone structured arrays; left uninitialised until filled.
        self._data_hyd = np.empty(nzon, dtype=PreSN.dtype_hyd())
        self._data_chem = np.empty(nzon, dtype=self.dtype_chem())
        self._params = {}  # free-form scalar parameters (r_cen, m_core, ...)
        self._loads = []   # hydro columns actually set; checked by hyd()
    @staticmethod
    def dtype_hyd():
        # One float64 field per hydro column (M, R, T, Rho, V).
        dt = np.dtype({'names': PreSN.presn_hydro, 'formats': np.repeat('f8', len(PreSN.presn_hydro))})
        return dt
    def dtype_chem(self):
        # One float64 field per tracked element.
        dt = np.dtype({'names': self.Elements, 'formats': np.repeat('f8', self.Nelements)})
        return dt
def show_info(self):
print("-" * 20)
print(" Name: %s nzon: %d" % (self.Name, self.nzon))
print(" m_tot: {:5.3f} r_cen: {:12.6e}".format(self.m_tot, self.r_cen))
    @property
    def Name(self):
        """Model name."""
        return self._name
    @property
    def Elements(self):
        """Tuple of tracked element names."""
        return self._elements
    @property
    def Nelements(self):
        """Number of tracked elements."""
        return len(self.Elements)
    @property
    def nzon(self):
        """Number of zones."""
        return self._nzon
    @property
    def time_start(self):
        """ time start"""
        return self.par('time_start', 0.)
    @property
    def r_cen(self):
        """Center radius"""
        p = 'r_cen'
        if self.is_set(PreSN.sR):
            d = self.hyd(PreSN.sR)[0] * 0.99  # / 2. # todo check Rcen
        else:
            d = 0.
        return self.par(p, d)
    @property
    def m_core(self):
        """Core mass"""
        p = PreSN.sMcore
        if self.is_set(PreSN.sM):
            d = self.hyd(PreSN.sM)[0]
        else:
            d = 0.
        return self.par(p, d)
    @property
    def m_tot(self):
        """Total mass"""
        p = 'm_tot'
        if self.is_set(PreSN.sM):
            d = self.hyd(PreSN.sM)[-1]
        else:
            d = 0.
        return self.par(p, d)
    @property
    def rho_cen(self):
        """ Center density"""
        p = 'rho_cen'
        if self.is_set(PreSN.sRho):
            d = self.hyd(PreSN.sRho)[0]
        else:
            d = 0.
        return self.par(p, d)
    @property
    def lg_rho(self):
        """logarithmic density"""
        return np.log10(self.rho)
    @property
    def rho(self):
        """density"""
        return self.hyd(PreSN.sRho)
    @property
    def lg_r(self):
        """logarithmic radius"""
        return np.log10(self.r)
    @property
    def r(self):
        """Radius."""
        return self.hyd(PreSN.sR)
    @property
    def m(self):
        """Mass"""
        return self.hyd(PreSN.sM)
    @property
    def lgT(self):
        """Log T"""
        return np.log10(self.T)
    @property
    def T(self):
        """Temperature"""
        return self.hyd(PreSN.sT)
    @property
    def V(self):
        """Velocity"""
        return self.hyd(PreSN.sV)
    @property
    def lgV(self):
        """Log Velocity"""
        return np.log10(self.V)
def hyd(self, v):
"""Hydro data"""
if v not in self._loads:
raise ValueError("There is no information about the parameter [%s]. You should set it." % v)
return self._data_hyd[v]
@property
def chem(self):
"""Full hydro data"""
return self._data_chem
@property
def params_keys(self):
return self._params.keys()
def par(self, name, d=None):
    """Return parameter *name*, or default *d* when absent."""
    return self._params.get(name, d)
def is_par(self, key):
    """True if parameter *key* is stored."""
    return key in self._params
def set_par(self, name, v):
    """Store parameter *name* = *v* (overwrites any previous value)."""
    self._params[name] = v
def copy_par(self, src, keys=None):
    """Copy parameters named *keys* (default: all of src's) from *src* to self.

    For each name, the attribute value on *src* is preferred; when no such
    attribute exists, the stored parameter value is used instead.
    """
    names = src.params_keys if keys is None else keys
    for name in names:
        try:
            value = getattr(src, name)
        except AttributeError:
            value = src.par(name)
        self.set_par(name, value)
def lg_el(self, el):
    """Base-10 logarithm of the abundance profile of element *el*."""
    profile = self.el(el)
    return np.log10(profile)
def mass_tot_el(self, el=None, is_diff=False):
    """
    Compute the total mass of element el. Return dict of elements with total mass if el = None
    :param el: the name of element, or an iterable of names. Default: None (all elements)
    :param is_diff: if True use np.sum(self.el(e)*np.diff(self.m)) instead of the trapezoid rule
    :return: the total mass of the element el (or a dict name -> mass)
    """
    def m_el(e):
        # Trapezoid integration of the abundance over the mass coordinate.
        return np.trapz(self.el(e), self.m)

    def m_el_diff(e):
        # Sum X_i * dM over zones; the last dM is duplicated so lengths match.
        dmass = np.diff(self.m)
        dmass = np.insert(dmass, -1, dmass[-1])
        return np.sum(self.el(e) * dmass)

    fm = m_el
    if is_diff:
        fm = m_el_diff
    elements = self.Elements
    if el is not None:
        if isinstance(el, str):
            return fm(el)
            # return m_el_diff(el)
        else:
            elements = el
    mass = {}
    # NOTE: the loop variable shadows the `el` argument (already consumed above).
    for el in elements:
        mass[el] = fm(el)
    return mass
def mass_tot_rho(self):
    """Total mass obtained by integrating the density over spherical shell volumes.

    Each zone contributes (4/3)*pi*(r_i^3 - r_{i-1}^3)*rho_i, with the
    innermost shell bounded below by r_cen. The core mass is NOT included.
    """
    radii = self.r
    dens = self.rho
    shell_mass = np.zeros(self.nzon)
    shell_mass[0] = 4. * np.pi / 3. * (radii[0] ** 3 - self.r_cen ** 3) * dens[0]
    for k in range(1, self.nzon):
        shell_mass[k] = 4. / 3. * np.pi * (radii[k] ** 3 - radii[k - 1] ** 3) * dens[k]
    return np.sum(shell_mass)
def abund(self, k=None):
    """
    Abundances in k-zone. k in [1, Nzon]
    :param k: zone (1-based). If None, return a 2d array (nzon x Nelements) for all zones
    :return: a list for a single zone, a 2d numpy array for all zones
    """
    if k is not None:
        # Single zone: one abundance value per element, in Elements order.
        return [self.el(e)[k - 1] for e in self.Elements]
    table = np.zeros((self.nzon, len(self.Elements)))
    for col, ename in enumerate(self.Elements):
        table[:, col] = self.el(ename)
    return table
def chem_norm(self, k=None, norm=None):
    """Normalize abundances in zone *k* (1-based) by *norm*.

    When k is None, every zone is normalized in turn. When norm is None,
    the sum of the zone's abundances is used, i.e. the zone is scaled to
    sum to 1. Mutates the stored chemical data in place.
    """
    if k is None:
        for j in range(self.nzon):
            self.chem_norm(k=j + 1, norm=norm)
        return
    if norm is None:
        norm = sum(self.abund(k))
    for e in self.Elements:
        self._data_chem[e][k - 1] = self.el(e)[k - 1] / norm
def el(self, el):
    """
    Get abundances for the element
    :param el: the Element name
    :return: array
    :raises ValueError: if the element is unknown or its data was never set
    """
    if el not in self.Elements:
        raise ValueError("There is no element [%s] in elements" % el)
    if el in self._loads:
        return self._data_chem[el]
    raise ValueError("There is no information about the element [%s]. You should set it." % el)
def xyz(self, k=-1, xy=('H', 'He'), is_norm=False):
    """
    Compute XYZ for chemical abundances
    :param k: zone, default: -1, last zone. If k = None, return the array for all zones
    :param xy: array Not-metal elements, default: ('H', 'He')
    :param is_norm: normalize to 1, default: False
    :return: dict with one entry per xy element plus 'Z' (all remaining elements summed)
    """
    if any([el not in self.Elements for el in xy]):
        raise ValueError("There is no elements of xy [{}] in Elements [{}]".format(xy, self.Elements))
    norm = 1.
    metals = [el for el in self.Elements if el not in xy]
    if k is None:
        # All zones: each dict value is a per-zone array.
        if is_norm:
            norm = np.sum([self.el(ze) for ze in self.Elements], axis=0)
        ed = {el: self.el(el) / norm for el in xy}
        y = np.sum([self.el(ze) for ze in metals], axis=0)
        ed['Z'] = y / norm
    else:
        # Single zone k (python indexing, so k=-1 means the outermost zone).
        if is_norm:
            norm = np.sum([self.el(ze)[k] for ze in self.Elements], axis=0)
        ed = {el: self.el(el)[k] / norm for el in xy}
        y = np.sum([self.el(ze)[k] for ze in metals])
        ed['Z'] = y / norm
    return ed
def write_hyd(self, fname):
    """
    Write the hydro profile to *fname* in Stella hyd format.

    Header line: time_start, nzon, m_core [Msun], r_cen, rho_cen.
    Then one row per zone: zone no., M [Msun], R, Rho, T, V, M [Msun], dummy.

    Code readheger.trf:
    BM1=cutmass; -- core Mass
    r(0)=Rcen;
    dum=0.;
    write(12,'(1p,e12.3,i6,2e13.5)') timeStart, NzoneHyd, BM1, Rcen;
    do km=1,NzoneHyd;
    write(12,'(1x,i4,1p,e12.4,e18.10,3e15.7,2e12.4)')
    km, dum, rHyd(km), rhoHyd(km), TpHyd(km), uHyd(km), aMr(km), dum;
    end do;
    :return: True if the file exists after writing
    """
    dum = np.zeros(self.nzon)
    logger.info(' Write hyd-data to %s' % fname)
    zones = range(1, self._nzon + 1)
    with open(fname, 'w') as f:
        # NOTE: the header writes 5 values (incl. rho_cen) while the trf
        # snippet above shows 4 — presumably an extended format; verify reader.
        f.write('{:12.3e} {:6d} {:13.5e} {:13.5e} {:13.5e}\n'
                .format(self.time_start, self.nzon, self.m_core / phys.M_sun, self.r_cen, self.rho_cen))
        # a = '#No.  Mr  dM  R  dR  Rho  PRE  T  V'.split()
        # f.write(' '.join(a)+'\n')
        # for _ in zip(zones, self.m/phys.M_sun, dum, self.r, dum, self.rho, dum, self.T, self.V):
        #     f.write(' %4d %12.4e %12.4e %12.4e %12.4e %12.4e %12.4e %12.4e %12.4e \n' % _)
        # 'evehyd.trf: idum,dum,Radius(j),RHOeve(j),TMPR(j),VELOC(j), dum,dum; '
        a = '#No.   M   R   Rho   T   V   M   dum '.split()
        f.write('  ' + '  '.join(a) + '\n')
        # for _ in zip(zones, self.m / phys.M_sun, self.r, np.log10(self.rho), np.log10(self.T), self.V, self.m / phys.M_sun, dum):
        for _ in zip(zones, self.m / phys.M_sun, self.r, self.rho, self.T, self.V, self.m / phys.M_sun, dum):
            f.write(' %4d %15.8e %15.8e %15.7e %15.7e %15.7e %15.7e %8.1e\n' % _)
            # f.write(' %4d %15.5e %15.5e %15.5e %15.5e %15.5e %15.5e %8.1e\n' % _)
    return os.path.isfile(fname)
def plot_chem(self, x='m', elements=eve_elements, ax=None, xlim=None, ylim=None, **kwargs):
    """
    Plot the chemical composition.

    :param x: abscissa: 'm' (solar masses), 'rsun' (solar radii), 'v' (km/s),
        'z' (zone index), anything else = radius in cm on a log axis
    :param elements: elements to draw (only those with loaded data are plotted)
    :param ax: existing axes to draw into; a new figure is created when None
    :param xlim, ylim: axis limits; computed from the plotted data when omitted
    :return: the axes (or None when matplotlib is unavailable)

    Recognized kwargs:
        ls = kwargs.get('ls', eve_lntypes), if ls is str then ls is the same for all elements
        colors = kwargs.get('colors', eve_colors), if colors is str then colors is the same for all elements
        loc = kwargs.get('leg_loc', 'best')
        leg_ncol = kwargs.get('leg_ncol', 4)
        lw = kwargs.get('lw', 2), if lw is number then lw is the same for all elements
        marker = kwargs.get('marker', None)
        markersize = kwargs.get('markersize', 4)
        alpha = kwargs.get('alpha', 1)
        figsize = kwargs.get('figsize', (8, 8))
        fontsize = kwargs.get('fontsize', 14)
        is_legend = kwargs.get('is_legend', True)
    """
    if not is_matplotlib:
        return
    # Normalize per-element style options: a scalar value applies to every element.
    lntypes = kwargs.get('ls', eve_lntypes)
    if isinstance(lntypes, str):
        lntypes = {el: lntypes for el in elements}
    colors = kwargs.get('colors', eve_colors)
    if isinstance(colors, str):
        colors = {el: colors for el in elements}
    lw = kwargs.get('lw', 2)
    if isinstance(lw, (int, float)):
        lw = {el: lw for el in elements}
    loc = kwargs.get('leg_loc', 'best')
    leg_ncol = kwargs.get('leg_ncol', 4)
    marker = kwargs.get('marker', None)
    markersize = kwargs.get('markersize', 4)
    alpha = kwargs.get('alpha', 1)
    figsize = kwargs.get('figsize', (8, 8))
    fontsize = kwargs.get('fontsize', 14)
    is_legend = kwargs.get('is_legend', True)
    # (removed a duplicate isinstance(lntypes, str) block: lntypes is already
    # a dict at this point, so that branch was unreachable)
    is_new_plot = ax is None
    # setup figure
    if is_new_plot:
        plt.matplotlib.rcParams.update({'font.size': fontsize})
        fig = plt.figure(num=None, figsize=figsize, dpi=100, facecolor='w', edgecolor='k')
        gs1 = gridspec.GridSpec(1, 1)
        # gs1.update(wspace=0.1, hspace=0.1, top=0.97, left=0.12, right=0.98)
        gs1.update(wspace=0.1, hspace=0.1, top=0.97, left=0.12, right=0.87)
        ax = fig.add_subplot(gs1[0, 0])
    if is_new_plot:
        if x == 'rsun':
            ax.set_xlabel(r'R [$R_\odot$]')
        elif x == 'm':
            ax.set_xlabel(r'M [$M_\odot$]')
        elif x == 'v':
            ax.set_xlabel(r'V [$km\, s^{-1}$]')
        elif x == 'z':
            ax.set_xlabel(r'Zone')
        else:
            ax.set_xscale('log')
            ax.set_xlabel(r'R [cm]')
    is_x_lim = xlim is not None
    is_y_lim = ylim is not None
    if x == 'rsun':
        x = self.r / phys.R_sun
    elif x == 'm':
        x = self.m / phys.M_sun
    elif x == 'v':
        x = self.V / 1e5  # to km/s
    elif x == 'z':  # zones
        # builtin int: the np.int alias was removed in numpy 1.24
        x = np.arange(0, stop=self.nzon, dtype=int) + 1
    else:
        x = self.r
    y_min = []
    y_max = []
    for el in elements:
        if self.is_set(el):
            y = self.el(el)
            ax.plot(x, y, label='{0}'.format(el), color=colors[el], ls=lntypes[el], linewidth=lw[el]
                    , marker=marker, markersize=markersize, alpha=alpha)
            if not is_y_lim:
                y_min.append(np.min(y))
                y_max.append(np.max(y))
    if not is_y_lim and len(y_min) > 0:
        # BUGFIX: the upper limit comes from the per-element maxima
        # (previously np.max(y_min), which left y_max unused)
        ylim = [np.min(y_min), np.max(y_max)]
    if not is_x_lim:
        xlim = np.min(x), np.max(x)
    if is_x_lim or not is_new_plot:
        ax.set_xlim(xlim)
    if is_y_lim or not is_new_plot:
        ax.set_ylim(ylim)
    ax.set_yscale('log')
    if is_new_plot:
        ax.set_ylabel(r'$X_i$')
    if is_legend:
        ax.legend(prop={'size': 9}, loc=loc, ncol=leg_ncol, fancybox=False, frameon=False,
                  markerscale=0, handlelength=3)
    return ax
def write_abn(self, fname, is_header=False, is_dum=False):
    """
    Write data to file in abn format: one row per zone, one column per element.
    :param is_header: prepend a '# zn ...' column-name line
    :param is_dum: insert three dummy columns after the zone number
    See code readheger.trf:
    _do km=1,NzoneHyd;
    write(13,'(i4,1p,19e10.3)')km,dum,dum,dum,
    -- No. Mr X(H He C N O Ne Na Mg Al Si S Ar Ca Fe Co Ni 56Ni)
    bh(km), bhe(km),bc(km),
    bn(km),bo(km),bne(km),bna(km),bmg(km),bal(km),bsi(km),
    bs(km),bar(km),bca(km),bfe(km),
    bni58(km),bni56(km); -- with Ni58 separated
    _od;
    :return: True if the file exists after writing
    """
    dum = 0.
    logger.info(' Write abn-data to %s' % fname)
    with open(fname, 'w') as f:
        # f.write('%d\n' % self.nzon_abn)
        if is_header:
            if is_dum:
                s = '%4s %10s %10s %10s' % ('# zn', ' ', ' ', ' ')
            else:
                s = '%4s' % '# zn'
            for ename in self.Elements:
                s += ' %10s' % ename
            f.write('%s\n' % s)
        for i in range(self.nzon):
            if is_dum:
                s = '%4d %10.3e %10.3e %10.3e' % (i + 1, dum, dum, dum)
            else:
                s = '%4d' % (i + 1)
            for ename in self.Elements:
                s += ' %10.3e' % self.el(ename)[i]
            f.write('%s\n' % s)
    return os.path.isfile(fname)
def plot_rho(self, x='m', ax=None, xlim=None, ylim=None, **kwargs):
    """Plot the density profile on a log-y axis.

    :param x: abscissa: 'r' (cm), 'm' (solar masses), 'v', 'z' (zone index);
        any other value plots radius on a log x axis
    :param ax: existing axes to draw into; a new figure is created when None
    :param xlim, ylim: axis limits; computed from the data when omitted (new plot only)
    :return: the axes (or None when matplotlib is unavailable)

    kwargs: lw, ls, label, color, xnorm (abscissa scale factor), marker,
    markersize, alpha.
    """
    if not is_matplotlib:
        return
    lw = kwargs.get('lw', 2)
    ls = kwargs.get('ls', '-')
    label = kwargs.get('label', '')
    color = kwargs.get('color', 'black')
    xnorm = kwargs.get('xnorm', 1)
    marker = kwargs.get('marker', None)
    markersize = kwargs.get('markersize', 4)
    alpha = kwargs.get('alpha', 1)
    is_new_plot = ax is None
    # setup figure
    if is_new_plot:
        plt.matplotlib.rcParams.update({'font.size': 14})
        fig = plt.figure(num=None, figsize=(9, 5), dpi=100, facecolor='w', edgecolor='k')
        gs1 = gridspec.GridSpec(1, 1)
        gs1.update(wspace=0.1, hspace=0.1, top=None, left=0.13, right=0.98)
        ax = fig.add_subplot(gs1[0, 0])
        ax.set_ylabel(r'$\rho, [g/cm^3]$ ')
        if x == 'r':
            ax.set_xlabel(r'R [cm]')
        elif x == 'm':
            ax.set_xlabel(r'M [$M_\odot$]')
        elif x == 'v':
            ax.set_xlabel(r'V [$km\, s^{-1}$]')
        elif x == 'z':
            ax.set_xlabel(r'Zone')
        else:
            ax.set_xscale('log')
            ax.set_xlabel(r'R [cm]')
    is_x_lim = xlim is not None
    is_y_lim = ylim is not None
    if x == 'm':
        xi = self.m / phys.M_sun * xnorm
    elif x == 'v':
        xi = self.V * xnorm
    elif x == 'z':
        # builtin int: the np.int alias was removed in numpy 1.24
        xi = np.arange(0, self.nzon, dtype=int) + 1
    else:
        xi = self.r * xnorm
    y = self.rho
    ax.semilogy(xi, y, color=color, ls=ls, linewidth=lw,
                marker=marker, markersize=markersize, label=label, alpha=alpha)
    if is_new_plot:
        if not is_x_lim and len(xi) > 0:
            xlim = [np.min(xi), np.max(xi)]
        ax.set_xlim(xlim)
        if not is_y_lim and len(y) > 0:
            ylim = [np.min(y), np.max(y)]
        ax.set_ylim(ylim)
    return ax
def plot_structure(self, elements=eve_elements, xlimR=None, xlimM=None, ylimRho=None, ylimChem=None,
                   title=None, figsize=(12, 8)):
    """Four-panel overview figure: chemistry vs R, chemistry vs M, rho vs R, rho vs M.

    :param elements: elements for the two composition panels
    :param xlimR, xlimM: x limits of the radius/mass panels
    :param ylimRho, ylimChem: y limits of the density/composition panels
    :param title: optional figure title drawn above the top-left panel
    :return: the matplotlib Figure
    """
    def set_xlim(ax, lim):
        # Apply x limits only when explicitly given.
        if lim is not None:
            # ax.set_xlim(lim[0] * 0.5, lim[1] * 2.)
            ax.set_xlim(lim)

    def set_ylim(ax, lim):
        # Apply y limits only when explicitly given.
        if lim is not None:
            # ax.set_ylim(lim[0]*0.1, lim[1]*10.)
            ax.set_ylim(lim)

    def lims(ain, aout, lim):
        # Map limits between R and M coordinates; only used by the commented-out code below.
        res = np.interp(lim, ain, aout)
        return res
    # if xlimR is not None and xlimM is None:
    #     xlimM = lims(self.r, self.m / phys.M_sun, xlimR)
    #     print("xlimM = {} for xlimR={}".format(xlimM, xlimR))
    # elif xlimM is not None and xlimR is None:
    #     xlimR = lims(self.m / phys.M_sun, self.r, xlimM)
    #     print("xlimR = {} for xlimM={}".format(xlimR, xlimM))
    # Set up the axes with gridspec
    fig = plt.figure(figsize=figsize)
    # fig.subplots_adjust(hspace=0.4, wspace=0.4)
    grid = plt.GridSpec(2, 3, hspace=0.2, wspace=0.4)
    axR = fig.add_subplot(grid[0, 0:2])
    axM = fig.add_subplot(grid[1, 0:2])
    axRhoR = fig.add_subplot(grid[0, 2])
    axRhoM = fig.add_subplot(grid[1, 2])
    # x='lgR' is not a recognized selector in plot_chem/plot_rho, so it falls
    # through to the default branch: radius in cm (scale is then set to log here).
    self.plot_chem(ax=axR, x='lgR', elements=elements)
    axR.set_xlabel('R, cm')
    axR.set_ylabel(r'$X_i$')
    axR.set_xscale('log')
    axR.legend(frameon=False, ncol=4)
    set_xlim(axR, xlimR)
    set_ylim(axR, ylimChem)
    self.plot_chem(ax=axM, x='m', elements=elements)
    axM.set_xlabel(r'$M, M_\odot$')
    axM.set_ylabel(r'$X_i$')
    set_xlim(axM, xlimM)
    set_ylim(axM, ylimChem)
    self.plot_rho(ax=axRhoR, x='lgR')
    axRhoR.set_xlabel('R, cm')
    axRhoR.set_xscale('log')
    axRhoR.set_ylabel(r'$\rho, g/cm^3$')
    set_xlim(axRhoR, xlimR)
    set_ylim(axRhoR, ylimRho)
    self.plot_rho(ax=axRhoM, x='m')
    axRhoM.set_xlabel(r'$M, M_\odot$')
    axRhoM.set_ylabel(r'$\rho, g/cm^3$')
    set_xlim(axRhoM, xlimM)
    set_ylim(axRhoM, ylimRho)
    if title is not None:
        axR.text(0.5, 1.07, title, transform=axR.transAxes, fontsize=14)
    return fig
def is_set(self, name):
    """True if the hydro quantity or element *name* has been loaded/set."""
    return name in self._loads
def set_hyd(self, name, vec, is_exp=False):
    """Store hydro profile *name*; with is_exp=True *vec* is log10 data and is exponentiated.

    :raises ValueError: when len(vec) differs from the zone count
    """
    if len(vec) != self.nzon:
        raise ValueError("The length of vector [%d] should be %d" % (len(vec), self.nzon))
    if name not in self._loads:
        self._loads.append(name)
    self._data_hyd[name] = 10. ** vec if is_exp else vec
def set_chem(self, name, vec, is_exp=False):
    """Store abundance profile for element *name*; is_exp=True exponentiates log10 data.

    :raises ValueError: when len(vec) differs from the zone count
    """
    if len(vec) != self.nzon:
        raise ValueError("The length of vector [%d] should be %d" % (len(vec), self.nzon))
    if name not in self._loads:
        self._loads.append(name)
    self._data_chem[name] = 10. ** vec if is_exp else vec
def zone_reduce(self, by=sM, diff=1.01, start=0, end=None, mode='g'):
    """
    Thin out zones so that the profile *by* forms (roughly) a geometric progression.
    :param by: 'Rho' 'M' 'T' 'R' 'V', default: 'M'
    :param diff: geom progression ratio, default: 1.01
    :param start: first zone index passed to portion_index
    :param end: last zone index passed to portion_index
    :param mode: shrink mode, passed through to pystella.util.math.shrink
    :return: a new PreSN containing only the selected zones
    """
    from pystella.util.math import shrink, portion_index
    x = self.hyd(by)

    def where(a):
        # Select the indices to keep within a portion.
        return shrink(a, diff=diff, mode=mode)

    idxs = portion_index(x, where, start=start, end=end, isByEl=False)
    newPreSN = PreSN(self.Name, len(idxs), elements=self.Elements)
    # hyd reshape: keep only the selected zones
    for v in PreSN.presn_hydro:
        old = self.hyd(v)
        new = old[idxs]
        newPreSN.set_hyd(v, new)
    # abn reshape
    for el in self.Elements:
        old = self.el(el)
        new = old[idxs]
        newPreSN.set_chem(el, new)
    # copy parameters
    newPreSN.copy_par(self)  # keys=['time_start', 'm_tot', 'm_core', 'r_cen']
    return newPreSN
def set_composition(self, zones, sample=None, is_add=True, is_normalize=True):
    """
    Set abundances in the given zones from a composition sample (default: solar).
    :param zones: iterable of 1-based zone numbers to modify
    :param sample: dict element -> mass fraction; sample_sol() when None
    :param is_add: add the sample on top of the existing abundances (True)
        or overwrite them (False)
    :param is_normalize: re-normalize each modified zone to sum to 1
    :return: None (the model is modified in place)
    """
    if sample is None:
        sample = sample_sol()
    # abn reshape: apply the sample fraction to each requested zone
    for el, Xi in sample.items():
        y = self.el(el)
        for k in zones:
            if is_add:
                y[k - 1] += Xi
            else:
                y[k - 1] = Xi
        self.set_chem(el, y)
    if is_normalize:
        for k in zones:
            self.chem_norm(k)
def bad_zone_reduce(self, diff=1.05, start=0, end=None, mode='g'):
    """Zone thinning along the mass coordinate.

    NOTE(review): this function appears broken (consistent with its name):
    `idxs` is first built correctly from arange(start) + shrink(...) +
    arange(end, len(x)), but is then immediately overwritten — truncated to
    `idxs[:start - 1]` when start > 0, or replaced by an empty list otherwise —
    so the resulting PreSN is (nearly) empty. Prefer zone_reduce(); confirm
    before relying on this.
    """
    from pystella.util.math import shrink
    x = self.m
    if end is None:
        end = len(x)
    idxs = np.arange(len(x))  # NOTE(review): unused, overwritten below
    xx = x[start:end]
    idx = shrink(xx, diff=diff, mode=mode)
    idxs = np.concatenate((np.arange(start), idx, np.arange(end, len(x))))
    if start > 0:
        idxs = idxs[:start - 1]
    else:
        idxs = []
    newPreSN = PreSN(self.Name, len(idxs), elements=self.Elements)
    # hyd reshape
    for v in PreSN.presn_hydro:
        old = self.hyd(v)
        new = old[idxs]
        newPreSN.set_hyd(v, new)
    # abn reshape
    for el in self.Elements:
        old = self.el(el)
        new = old[idxs]
        newPreSN.set_chem(el, new)
    return newPreSN
def cut(self, name=None, start=0, end=None, elements=None, pars=None):
    """
    Cut zones in the envelope between start:end
    @param name: the name of new PreSN. Take from parent, if it's None.
    @param start: zone number of the left edge. Default: 0 (first zone)
    @param end: zone number of the right edge. Default: None, (equal last zone)
    @param elements: the elements to keep. Take from parent, if it's None.
    @param pars: unused (kept for interface compatibility)
    @return: new PreSN
    """
    name = self.Name if name is None else name
    end = self.nzon if end is None else end
    elements = self.Elements if elements is None else elements
    trimmed = PreSN(name, end - start, elements=elements)
    # Slice every hydro profile down to [start:end).
    for quantity in PreSN.presn_hydro:
        trimmed.set_hyd(quantity, self.hyd(quantity)[start:end])
    # Slice the abundance profiles the same way.
    for ename in elements:
        trimmed.set_chem(ename, self.el(ename)[start:end])
    # Carry over all scalar parameters.
    trimmed.copy_par(self)
    return trimmed
def reshape(self, nz: int, name=None, start: int = 0, end=None, axis=sM, xmode='resize', kind='np'):
    """
    Reshape parameters of envelope from start to end onto nz zones.
    :param nz: new number of zones in the reshaped region (<=0 keeps the current nzon)
    :param name: the name of new PreSN. Take from parent, if it's None.
    :param start: zone number to start reshaping. Default: 0 (first zone)
    :param end: zone number to end reshaping. Default: None, (equal last zone)
    :param axis: [M OR R OR V] - reshape along mass or radius or velocity coordinate. Default: M
    :param xmode: [lin OR rlog OR resize] - linear OR reversed log10 OR add/remove points. Default: resize
    :param kind: [np OR spline OR gauss OR interp1d(..kind)], kind is ('np=np.interp', 'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'). Default: np
    :return: new preSN with reshaped zones
    """
    from scipy.interpolate import interp1d
    # from scipy.interpolate import splev, splrep
    from scipy.interpolate import UnivariateSpline
    from scipy.ndimage import gaussian_filter1d

    def rlogspace(s, e, n):
        # Log-spaced grid, mirrored so spacing tightens toward the outer edge.
        r = np.exp(np.linspace(np.log(s), np.log(e), n))
        r = (e - r + s)
        return r[::-1]

    def add_point(x, mode='lin'):  # 'lin' 'log'
        """
        Find max interval in x and insert the new point in the middle (lin or geom) of it
        :param x: array
        :param mode: 'lin' or 'geom'
        :return: the array with one extra point
        """
        dif = np.diff(x)
        idx = np.argmax(dif)
        if mode == 'lin':
            p = (x[idx] + x[idx + 1]) / 2.
        elif mode == 'geom':
            p = np.sqrt(x[idx] * x[idx + 1])
        else:
            raise ValueError('Mode should be "lin" lor "geom"')
        print(' To interval {}[{:.6e} - {:.6e}] added {} '.format(idx, x[idx], x[idx+1], p))
        xn = np.insert(x, idx + 1, p)
        return xn

    def remove_point(x):  # 'lin' 'log'
        """
        Find min delta and remove the right point
        """
        dif = np.diff(x)
        idx = np.argmin(dif)
        xn = np.delete(x, idx + 1)
        return xn

    def resize_points(x, n: int, mode: str = 'lin'):
        """
        Add or remove points in the array x
        :param x: the array is not changed
        :param n: the target number of points
        :param mode: should be "lin" or "geom". Default: lin
        :return: the resized array
        """
        n_old = len(x)
        xn = np.copy(x)
        if n == n_old:  # nothing to do, return the copy
            return xn
        if n > n_old:
            f = lambda xxx: add_point(xxx, mode=mode)
        else:
            f = lambda xxx: remove_point(xxx)
        for i in range(abs(n - n_old)):
            xn = f(xn)
        return xn

    def x_reshaped(x, n):
        # Build the new coordinate grid according to xmode.
        if xmode == 'lin':
            res = np.linspace(x[0], x[-1], n)
        elif xmode == 'rlog':
            res = rlogspace(x[0], x[-1], n)
        elif xmode == 'resize':
            res = resize_points(x, n)
        else:
            raise ValueError('Such xmode "{}" is not supported.'.format(xmode))
        return res

    def interp(xn, x, v, s: int, e: int, kind: str, is_log: bool = False):
        # Interpolate v(x) onto xn inside [s:e); points before s are kept untouched.
        res = []
        if s > 0:
            res = v[:s]  # save points before start
        xi = x[s:e]
        yi = v[s:e]
        if is_log:
            yi = np.log10(yi)
        if kind == 'np':
            yy = np.interp(xn, xi, yi)
        elif kind == 'spline':
            spl = UnivariateSpline(xi, yi)
            yy = spl(xn)
        elif kind == 'gauss':
            yii = gaussian_filter1d(yi, 3)
            yy = np.interp(xn, xi, yii)
        else:
            interp_linear = interp1d(xi, yi, kind=kind)
            yy = interp_linear(xn)
        if is_log:
            yy = 10.**yy
        res = np.append(res, yy)
        return res

    if nz <= 0:
        nz = self.nzon
    # NOTE(review): the zones before `start` are kept verbatim, so the new
    # model has start + nz zones in total — confirm this is intended.
    nznew = start + nz
    if name is None:
        name = self.Name
    newPreSN = PreSN(name, nznew, elements=self.Elements)
    if end is None:
        end = self.nzon
    print(f'axis= {axis} nz= {nz} nznew= {nznew} start= {start} end= {end}')
    # hyd reshape: pick the interpolation coordinate
    if axis == PreSN.sM:
        xx = self.m
    elif axis == PreSN.sR:
        xx = self.r
    elif axis == PreSN.sV:
        xx = self.V
    else:
        raise ValueError('Such axis "{}" is not supported.'.format(axis))
    xx = xx / max(abs(xx))  # norm
    xxx = x_reshaped(xx, nz)
    # The new grid must be monotonically non-decreasing.
    if np.any(np.diff(xxx) < 0.):
        print('ERROR:', xxx)
        raise ValueError('Some of {} elements is < 0.'.format(len(xxx)))
    # from pprint import pprint
    for vv in PreSN.presn_hydro:
        old = self.hyd(vv)
        new = interp(xxx, xx, old, s=start, e=end, kind=kind, is_log=False)
        # if vv == PreSN.sRho:
        #     rho_new = interp(xxx, xx, old, s=start, e=end, kind='next')  # , is_log=True)
        # else:
        #     new = interp(xxx, xx, old, s=start, e=end, kind=kind)
        newPreSN.set_hyd(vv, new)
        # print(f'{vv} before: old[{len(xx)}-1]= {old[len(xx)-2]:12.7e} new[{len(xxx)}-1]= {new[len(xxx)-2]:12.7e}')
        print(f'{vv} before: old[{len(xx)}]= {old[len(xx)-1]:12.7e} new[{len(xxx)}]= {new[len(xxx)-1]:12.7e}')
        # print(f'\n{vv} before: {len(xx)}')
        # pprint(list(zip(range(1, len(xx)+1), xx, old)))
        # print(f'{vv} after: {len(xxx)}')
        # pprint(list(zip(range(1, len(xxx)+1), xxx, new)))
    # Density Normalization: m_tot(NEW) should be equal m_tot(OLD)
    m_rho = newPreSN.mass_tot_rho() + newPreSN.m_core
    rho = newPreSN.rho * newPreSN.m_tot / m_rho
    newPreSN.set_hyd(PreSN.sRho, rho)
    # abn reshape (always plain np.interp for abundances)
    for el in self.Elements:
        old = self.el(el)
        new = interp(xxx, xx, old, s=start, e=end, kind='np')
        # new = interp(xxx, xx, old, s=start, e=end, kind=kind, is_log=True)
        newPreSN.set_chem(el, new)
    # copy parameters
    newPreSN.copy_par(self)  # keys=['time_start', 'm_tot', 'm_core', 'r_cen']
    # for p in ['time_start', 'm_tot', 'm_core', 'r_cen']:
    #     v = getattr(self, p)
    #     newPreSN.set_par(p, v)
    return newPreSN
def clone(self):
    """Return a new PreSN with the same parameters, hydro profiles and abundances."""
    twin = PreSN(self.Name, self.nzon, elements=self.Elements)
    twin.copy_par(self)
    # hydro profiles
    for quantity in self.presn_hydro:
        twin.set_hyd(quantity, self.hyd(quantity))
    # chemical abundances
    for ename in self.Elements:
        twin.set_chem(ename, self.el(ename))
    return twin
def boxcar(self, box_dm: float = 0.5, n: int = 4, el_included=None, is_info: bool = False):
    """
    The function runs a boxcar average to emulate the mixing of chemical composition.
    :param box_dm: float. The boxcar width. Default value is 0.5 Msun.
    :param n: int. The number of repeats. Default value is 4
    :param el_included: the tuple of included elements. If None = all elements are included. Default: None
    :param is_info: bool. Prints some debug information. Default value is False
    :return: a new (cloned) PreSN with smoothed abundances; self is untouched
    """
    clone = self.clone()
    abund = clone.abund()
    # abun = np.zeros((clone.nzon, len(clone.Elements)))
    if el_included is None:
        el_included = clone.Elements
    m = clone.m / phys.M_sun
    # Zone mass widths; the last diff is duplicated so len(dmass) == nzon.
    dmass = np.diff(m)
    dmass = np.insert(dmass, -1, dmass[-1])
    # todo Check left boundary condition fo Fe, Si
    for l in range(n):  # the iteration number
        if is_info:
            print(f'Attempt # {l}')
        for k in range(clone.nzon):
            # Grow the window [k:kk) until it spans at least box_dm solar masses.
            kk = k + 1
            dm = dmass[k]
            while dm < box_dm and kk <= clone.nzon:
                kk += 1
                dm = np.sum(dmass[k:kk])
            if is_info:
                print(f'{k}: kk= {kk}  dm= {dm:.4f} m= {m[k]:.4f}')
            if dm > 1e-6:
                # Replace zone k's abundance with the mass-weighted window mean.
                for i, ename in enumerate(clone.Elements):
                    if ename in el_included:
                        dm_e = np.dot(abund[k:kk, i], dmass[k:kk])
                        abund[k, i] = dm_e / dm
                        # abun[k,i] = x[k]
    #
    for i, ename in enumerate(clone.Elements):
        # print(ename, ': ', abun[:,i])
        clone.set_chem(ename, abund[:, i])
        if is_info:
            print(clone.el(ename))
    return clone
# ==============================================
def load_rho(fname, path: str = None):
    """Load a PreSN model from a *.rho table.

    Columns whose name starts with 'lg' are stored as log10 and exponentiated;
    abundance columns are always exponentiated.

    :param fname: file name (joined with *path* when path is given)
    :raises ValueError: when the file does not exist
    :return: a PreSN named after the file stem
    """
    if path is not None:
        fname = os.path.join(path, fname)
    if not os.path.isfile(fname):
        logger.error(' No rho-data for %s' % fname)
        raise ValueError(' No rho-data for %s' % fname)
    logger.info(' Load rho-data from %s' % fname)
    col_names = "zone mass lgR lgTp lgRho u Ni56 H He C N O Ne Na Mg Al Si S Ar Ca Fe Ni"
    # BUGFIX: the formats list must match the number of COLUMNS;
    # len(col_names) counted the characters of the string, making the
    # 'names'/'formats' lengths disagree and np.dtype raise.
    dt = np.dtype({'names': col_names.split(), 'formats': np.repeat('f8', len(col_names.split()))})
    data = np.loadtxt(fname, comments='#', skiprows=2, dtype=dt)
    nz = len(data['lgR'])
    ###
    name = os.path.basename(os.path.splitext(fname)[0])
    col_map = {PreSN.sR: 'lgR', PreSN.sM: 'mass', PreSN.sT: 'lgTp', PreSN.sRho: 'lgRho', PreSN.sV: 'u'}
    presn = PreSN(name, nz)
    # Default velocity; presumably overwritten by the sV <- 'u' mapping below — verify.
    presn.set_hyd('V', np.zeros(nz))
    for k, v in col_map.items():
        presn.set_hyd(k, data[v], is_exp=v.startswith('lg'))
    # CGS: mass column is in solar masses
    presn.set_hyd('M', presn.m * phys.M_sun)
    for ename in presn.Elements:
        presn.set_chem(ename, data[ename], is_exp=True)
    return presn
def load_hyd_abn(name, path='.', abn_elements=PreSN.stl_elements, skiprows=0, comments='#',
                 is_rho=False, is_dm=True, is_dum=False):
    """
    Load progenitor from hyd- + abn- files ("<name>.hyd" and "<name>.abn" under *path*).
    is_dm: if True, the column 2 is used as dM. Default: True (False means the column 2 is used as M).
    is_rho: if True, reconstruct the mass coordinate from R/Rho instead of the M column.
    if is_dum:
        col_names = ("zone dum1 dum2 dum3 " + ' '.join(abn_elements)).split()
    else:
        col_names = ("zone " + ' '.join(abn_elements)).split()
    Code readheger.trf:
    BM1=cutmass; -- core Mass
    r(0)=Rcen;
    dum=0.;
    write(12,'(1p,e12.3,i6,2e13.5)') timeStart, NzoneHyd, BM1, Rcen;
    do km=1,NzoneHyd;
    write(12,'(1x,i4,1p,e12.4,e18.10,3e15.7,2e12.4)')
    km, dum, rHyd(km), rhoHyd(km), TpHyd(km), uHyd(km), aMr(km), dum;
    enddo;
    :return: PreSN, or None when one of the two files is missing
    """
    # abn_elements = 'H He C N O Ne Na Mg Al Si S Ar Ca Fe Ni Ni56'.split()
    # hydro
    ext_hyd = '.hyd'
    hyd_file = os.path.join(path, name + ext_hyd)
    if not os.path.isfile(hyd_file):
        logger.error(' No file for %s' % hyd_file)
        return None
    logger.info(' Load hyd-data from %s' % hyd_file)

    def set_params(pre, a):
        # Interpret the numeric header: 5 values = (t0, nzon, m_core, r_cen, rho_cen),
        # 4 = without rho_cen, 2 = (t0, nzon) only; m_core is given in solar masses.
        if len(a) > 0:
            if len(a) == 5:
                time_start, nzon, m_core, r_cen, rho_cen = a
                pre.set_par('time_start', time_start)
                pre.set_par('m_core', m_core * phys.M_sun)
                pre.set_par('r_cen', r_cen)
                pre.set_par('rho_cen', rho_cen)
            elif len(a) == 4:
                time_start, nzon, m_core, r_cen = a
                pre.set_par('time_start', time_start)
                pre.set_par('m_core', m_core * phys.M_sun)
                pre.set_par('r_cen', r_cen)
            elif len(a) == 2:
                time_start, nzon = a
                pre.set_par('time_start', time_start)
        return pre
    # read table data
    if is_dm:
        col_names = "zone dm R Rho T V M".split()
    else:
        col_names = "zone M R Rho T V M2".split()
    a = []
    # Load header
    with open(hyd_file, 'r') as f:
        header_line = f.readline()
        if len(header_line) > 0:
            a = [float(x) for x in header_line.split()]
    # Load data
    dt = np.dtype({'names': col_names,
                   'formats': ['i4'] + list(np.repeat('f8', len(col_names) - 1))})
    data_hyd = np.loadtxt(hyd_file, comments='#', skiprows=1, dtype=dt, usecols=np.arange(len(col_names)))
    nz = len(data_hyd['R'])
    presn = PreSN(name, nz, elements=abn_elements)
    set_params(presn, a)
    # NOTE(review): despite the name, col_map is a *set* of quantity names here.
    col_map = {PreSN.sR, PreSN.sT, PreSN.sRho, PreSN.sV}
    for v in col_map:
        presn.set_hyd(v, data_hyd[v], is_exp=v.startswith('lg'))
    # Set header data
    # NOTE(review): set_params was already called above; this second call is redundant.
    set_params(presn, a)
    # Set Mass
    if is_rho:
        # Rebuild the mass coordinate by integrating shell masses from R and Rho.
        r = presn.r
        rho = presn.rho
        r = np.insert(r, 0, presn.r_cen)
        # rho = np.insert(rho, 0, presn.rho_cen)
        dm = np.zeros(nz)
        for i in range(nz):
            dm[i] = (r[i+1]**3 - r[i]**3) * rho[i] * 4./3. * np.pi
            # dm[i] = (r[i + 1] ** 3 - r[i] ** 3) * rho[i + 1] * 4. * np.pi / 3.
        m = np.cumsum(dm)
        m += presn.m_core
    else:
        m = data_hyd[PreSN.sM] * phys.M_sun
    presn.set_hyd(PreSN.sM, m)
    # Set chemical composition
    ext_abn = '.abn'
    abn_file = os.path.join(path, name + ext_abn)
    if not os.path.isfile(abn_file):
        logger.error(' No file for %s' % abn_file)
        return None
    logger.info(' Load abn-data from %s' % abn_file)
    col_names = ("zone " + ' '.join(abn_elements)).split()
    if is_dum:
        col_names = ("zone dum1 dum2 dum3 " + ' '.join(abn_elements)).split()
    # dt = np.dtype({'names': col_names, 'formats': np.repeat('f8', len(col_names))})
    dt = np.dtype({'names': col_names,
                   'formats': ['i4'] + list(np.repeat('f8', len(col_names) - 1))})
    # logger.info(dt)
    data_chem = np.loadtxt(abn_file, comments=comments, skiprows=skiprows, dtype=dt)
    for ename in abn_elements:
        presn.set_chem(ename, data_chem[ename])
    return presn
def sample_sol():
    """Solar-composition mass fractions, normalized so they sum to 1.

    :return: dict element name -> normalized mass fraction
    """
    raw = dict(H=7.0600E-01, He=2.7500E-01, C=3.0700E-03, N=1.1100E-03, O=9.6100E-03, Ne=1.7500E-03, Na=3.3400E-05,
               Mg=6.6000E-04, Al=5.8100E-05, Si=7.1100E-04, S=4.1800E-04, Ar=9.2800E-05, Ca=6.2000E-05,
               Fe=1.3700E-03, Ni=7.3400e-05)
    total = sum(raw.values())
    return {ename: frac / total for ename, frac in raw.items()}
|
{"hexsha": "7ab7519fe6f9117ac1ad56dbbd92861e31bd11c6", "size": 41252, "ext": "py", "lang": "Python", "max_stars_repo_path": "pystella/model/sn_eve.py", "max_stars_repo_name": "cradesto/pystella", "max_stars_repo_head_hexsha": "f6f44ed12d9648585a52a09e15d494daa4c70c59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-08T13:11:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-08T13:11:57.000Z", "max_issues_repo_path": "pystella/model/sn_eve.py", "max_issues_repo_name": "cradesto/pystella", "max_issues_repo_head_hexsha": "f6f44ed12d9648585a52a09e15d494daa4c70c59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pystella/model/sn_eve.py", "max_forks_repo_name": "cradesto/pystella", "max_forks_repo_head_hexsha": "f6f44ed12d9648585a52a09e15d494daa4c70c59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0925619835, "max_line_length": 146, "alphanum_fraction": 0.5070057209, "include": true, "reason": "import numpy,from scipy", "num_tokens": 12034}
|
#-*-coding:utf-8-*-
import numpy as np
def coarse_forecast(SimRes, Bath, idx_list, lowlim, highlim):
    '''
    Takes wave height from coarse forecast, crops at lowlim bathymetry value and forecasts using Green's Law to highlim.
    SimRes: Coarse Grid simulation results, shape (east, north, N)
    Bath: Coarse Grid Bathymetry (negative below sea level)
    idx_list: Index List of sorted bathymetry; row 0 = x indices, row 1 = y indices, deepest first
    lowlim: Cut off value for which wave heights are taken directly from the coarse results.
    highlim: Upper limit of bathymetry value at which the Green's law is used.
    '''
    print("Regional Forecasts")
    print("lowlim_coarse = ", lowlim)
    print("highlim_coarse = ", highlim)
    east, north, N = np.shape(SimRes)
    Computed_heights = np.copy(SimRes)
    ### Domain max/min #####
    max_x = np.max(idx_list[0, :])
    min_x = np.min(idx_list[0, :])
    max_y = np.max(idx_list[1, :])
    min_y = np.min(idx_list[1, :])
    u, v = int(idx_list[0, 0]), int(idx_list[1, 0])
    # Walk the cells from deepest to shallowest.
    for i in range(0, east * north):
        # (x, y): previously processed (deeper) cell; (u, v): current cell.
        x, y = u, v
        u, v = int(idx_list[0, i]), int(idx_list[1, i])
        ### Neighbour Region #####
        xind = [u - 1, u, u + 1]
        yind = [v - 1, v, v + 1]
        if Bath[u, v] < lowlim:
            # Deep water: copy the coarse simulation heights directly.
            for n in range(N):
                Computed_heights[u, v, n] = SimRes[u, v, n]
        elif lowlim <= Bath[u, v] < highlim:
            # Use Green's law, propagating from an adjacent deeper cell.
            if (not x in xind) and (not y in yind):
                # The previous cell is not adjacent: search for a deeper neighbour.
                neighx = u
                neighy = v
                count = 3
                x, y, check = neighbour_check(neighx, neighy, Bath, xind, yind, min_x, max_x, min_y, max_y, count)
                if (check == False):
                    x, y = int(idx_list[0, i - 1]), int(idx_list[1, i - 1])
            for n in range(N):
                # BUGFIX: 0.25 instead of (1/4) — under Python 2 the latter is
                # integer division (= 0), which silenced the 1/4 shoaling exponent.
                Computed_heights[u, v, n] = Computed_heights[x, y, n] * (Bath[x, y] / Bath[u, v]) ** 0.25  # 0 if bathy<50m
        elif highlim <= Bath[u, v] < 0.0:
            # Shallower than the Green's-law limit: no forecast.
            for n in range(N):
                Computed_heights[u, v, n] = 0.0
    print("Regional Forecasts completed \n")
    return Computed_heights
def neighbour_check(neighx, neighy, FineBath, xind, yind, min_x, max_x, min_y, max_y, count):
    """Search the window (xind, yind) for an in-bounds cell deeper than (neighx, neighy).

    Returns (x, y, found). When an in-bounds candidate is not deeper, the window
    radius is widened and the search repeats.
    NOTE(review): `num` is never incremented, so the `num <= count` guard never
    terminates the loop; if no strictly deeper cell exists, the window grows
    without bound (potential infinite loop). Also xind/yind are reassigned while
    being iterated — the widened window only takes effect on the next `while`
    pass. Confirm intent before relying on the termination behavior.
    """
    tempBath = FineBath[neighx, neighy]
    check = False
    num = 0
    radius = 1
    while check == False and num <= count:
        for k in xind:
            for j in yind:
                if (min_x < k < max_x) and (min_y < j < max_y):
                    if (FineBath[k, j] < tempBath):
                        # Found a deeper in-bounds cell; remember it and stop after this pass.
                        neighx = k
                        neighy = j
                        tempBath = FineBath[neighx, neighy]
                        check = True
                    else:
                        # Not deeper: widen the search window for the next while-pass.
                        radius += 1
                        count += 1
                        xind = np.arange(neighx - radius, neighx + radius + 1, 1)
                        yind = np.arange(neighy - radius, neighy + radius + 1, 1)
    return neighx, neighy, check
|
{"hexsha": "064e0ad8c2fd3eee46194771690c4374420b9b13", "size": 2399, "ext": "py", "lang": "Python", "max_stars_repo_path": "transfer_function/src/coarse_forecast.py", "max_stars_repo_name": "DanGiles/Localised-Tsunami-Response", "max_stars_repo_head_hexsha": "33b041c94b55aec66b7940d3979e9e4f788cd702", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "transfer_function/src/coarse_forecast.py", "max_issues_repo_name": "DanGiles/Localised-Tsunami-Response", "max_issues_repo_head_hexsha": "33b041c94b55aec66b7940d3979e9e4f788cd702", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "transfer_function/src/coarse_forecast.py", "max_forks_repo_name": "DanGiles/Localised-Tsunami-Response", "max_forks_repo_head_hexsha": "33b041c94b55aec66b7940d3979e9e4f788cd702", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1558441558, "max_line_length": 117, "alphanum_fraction": 0.6581909129, "include": true, "reason": "import numpy", "num_tokens": 833}
|
import numpy as np
import tensorflow as tf
from stable_baselines.common.tf_layers import linear
from tensorflow.python.ops import math_ops
from gym import spaces
class ProbabilityDistribution(object):
    """Abstract base class for probability distributions over actions.

    Concrete subclasses supply the TF ops for the mode, sampling,
    (negative) log-likelihood, KL divergence and entropy.
    """

    def __init__(self):
        super(ProbabilityDistribution, self).__init__()

    def flatparam(self):
        """Return the flat tensor of distribution parameters."""
        raise NotImplementedError

    def mode(self):
        """Return the most likely action."""
        raise NotImplementedError

    def neglogp(self, x):
        """Return the negative log-likelihood of ``x``."""
        raise NotImplementedError

    def kl(self, other):
        """Return KL(self || other)."""
        raise NotImplementedError

    def entropy(self):
        """Return the distribution's entropy."""
        raise NotImplementedError

    def sample(self):
        """Return a stochastic sample from the distribution."""
        raise NotImplementedError

    def logp(self, x):
        """Log-likelihood of ``x``, defined as the negation of ``neglogp``."""
        return -self.neglogp(x)
class ProbabilityDistributionType(object):
    """Factory describing a family of probability distributions.

    Subclasses declare which ProbabilityDistribution class to build, the
    shapes/dtypes of its parameters and samples, and how to produce a
    distribution from latent network output.
    """

    def probability_distribution_class(self):
        """Return the ProbabilityDistribution subclass this type builds."""
        raise NotImplementedError

    def proba_distribution_from_flat(self, flat):
        """Instantiate a distribution from a flat parameter tensor."""
        dist_cls = self.probability_distribution_class()
        return dist_cls(flat)

    def proba_distribution_from_latent(
        self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0
    ):
        """Build (distribution, flat params, q-values) from latent vectors."""
        raise NotImplementedError

    def param_shape(self):
        """Shape of the flat parameter tensor (excluding the batch axis)."""
        raise NotImplementedError

    def sample_shape(self):
        """Shape of a single sampled action (excluding the batch axis)."""
        raise NotImplementedError

    def sample_dtype(self):
        """TF dtype of sampled actions."""
        raise NotImplementedError

    def param_placeholder(self, prepend_shape, name=None):
        """Placeholder for distribution parameters, prefixed by ``prepend_shape``."""
        full_shape = prepend_shape + self.param_shape()
        return tf.compat.v1.placeholder(dtype=tf.float32, shape=full_shape, name=name)

    def sample_placeholder(self, prepend_shape, name=None):
        """Placeholder for sampled actions, prefixed by ``prepend_shape``."""
        full_shape = prepend_shape + self.sample_shape()
        return tf.compat.v1.placeholder(
            dtype=self.sample_dtype(), shape=full_shape, name=name
        )
class CategoricalProbabilityDistributionType(ProbabilityDistributionType):
    """Distribution type for a single discrete action with ``n_cat`` choices."""

    def __init__(self, n_cat):
        # Number of mutually exclusive categories (size of the action space).
        self.n_cat = n_cat

    def probability_distribution_class(self):
        return CategoricalProbabilityDistribution

    def proba_distribution_from_latent(
        self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0
    ):
        """Map latent vectors to (distribution, logits, q-values)."""
        # Policy head: unnormalized log-probabilities over the categories.
        pdparam = linear(pi_latent_vector, 'pi', self.n_cat,
                         init_scale=init_scale, init_bias=init_bias)
        # Value head: one q-value estimate per category.
        q_values = linear(vf_latent_vector, 'q', self.n_cat,
                          init_scale=init_scale, init_bias=init_bias)
        return self.proba_distribution_from_flat(pdparam), pdparam, q_values

    def param_shape(self):
        return [self.n_cat]

    def sample_shape(self):
        # Samples are scalar category indices.
        return []

    def sample_dtype(self):
        return tf.int64
class MultiCategoricalProbabilityDistributionType(ProbabilityDistributionType):
    """Distribution type for a MultiDiscrete action space.

    ``n_vec`` holds the number of categories per sub-action; the flat
    parameter vector concatenates the logits of all sub-actions.
    """

    def __init__(self, n_vec):
        self.n_vec = n_vec.astype(np.int32)
        # Guard against overflow from casting an unsigned dtype.
        assert (self.n_vec > 0).all(), "Casting uint32 to int32 was invalid"

    def probability_distribution_class(self):
        return MultiCategoricalProbabilityDistribution

    def proba_distribution_from_flat(self, flat):
        # The multi-categorical needs ``n_vec`` to split the flat logits.
        return MultiCategoricalProbabilityDistribution(self.n_vec, flat)

    def proba_distribution_from_latent(
        self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0
    ):
        """Map latent vectors to (distribution, concatenated logits, q-values)."""
        total = sum(self.n_vec)
        pdparam = linear(pi_latent_vector, 'pi', total,
                         init_scale=init_scale, init_bias=init_bias)
        q_values = linear(vf_latent_vector, 'q', total,
                          init_scale=init_scale, init_bias=init_bias)
        return self.proba_distribution_from_flat(pdparam), pdparam, q_values

    def param_shape(self):
        return [sum(self.n_vec)]

    def sample_shape(self):
        # One integer index per sub-action.
        return [len(self.n_vec)]

    def sample_dtype(self):
        return tf.int64
class DiagGaussianProbabilityDistributionType(ProbabilityDistributionType):
    """Distribution type for a continuous (Box) action space of ``size`` dims."""

    def __init__(self, size):
        self.size = size

    def probability_distribution_class(self):
        return DiagGaussianProbabilityDistribution

    def proba_distribution_from_flat(self, flat):
        dist_cls = self.probability_distribution_class()
        return dist_cls(flat)

    def proba_distribution_from_latent(
        self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0
    ):
        """Map latent vectors to (distribution, mean, q-values).

        The mean comes from a linear head; the log-std is a free TF
        variable shared across the batch, initialised to zero.
        """
        mean = linear(pi_latent_vector, 'pi', self.size,
                      init_scale=init_scale, init_bias=init_bias)
        logstd = tf.compat.v1.get_variable(
            name='pi/logstd',
            shape=[1, self.size],
            initializer=tf.compat.v1.zeros_initializer(),
        )
        # `mean * 0.0 + logstd` broadcasts the shared log-std to batch shape.
        pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
        q_values = linear(vf_latent_vector, 'q', self.size,
                          init_scale=init_scale, init_bias=init_bias)
        return self.proba_distribution_from_flat(pdparam), mean, q_values

    def param_shape(self):
        # Mean and log-std per action dimension.
        return [2 * self.size]

    def sample_shape(self):
        return [self.size]

    def sample_dtype(self):
        return tf.float32
class BernoulliProbabilityDistributionType(ProbabilityDistributionType):
    """Distribution type for a MultiBinary action space of ``size`` bits."""

    def __init__(self, size):
        self.size = size

    def probability_distribution_class(self):
        return BernoulliProbabilityDistribution

    def proba_distribution_from_latent(
        self, pi_latent_vector, vf_latent_vector, init_scale=1.0, init_bias=0.0
    ):
        """Map latent vectors to (distribution, logits, q-values)."""
        pdparam = linear(pi_latent_vector, 'pi', self.size,
                         init_scale=init_scale, init_bias=init_bias)
        q_values = linear(vf_latent_vector, 'q', self.size,
                          init_scale=init_scale, init_bias=init_bias)
        return self.proba_distribution_from_flat(pdparam), pdparam, q_values

    def param_shape(self):
        return [self.size]

    def sample_shape(self):
        return [self.size]

    def sample_dtype(self):
        return tf.int32
class CategoricalProbabilityDistribution(ProbabilityDistribution):
    """Categorical distribution parameterized by unnormalized logits."""

    def __init__(self, logits):
        # Unnormalized log-probabilities over the categories.
        self.logits = logits
        super(CategoricalProbabilityDistribution, self).__init__()

    def flatparam(self):
        return self.logits

    def mode(self):
        # Most likely category = argmax of the logits.
        return tf.argmax(input=self.logits, axis=-1)

    def neglogp(self, x):
        """Negative log-likelihood of integer actions ``x``.

        Softmax cross-entropy against the one-hot action equals -log p(x);
        stop_gradient keeps the labels out of the gradient path.
        """
        one_hot_actions = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
        return tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logits, labels=tf.stop_gradient(one_hot_actions)
        )

    def kl(self, other):
        """KL(self || other), computed from raw logits.

        Logits are shifted by their max before exponentiation for
        numerical stability (softmax is shift-invariant).
        """
        a_0 = self.logits - tf.reduce_max(
            input_tensor=self.logits, axis=-1, keepdims=True
        )
        a_1 = other.logits - tf.reduce_max(
            input_tensor=other.logits, axis=-1, keepdims=True
        )
        exp_a_0 = tf.exp(a_0)
        exp_a_1 = tf.exp(a_1)
        # Softmax normalizers for both distributions.
        z_0 = tf.reduce_sum(input_tensor=exp_a_0, axis=-1, keepdims=True)
        z_1 = tf.reduce_sum(input_tensor=exp_a_1, axis=-1, keepdims=True)
        p_0 = exp_a_0 / z_0
        # sum_i p_i * (log p_i - log q_i)
        return tf.reduce_sum(
            input_tensor=p_0 * (a_0 - tf.math.log(z_0) - a_1 + tf.math.log(z_1)),
            axis=-1,
        )

    def entropy(self):
        """Shannon entropy, using the same max-shift stabilization."""
        a_0 = self.logits - tf.reduce_max(
            input_tensor=self.logits, axis=-1, keepdims=True
        )
        exp_a_0 = tf.exp(a_0)
        z_0 = tf.reduce_sum(input_tensor=exp_a_0, axis=-1, keepdims=True)
        p_0 = exp_a_0 / z_0
        return tf.reduce_sum(input_tensor=p_0 * (tf.math.log(z_0) - a_0), axis=-1)

    def sample(self):
        """Draw a sample via the Gumbel-max trick.

        -log(-log(U)) is Gumbel noise; argmax over (logits + Gumbel noise)
        yields an exact categorical sample.
        """
        uniform = tf.random.uniform(
            tf.shape(input=self.logits), dtype=self.logits.dtype
        )
        return tf.argmax(
            input=self.logits - tf.math.log(-tf.math.log(uniform)), axis=-1
        )

    @classmethod
    def fromflat(cls, flat):
        """Build a distribution directly from flat logits."""
        return cls(flat)
class MultiCategoricalProbabilityDistribution(ProbabilityDistribution):
    """Joint distribution over several independent categorical sub-actions.

    The flat logits tensor is split into one chunk per sub-action and each
    chunk is handled by a CategoricalProbabilityDistribution; joint
    quantities are sums/stacks over the per-dimension distributions.
    """

    def __init__(self, nvec, flat):
        self.flat = flat
        # One categorical distribution per sub-action dimension.
        splits = tf.split(flat, nvec, axis=-1)
        self.categoricals = [CategoricalProbabilityDistribution(s) for s in splits]
        super(MultiCategoricalProbabilityDistribution, self).__init__()

    def flatparam(self):
        return self.flat

    def mode(self):
        modes = [cat.mode() for cat in self.categoricals]
        return tf.stack(modes, axis=-1)

    def neglogp(self, x):
        # Independence across dimensions: joint -log p is the sum of parts.
        per_dim = tf.unstack(x, axis=-1)
        return tf.add_n(
            [cat.neglogp(xi) for cat, xi in zip(self.categoricals, per_dim)]
        )

    def kl(self, other):
        return tf.add_n(
            [cat.kl(oth) for cat, oth in zip(self.categoricals, other.categoricals)]
        )

    def entropy(self):
        return tf.add_n([cat.entropy() for cat in self.categoricals])

    def sample(self):
        return tf.stack([cat.sample() for cat in self.categoricals], axis=-1)

    @classmethod
    def fromflat(cls, flat):
        # Cannot reconstruct without ``nvec``; this constructor is unsupported.
        raise NotImplementedError
class DiagGaussianProbabilityDistribution(ProbabilityDistribution):
    """Multivariate Gaussian with diagonal covariance.

    Parameterized by a flat tensor holding [mean, log_std] concatenated
    along the last axis.
    """

    def __init__(self, flat):
        self.flat = flat
        # First half of the last axis is the mean, second half the log-std.
        mean, logstd = tf.split(
            axis=len(flat.shape) - 1, num_or_size_splits=2, value=flat
        )
        self.mean = mean
        self.logstd = logstd
        self.std = tf.exp(logstd)
        super(DiagGaussianProbabilityDistribution, self).__init__()

    def flatparam(self):
        return self.flat

    def mode(self):
        # The mode of a Gaussian is its mean.
        return self.mean

    def neglogp(self, x):
        # Diagonal-Gaussian negative log-density:
        # 0.5*sum(((x-mu)/std)^2) + 0.5*k*log(2*pi) + sum(log std)
        return (
            0.5
            * tf.reduce_sum(input_tensor=tf.square((x - self.mean) / self.std), axis=-1)
            + 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(input=x)[-1], tf.float32)
            + tf.reduce_sum(input_tensor=self.logstd, axis=-1)
        )

    def kl(self, other):
        """Closed-form KL(self || other) between diagonal Gaussians."""
        assert isinstance(other, DiagGaussianProbabilityDistribution)
        return tf.reduce_sum(
            input_tensor=other.logstd
            - self.logstd
            + (tf.square(self.std) + tf.square(self.mean - other.mean))
            / (2.0 * tf.square(other.std))
            - 0.5,
            axis=-1,
        )

    def entropy(self):
        # Per-dimension Gaussian entropy: log(std) + 0.5*log(2*pi*e).
        return tf.reduce_sum(
            input_tensor=self.logstd + 0.5 * np.log(2.0 * np.pi * np.e), axis=-1
        )

    def sample(self):
        # Reparameterized sample: mean + std * eps with eps ~ N(0, I).
        return self.mean + self.std * tf.random.normal(
            tf.shape(input=self.mean), dtype=self.mean.dtype
        )

    @classmethod
    def fromflat(cls, flat):
        """Build a distribution from a flat [mean, log_std] tensor."""
        return cls(flat)
class BernoulliProbabilityDistribution(ProbabilityDistribution):
    """Independent Bernoulli distribution per action dimension (from logits)."""

    def __init__(self, logits):
        self.logits = logits
        # Success probabilities p = sigmoid(logits).
        self.probabilities = tf.sigmoid(logits)
        super(BernoulliProbabilityDistribution, self).__init__()

    def flatparam(self):
        return self.logits

    def mode(self):
        # Rounding p gives the most likely 0/1 outcome per dimension.
        return tf.round(self.probabilities)

    def neglogp(self, x):
        # Sigmoid cross-entropy against the binary actions sums the
        # per-dimension negative log-likelihoods.
        return tf.reduce_sum(
            input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.logits, labels=tf.cast(x, tf.float32)
            ),
            axis=-1,
        )

    def kl(self, other):
        # KL(self || other) = H(p, q) - H(p): difference of two
        # cross-entropies with self.probabilities as the labels.
        return tf.reduce_sum(
            input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(
                logits=other.logits, labels=self.probabilities
            ),
            axis=-1,
        ) - tf.reduce_sum(
            input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.logits, labels=self.probabilities
            ),
            axis=-1,
        )

    def entropy(self):
        # Entropy equals the cross-entropy of the distribution with itself.
        return tf.reduce_sum(
            input_tensor=tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.logits, labels=self.probabilities
            ),
            axis=-1,
        )

    def sample(self):
        # Per-dimension inverse-CDF sampling: 1 where uniform noise < p.
        samples_from_uniform = tf.random.uniform(tf.shape(input=self.probabilities))
        return tf.cast(
            math_ops.less(samples_from_uniform, self.probabilities), tf.float32
        )

    @classmethod
    def fromflat(cls, flat):
        """Build a distribution directly from flat logits."""
        return cls(flat)
def make_proba_dist_type(ac_space):
    """Return the ProbabilityDistributionType matching a Gym action space.

    Box -> diagonal Gaussian, Discrete -> categorical,
    MultiDiscrete -> multi-categorical, MultiBinary -> Bernoulli.
    Raises NotImplementedError for any other space type.
    """
    if isinstance(ac_space, spaces.Box):
        assert len(ac_space.shape) == 1, "Error: the action space must be a vector"
        return DiagGaussianProbabilityDistributionType(ac_space.shape[0])
    if isinstance(ac_space, spaces.Discrete):
        return CategoricalProbabilityDistributionType(ac_space.n)
    if isinstance(ac_space, spaces.MultiDiscrete):
        return MultiCategoricalProbabilityDistributionType(ac_space.nvec)
    if isinstance(ac_space, spaces.MultiBinary):
        return BernoulliProbabilityDistributionType(ac_space.n)
    raise NotImplementedError(
        "Error: probability distribution, not implemented for action space of type {}.".format(
            type(ac_space)
        )
        + " Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary."
    )
def shape_el(tensor, index):
    """Return dimension ``index`` of ``tensor``, preferring the static size.

    Falls back to the dynamic ``tf.shape`` op when the static shape entry
    is unknown (None).
    """
    static_size = tensor.get_shape()[index]
    if static_size is None:
        return tf.shape(input=tensor)[index]
    return static_size
|
{"hexsha": "7488eabde4b2ee9ad1d32e477efd51215fc13424", "size": 13016, "ext": "py", "lang": "Python", "max_stars_repo_path": "stable_baselines/common/distributions.py", "max_stars_repo_name": "emadboctorx/stable-baselines", "max_stars_repo_head_hexsha": "9bce185538e8bf69836371286e23919fd85eec64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stable_baselines/common/distributions.py", "max_issues_repo_name": "emadboctorx/stable-baselines", "max_issues_repo_head_hexsha": "9bce185538e8bf69836371286e23919fd85eec64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stable_baselines/common/distributions.py", "max_forks_repo_name": "emadboctorx/stable-baselines", "max_forks_repo_head_hexsha": "9bce185538e8bf69836371286e23919fd85eec64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7848970252, "max_line_length": 99, "alphanum_fraction": 0.6278426552, "include": true, "reason": "import numpy", "num_tokens": 2947}
|
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import inf_def
def main():
    # Evaluate a pre-trained inference network on the MNIST test set and
    # print its classification accuracy. (Python 2 script: uses the
    # `print` statement.)
    inf_net = inf_def.InferenceNetwork()
    # One-hot ground-truth labels for a batch of images.
    targets = tf.placeholder(tf.float32, [None, 10])
    # Per-example hit/miss: does the argmax of the logits match the label?
    correct_prediction = tf.equal(tf.argmax(inf_net.logits, 1), tf.argmax(targets, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    mnist = input_data.read_data_sets('./data/mnist/', one_hot=True)
    # Reshape the flat 784-vectors into [10000, 28, 28, 1] image tensors.
    test_images_reshaped = np.reshape(mnist.test.images, [10000, 28, 28, 1])
    # keep_prob = 1.0 at evaluation time (presumably disables dropout --
    # confirm in inf_def).
    feed_dict = {inf_net.x_ph: test_images_reshaped,
                 targets: mnist.test.labels,
                 inf_net.keep_prob: 1.0}
    print inf_net.sess.run(accuracy, feed_dict=feed_dict)
if __name__ == '__main__':
    main()
|
{"hexsha": "82f8851a05eb881ff55df5cbf5bbfb6911119946", "size": 768, "ext": "py", "lang": "Python", "max_stars_repo_path": "commons/mnist/inf/test_inf_net.py", "max_stars_repo_name": "hamedhaghighi/Usupervised_Image_Restoration", "max_stars_repo_head_hexsha": "a3fefbf54891b9e984987fe15bd6b434b59fec3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 86, "max_stars_repo_stars_event_min_datetime": "2018-02-14T00:12:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T17:22:36.000Z", "max_issues_repo_path": "commons/mnist/inf/test_inf_net.py", "max_issues_repo_name": "hamedhaghighi/Usupervised_Image_Restoration", "max_issues_repo_head_hexsha": "a3fefbf54891b9e984987fe15bd6b434b59fec3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-07-16T15:07:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-10T10:14:29.000Z", "max_forks_repo_path": "commons/mnist/inf/test_inf_net.py", "max_forks_repo_name": "hamedhaghighi/Usupervised_Image_Restoration", "max_forks_repo_head_hexsha": "a3fefbf54891b9e984987fe15bd6b434b59fec3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-02-19T06:24:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T01:50:37.000Z", "avg_line_length": 32.0, "max_line_length": 86, "alphanum_fraction": 0.7018229167, "include": true, "reason": "import numpy", "num_tokens": 196}
|
# # unfinished
# add_theme(:ggplot2_base,
# bglegend = _invisible,
# fg = :white,
# fglegend = _invisible,
# fgguide = :black)
#
# add_theme(:ggplot2,
# base = :ggplot2_base,
# bginside = :lightgray,
# fg = :lightgray,
# fgtext = :gray,
# fglegend = :gray,
# fgguide = :black)
#
# add_theme(:ggplot2_grey, base = :ggplot2)
#
# add_theme(:ggplot2_bw,
# base = :ggplot2_base,
# bginside = :white,
# fg = :black,
# fgtext = :lightgray,
# fglegend = :lightgray,
# fgguide = :black)
# ggplot2-inspired theme: white figure background, light-gray plot area
# with fully opaque white grid lines, gray tick text, white legend
# foreground and black guide (axis-label) text.
_themes[:ggplot2] = PlotTheme(
    bg = :white,
    bginside = :lightgray,
    bglegend = plot_color(:lightgray, 0.8),
    framestyle = :grid,
    gridcolor = :white,
    gridalpha = 1,
    fgtext = :grey,
    fglegend = :white,
    fgguide = :black,
)
|
{"hexsha": "c071fe6a1793cf3e06d71b9a581dae400929bcd6", "size": 891, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ggplot2.jl", "max_stars_repo_name": "greimel/PlotThemes.jl", "max_stars_repo_head_hexsha": "8e7f69e05369f570ffdcb125144b46f754b92d05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ggplot2.jl", "max_issues_repo_name": "greimel/PlotThemes.jl", "max_issues_repo_head_hexsha": "8e7f69e05369f570ffdcb125144b46f754b92d05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ggplot2.jl", "max_forks_repo_name": "greimel/PlotThemes.jl", "max_forks_repo_head_hexsha": "8e7f69e05369f570ffdcb125144b46f754b92d05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0810810811, "max_line_length": 43, "alphanum_fraction": 0.5140291807, "num_tokens": 275}
|
#include <boost/intrusive/list.hpp>
|
{"hexsha": "3781a4261a26b4a86a9eae1397afa18273d9d79c", "size": 36, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_intrusive_list.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_intrusive_list.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_intrusive_list.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 18.0, "max_line_length": 35, "alphanum_fraction": 0.7777777778, "num_tokens": 9}
|
#!/usr/bin/python -u
import numpy as np
import os, sys, random
import cv2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.models import load_model
np.random.seed(1337)
n_rows, n_cols, n_ch = 100, 100, 1
n_classes = 2
# ========= Model ========
def GetModel():
    '''
    Build and compile the CNN classifier.

    Architecture: two conv+relu+maxpool stages (16 and 32 5x5 filters),
    dropout, a 128-unit dense layer, then a softmax over n_classes.
    Compiled with categorical cross-entropy loss and the Adam optimizer.
    (Legacy Keras 1.x API: Convolution2D / border_mode.)
    '''
    model = Sequential()
    # 16 5x5 filters over the 100x100x1 input; 'valid' = no padding.
    model.add(Convolution2D(16, 5, 5, border_mode='valid', input_shape=(n_rows, n_cols, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Convolution2D(32, 5, 5))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    # Dropout for regularization during training.
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # Output layer: probabilities over the n_classes shape classes.
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
# ========================
def GetSample():
    '''
    Generate one random labelled image.

    Classes:
        0 = circle
        1 = square
    Draws a white shape of random size centred on a black n_rows x n_cols
    canvas, then normalizes to float32 in [0, 1].

    Returns (cls, out_image) where out_image is channels-first
    (n_ch, n_rows, n_cols) as produced by cv2.split.
    '''
    cls = random.randint(0, n_classes-1)
    img = np.zeros((n_rows, n_cols, n_ch), dtype='uint8')
    if cls == 0:
        # NOTE: n_cols/2 etc. rely on Python 2 integer division (this
        # script uses print statements, so it targets Python 2).
        cv2.circle(img, (n_cols/2, n_rows/2), random.randint(10, n_cols/2 - 10), (255), -1)
    elif cls == 1:
        side = random.randint(10, n_cols/2 - 10)
        cv2.rectangle(img, (n_cols/2 - side, n_cols/2 - side), (n_cols/2 + side, n_cols/2 + side), (255), -1)
    channels = cv2.split(img)
    # Normalize 0..255 pixel values into [0, 1] floats.
    out_image = np.array(channels, dtype='float32') / 255.
    return cls, out_image
def GetBatch(batch_size):
    '''
    Draw `batch_size` random samples.

    Returns a (labels, images) pair: labels has shape (batch_size, 1)
    and images stacks the per-sample image arrays.
    '''
    labels = []
    images = []
    for _ in range(batch_size):
        cls, image = GetSample()
        labels.append(cls)
        images.append(image)
    return np.array(labels).reshape(batch_size, 1), np.array(images)
def train():
    '''
    Train the CNN on randomly generated circle/square images, report
    test accuracy every `test_interval` iterations and save the final
    weights to 'weights.h5'.
    '''
    model = GetModel()
    # Training hyperparameters: iteration count, batch size, and how
    # often / on how many samples to evaluate.
    n_iter = 61
    batch_size = 64
    test_interval = 20
    test_size = 100
    for it in range(n_iter):
        print '\rIteration:', it,
        sys.stdout.flush()
        '''
        Get a batch of data (batch_size many images with corresponding labels)
        '''
        label_train, imgs_train = GetBatch(batch_size)
        # Reshape to NHWC and one-hot encode the labels for Keras.
        imgs_train = imgs_train.reshape(batch_size, n_rows, n_cols, 1)
        label_train = np_utils.to_categorical(label_train, n_classes)
        '''
        train the neureal net with this batch of data
        '''
        model.train_on_batch(imgs_train, label_train)
        if it % test_interval == 0:
            print '\nTesting...\n',
            '''
            Get a batch of data to test on
            '''
            labels, imgs = GetBatch(test_size)
            imgs_test = imgs.reshape(test_size, n_rows, n_cols, 1)
            labels_test = np_utils.to_categorical(labels, n_classes)
            '''
            evaluate how well the neural net alraedy classifies this batch of data
            '''
            score = model.test_on_batch(imgs_test, labels_test)
            # score[1] is the 'accuracy' metric requested in GetModel.
            print "Accuracy:{0:.0f}%\n".format( score[1]*100)
    '''
    Save to file for later use
    '''
    model.save('weights.h5')
def classify():
    '''
    Interactive demo: load the trained weights, repeatedly generate a
    random shape image, predict its class and display the image with the
    prediction overlaid. Press 'q' to quit.
    '''
    model = load_model('weights.h5')
    '''
    For fancy output
    '''
    vis_dictionary = {0:"circle",1:"square"}
    '''
    Get an image, let the network predict what it is,
    output the image and the prediciton
    '''
    while(True):
        '''
        Get data (images )
        '''
        label, img = GetBatch(1)
        img_test = img.reshape(1, n_rows, n_cols, 1)
        # NOTE(review): label_test is computed but never used below.
        label_test = np_utils.to_categorical(label, n_classes)
        '''
        Predict whats in the image with our neural net
        '''
        score = model.predict_on_batch(img_test)
        '''
        create fancy window pop up
        '''
        # Small banner image that shows the predicted class name.
        img2 = np.zeros((50, 100, 1), dtype='uint8')
        cv2.putText(img2,str(vis_dictionary[score[0].argmax()]),(2,45),cv2.FONT_HERSHEY_PLAIN , 1, 255,1)
        cv2.putText(img2,"PREDICTION: ",(2,25),cv2.FONT_HERSHEY_PLAIN , 1, 255,1)
        # De-normalize the sample back to uint8 and stack it above the banner.
        img = np.array(img * 255, dtype='uint8').reshape(n_rows, n_cols, n_ch)
        img2 = np.concatenate((img, img2), axis=0)
        '''
        Show prediction with fancy image pop up, 'q' to quit
        '''
        cv2.imshow("image ",img2)
        if cv2.waitKey(0)== 113:  # 113 == ord('q')
            break
# CLI entry point: 't' trains and saves weights, 'c' runs the
# interactive classifier demo.
if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] == 't':
        train()
    elif len(sys.argv) == 2 and sys.argv[1] == 'c':
        classify()
    else:
        print "please specify if you want to 'train : t' or 'classify: c' "
|
{"hexsha": "11eda219f5512cb86e70c99cabf59941692357d5", "size": 4584, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_train.py", "max_stars_repo_name": "mashgin/basic_deep_learning_keras", "max_stars_repo_head_hexsha": "4d6597c29ee9e929f7e4b17e146266fbdcf848a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2017-02-17T00:25:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T23:36:09.000Z", "max_issues_repo_path": "keras_train.py", "max_issues_repo_name": "mashgin/basic_deep_learning_keras", "max_issues_repo_head_hexsha": "4d6597c29ee9e929f7e4b17e146266fbdcf848a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keras_train.py", "max_forks_repo_name": "mashgin/basic_deep_learning_keras", "max_forks_repo_head_hexsha": "4d6597c29ee9e929f7e4b17e146266fbdcf848a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3209302326, "max_line_length": 103, "alphanum_fraction": 0.6655759162, "include": true, "reason": "import numpy", "num_tokens": 1363}
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from cugraph.experimental import PropertyGraph
from dgl.contrib.cugraph import CuGraphStorage
import numpy as np
import random
def sample_mask(idx, l):
    """Create a boolean mask of length ``l`` that is True at positions ``idx``.

    Parameters
    ----------
    idx : array-like of int
        Indices to set in the mask.
    l : int
        Total mask length.

    Returns
    -------
    numpy.ndarray
        Boolean array of shape (l,).
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # BUGFIX: the deprecated alias `np.bool` was removed in NumPy 1.24;
    # the builtin `bool` is the supported spelling and yields the same
    # dtype (numpy.bool_).
    return np.array(mask, dtype=bool)
def read_cora(graph_path, feat_path, self_loop=False):
    # Load the Cora citation graph and node features into a cuGraph
    # PropertyGraph wrapped as a DGL-compatible CuGraphStorage, and split
    # the node indices into train/val/test sets.
    #
    # graph_path: TSV edge list (two vertex columns, no header).
    # feat_path: TSV node table; column '1434' holds the class label and
    #   the remaining columns are features (first column is the vertex id).
    # self_loop: unused in this body -- TODO confirm intended behavior.
    # Returns (gstore, labels, idx_train, idx_val, idx_test).
    cora_M = cudf.read_csv(graph_path, sep='\t', header=None)
    cora_content = cudf.read_csv(feat_path, sep='\t', header=None)
    # the last column is true label
    labels = cora_content['1434']
    cora_content.drop(columns='1434', inplace=True)
    # add weight into graph
    cora_M['weight'] = 1.0
    # add features to nodes and edges
    pg = PropertyGraph()
    pg.add_edge_data(cora_M, vertex_col_names=("0", "1"))
    pg.add_vertex_data(cora_content, vertex_col_name="0")
    # NOTE(review): reaching into PropertyGraph's private dataframes to
    # drop the id columns is fragile -- verify against the cuGraph API.
    pg._vertex_prop_dataframe.drop(columns=['0'], inplace=True)
    pg._edge_prop_dataframe.drop(columns=['0', '1'], inplace=True)
    gstore = CuGraphStorage(pg)
    # define train, test and val splits
    indices = np.arange(len(labels))
    random.shuffle(indices)
    # First 1000 shuffled indices -> train, next 500 -> val, rest -> test.
    idx_train, idx_val, idx_test = np.split(indices, [1000, 1500])
    return gstore, labels, idx_train, idx_val, idx_test
|
{"hexsha": "7cec56e1c798fc354c776f88e311e342be5d47e8", "size": 1803, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/cugraph-pytorch/cugraph-local/data_loader/read_cora.py", "max_stars_repo_name": "wangxiaoyunNV/dgl", "max_stars_repo_head_hexsha": "abcf92cdee4c571b72e9347595e8b8f158ff66d4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/cugraph-pytorch/cugraph-local/data_loader/read_cora.py", "max_issues_repo_name": "wangxiaoyunNV/dgl", "max_issues_repo_head_hexsha": "abcf92cdee4c571b72e9347595e8b8f158ff66d4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/cugraph-pytorch/cugraph-local/data_loader/read_cora.py", "max_forks_repo_name": "wangxiaoyunNV/dgl", "max_forks_repo_head_hexsha": "abcf92cdee4c571b72e9347595e8b8f158ff66d4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7818181818, "max_line_length": 74, "alphanum_fraction": 0.7188019967, "include": true, "reason": "import numpy", "num_tokens": 458}
|
@testset "ch04_sim02" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
circlings = Array{Agent,1}(undef, 0)
robots = Array{RealRobot,1}(undef, 0)
for i = 1:10
circling = Agent(0.2, 10.0 / 180 * pi)
robot =
RealRobot([0.0, 0.0, 0.0], circling, nothing; radius = 0.05, color = "black")
push!(circlings, circling)
push!(robots, robot)
push!(world, robot)
end
dt = 0.1
anim = @animate for i = 1:300
t = dt * i
annota = "t = $(round(t, sigdigits=3))[s]"
p = draw(world, annota)
for j = 1:10
obsv = observations(robots[j].sensor_, robots[j].pose_)
@assert obsv == nothing
v, ω = decision(circlings[j], obsv)
state_transition(robots[j], v, ω, dt; move_noise = true)
end
end
if GUI
gif(anim, "ch04_sim02.gif", fps = 20)
end
end
@testset "ch04_sim03" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
circling_agent = Agent(0.2, 10.0 / 180 * pi)
nobias_robot = IdealRobot([0.0, 0.0, 0.0], circling_agent, nothing, 0.05, "gray")
push!(world, nobias_robot)
biased_robot = RealRobot(
[0.0, 0.0, 0.0],
circling_agent,
nothing;
radius = 0.05,
color = "red",
bias_rate_stds = (0.2, 0.2),
)
push!(world, biased_robot)
dt = 0.1
anim = @animate for i = 1:300
annota = "t = $(round(dt * i, sigdigits=3))[s]"
obsv1 = observations(nobias_robot.sensor_, nobias_robot.pose_)
v1, ω1 = decision(circling_agent, obsv1)
obsv2 = observations(biased_robot.sensor_, biased_robot.pose_)
v2, ω2 = decision(circling_agent, obsv2)
p = draw(world, annota)
state_transition(nobias_robot, v1, ω1, dt)
state_transition(biased_robot, v2, ω2, dt; vel_bias_noise = true)
end
if GUI
gif(anim, "ch04_sim03.gif", fps = 20)
end
end
@testset "ch04_sim04" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
circling_agent = Agent(0.2, 10.0 / 180 * pi)
robots = Array{RealRobot,1}(undef, 0)
for i = 1:10
robot = RealRobot(
[0.0, 0.0, 0.0],
circling_agent,
nothing;
radius = 0.05,
color = "gray",
expected_stuck_time = 60.0,
expected_escape_time = 60.0,
)
push!(robots, robot)
push!(world, robot)
end
ideal_robot = IdealRobot([0.0, 0.0, 0.0], circling_agent, nothing, 0.05, "red")
push!(world, ideal_robot)
dt = 0.1
anim = @animate for i = 1:300
annota = "t = $(round(dt * i, sigdigits=3))[s]"
zs = []
for j = 1:10
obsv = observations(robots[j].sensor_, robots[j].pose_)
if obsv == nothing
push!(zs, nothing)
else
push!(zs, copy(obsv))
end
end
obsv1 = observations(ideal_robot.sensor_, ideal_robot.pose_)
p = draw(world, annota)
for j = 1:10
v, ω = decision(circling_agent, zs[j])
state_transition(robots[j], v, ω, dt; stuck_noise = true)
end
v, ω = decision(circling_agent, obsv1)
state_transition(ideal_robot, v, ω, dt)
end
if GUI
gif(anim, "ch04_sim04.gif", fps = 20)
end
end
@testset "ch04_sim05" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
circling_agent = Agent(0.2, 10.0 / 180 * pi)
robots = Array{RealRobot,1}(undef, 0)
for i = 1:10
robot = RealRobot(
[0.0, 0.0, 0.0],
circling_agent,
nothing;
radius = 0.05,
color = "gray",
expected_kidnap_time = 5.0,
)
push!(robots, robot)
push!(world, robot)
end
ideal_robot = IdealRobot([0.0, 0.0, 0.0], circling_agent, nothing, 0.05, "red")
push!(world, ideal_robot)
dt = 0.1
anim = @animate for i = 1:300
annota = "t = $(round(dt * i, sigdigits=3))[s]"
zs = []
for j = 1:10
obsv = observations(robots[j].sensor_, robots[j].pose_)
if obsv == nothing
push!(zs, nothing)
else
push!(zs, copy(obsv))
end
end
obsv1 = observations(ideal_robot.sensor_, ideal_robot.pose_)
p = draw(world, annota)
for j = 1:10
v, ω = decision(circling_agent, zs[j])
state_transition(robots[j], v, ω, dt; kidnap = true)
end
v, ω = decision(circling_agent, obsv1)
state_transition(ideal_robot, v, ω, dt)
end
if GUI
gif(anim, "ch04_sim05.gif", fps = 20)
end
end
@testset "ch04_sim07" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
circling_agent = Agent(0.2, 10.0 / 180 * pi)
landmarks =
[Landmark([-4.0, 2.0], 0), Landmark([2.0, -3.0], 1), Landmark([3.0, 3.0], 1)]
m = Map()
push!(m, landmarks)
robot =
RealRobot([0.0, 0.0, pi / 6], circling_agent, RealCamera(landmarks); color = "red")
push!(world, robot)
push!(world, m)
dt = 0.1
anim = @animate for i = 1:300
t = dt * i
annota = "t = $(round(t, sigdigits=3))[s]"
obsv = observations(robot.sensor_, robot.pose_; noise = true)
p = draw(world, annota)
v, ω = decision(circling_agent, obsv)
state_transition(robot, v, ω, dt)
end
if GUI
gif(anim, "ch04_sim07.gif", fps = 20)
end
end
@testset "ch04_sim08" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
straight_agent = Agent(0.2, 0.0)
landmarks = [
Landmark([-4.0, 2.0], 0),
Landmark([3.0, -3.0], 1),
Landmark([3.0, 3.0], 2),
Landmark([3.0, -2.0], 3),
Landmark([3.0, 0.0], 4),
Landmark([3.0, 1.0], 5),
]
m = Map()
push!(m, landmarks)
robot = RealRobot([0.0, 0.0, 0.0], straight_agent, RealCamera(landmarks); color = "red")
push!(world, robot)
push!(world, m)
dt = 0.1
anim = @animate for i = 1:300
t = dt * i
annota = "t = $(round(t, sigdigits=3))[s]"
obsv = observations(robot.sensor_, robot.pose_; noise = true, bias = true)
p = draw(world, annota)
v, ω = decision(straight_agent, obsv)
state_transition(robot, v, ω, dt)
end
if GUI
gif(anim, "ch04_sim08.gif", fps = 20)
end
end
@testset "ch04_sim09" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
landmarks =
[Landmark([-4.0, 2.0], 0), Landmark([2.0, -3.0], 1), Landmark([3.0, 3.0], 2)]
m = Map()
push!(m, landmarks)
circling = Agent(0.2, 10.0 / 180 * pi)
robot = RealRobot(
[0.0, 0.0, 0.0],
circling,
RealCamera(landmarks; phantom_prob = 0.2);
color = "red",
)
push!(world, robot)
push!(world, m)
dt = 0.1
anim = @animate for i = 1:300
t = dt * i
annota = "t = $(round(t, sigdigits=3))[s]"
z = observations(
robot.sensor_,
robot.pose_;
noise = true,
bias = true,
phantom = true,
)
p = draw(world, annota)
v, ω = decision(circling, z)
state_transition(robot, v, ω, dt; move_noise = true, vel_bias_noise = true)
end
if GUI
gif(anim, "ch04_sim09.gif", fps = 20)
end
end
@testset "ch04_sim10" begin
xlim = [-5.0, 5.0]
ylim = [-5.0, 5.0]
world = World(xlim, ylim)
landmarks =
[Landmark([-4.0, 2.0], 0), Landmark([2.0, -3.0], 1), Landmark([3.0, 3.0], 2)]
m = Map()
push!(m, landmarks)
circling = Agent(0.2, 10.0 / 180 * pi)
robot = RealRobot(
[0.0, 0.0, 0.0],
circling,
RealCamera(landmarks; overlook_prob = 0.5);
color = "red",
)
push!(world, robot)
push!(world, m)
dt = 0.1
anim = @animate for i = 1:300
t = dt * i
annota = "t = $(round(t, sigdigits=3))[s]"
z = observations(
robot.sensor_,
robot.pose_;
noise = true,
bias = true,
overlook = true,
)
p = draw(world, annota)
v, ω = decision(circling, z)
state_transition(robot, v, ω, dt; move_noise = true, vel_bias_noise = true)
end
if GUI
gif(anim, "ch04_sim10.gif", fps = 20)
end
end
@testset "ch04_sim11" begin
    # Circling robot starting at [2, 2, pi/6]; its camera is built with
    # occlusion_prob = 0.1 and observations request noise, bias and occlusion.
    xrange = [-5.0, 5.0]
    yrange = [-5.0, 5.0]
    world = World(xrange, yrange)
    landmarks = [
        Landmark([-4.0, 2.0], 0),
        Landmark([2.0, -3.0], 1),
        Landmark([3.0, 3.0], 2),
    ]
    lmap = Map()
    push!(lmap, landmarks)
    # Agent with fixed forward speed 0.2 and angular speed 10 deg/s.
    agent = Agent(0.2, 10.0 / 180 * pi)
    robot = RealRobot(
        [2.0, 2.0, pi / 6],
        agent,
        RealCamera(landmarks; occlusion_prob = 0.1);
        color = "red",
    )
    push!(world, robot)
    push!(world, lmap)
    dt = 0.1
    anim = @animate for step in 1:300
        elapsed = dt * step
        caption = "t = $(round(elapsed, sigdigits=3))[s]"
        meas = observations(
            robot.sensor_,
            robot.pose_;
            noise = true,
            bias = true,
            occlusion = true,
        )
        canvas = draw(world, caption)
        vel, rot = decision(agent, meas)
        state_transition(robot, vel, rot, dt; move_noise = true, vel_bias_noise = true)
    end
    if GUI
        gif(anim, "ch04_sim11.gif", fps = 20)
    end
end
|
{"hexsha": "e64a7b70515c37399b7148c269ea7e389d35a2cf", "size": 9564, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/ch04_test.jl", "max_stars_repo_name": "soblin/JuliaProbo", "max_stars_repo_head_hexsha": "bb206e19dd350af7f82b90e7c5062e5a088eff2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-04T06:15:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-04T06:15:20.000Z", "max_issues_repo_path": "test/ch04_test.jl", "max_issues_repo_name": "soblin/JuliaProbo", "max_issues_repo_head_hexsha": "bb206e19dd350af7f82b90e7c5062e5a088eff2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/ch04_test.jl", "max_forks_repo_name": "soblin/JuliaProbo", "max_forks_repo_head_hexsha": "bb206e19dd350af7f82b90e7c5062e5a088eff2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8942598187, "max_line_length": 92, "alphanum_fraction": 0.5135926391, "num_tokens": 3259}
|
\section{Intermediate Language}~\label{sec:il}
Each prover first translates the Copilot specification into an
intermediate representation best suited for model checking. Two
representations are available :
\begin{itemize}
\item
The \textbf{IL} format : a list
of quantifier-free equations over integer sequences, implicitly
universally quantified by a free variable \emph{n}. Each sequence
roughly corresponds to a stream. This format is the one used in G.
Hagen's thesis~\cite{HagenPhD}. The \emph{light prover} works with this
format.
\item
The \textbf{TransSys} format : a modular representation of a \emph{state transition system}~\cite{XX}. The \emph{Kind2 prover} uses this
format, which is similar to Kind2's native format.
\end{itemize}
\textbf{Cnub} is a simplified representation of a Copilot program where
only the information useful for SMT-based model checking is kept. For
now, it is not used by the two standard provers but it could be used in
the future as an intermediate step in the translation of a copilot specification to the \textbf{IL} or \textbf{TransSys} format.
%For each of these formats, there is a folder in
%\texttt{src/Copilot/Kind} which contains at least three files
%\begin{itemize}
%\item \texttt{Spec.hs} where the format is defined
%\item \texttt{PrettyPrint.hs} for pretty
%printing (useful for debugging)
%\item \texttt{Translate.hs} where the
%translation process from \texttt{Core.Spec} is defined
%\end{itemize}
%These three formats share a simplified set of types and operators,
%defined respectively in \texttt{Misc.Type} and \texttt{Misc.Operator}.
\subsection{The \textbf{Cnub} format}
The complexity of the models that are built from Copilot specification is limited by the power and expressiveness of the theories handled by the current SMT solver. For instance, bounded integer arithmetic is often approximated by standard integer arithmetic as overflow problems are ignored and most real functions like trigonometric functions are not handled.
The \textbf{Cnub} format is aimed at abstracting a copilot specification in a format relying on a simple theory including basic integer and real arithmetic and uninterpreted functions. As said before, using it as an intermediate step in the translation process to \textbf{IL} or \textbf{TransSys} would result in a significant simplification of the latter.
In the \textbf{Cnub} format, the stream structure is kept from the copilot core. However, the following differences have to be emphasized :
\begin{itemize}
\item In contrast to the great diversity of numeric types available in Copilot, we restrain ourselves to three basic types which are handled by the \texttt{SMTLib} standard and defined in \texttt{Kind.Misc.Types} : \texttt{Bool}, \texttt{Integer} and \texttt{Real}.
\item \textit{Uninterpreted functions} are used to model operators that are not handled. Uninterpreted functions are abstract symbols with the property that : $$ \left( \forall i . \; x_i = y_i \right) \Longrightarrow f(x_1, \cdots, x_n) = f(y_1, \cdots, y_n). $$ They are provided by most SMT solvers.
\item \textit{Non-deterministic} functions are used to deal with extern values, which corresponds to the copilot constructions \texttt{ExternFun}, \texttt{ExternVar} and \texttt{ExternArray}. They could be seen as uninterpreted functions taking as an additional argument the current time. Indeed, between two clock ticks, they yield the same result if given the same argument. However, they can change at each clock tick.
\end{itemize}
\paragraph{Remark : some ideas to build more faithful models}
Treating machine integers as unbounded integers and complex operators as uninterpreted functions might be an unacceptable approximation in many cases. The first problem could be tackled by adding automatically to the property being verified some bound-checking conditions for all integer variables. This solution has many weaknesses :
\begin{itemize}
\item It treats every program which causes an integer overflow as wrong, although this behaviour could be intended.
\item It generates a great overhead for the proof engine.
\end{itemize}
An intermediate solution could be to let the developer annotate the program so that they can specify which bounds have to be checked automatically.
\medskip
The problem of complex operators like trigonometry operators is more difficult to handle. A seemingly good solution would be to give some classical properties of these operators as additional constraints for the SMT solvers. For instance, we could add the following constraint to any specification where the sine function appears : \[ \forall x . \; -1 \leq \sin x \leq 1 \]
Unfortunately, quantifiers are not handled well by the current state
of the art SMT solvers.~\footnote{As we will discuss later, it
seems to us it is one of the main limitations of SMT-based techniques
which keeps them from scaling well.} An alternative would be to add
automatically a specialized version of these constraints for each
appropriate variable. For instance, we could add the constraint \[-1
\leq y \leq 1 \] for all variables $y$ such that we have a
constraint \[ y = \sin x \] for $x$ any variable. Further
improvements on these techniques warrant further study.
\subsection{The \textbf{IL} format}
In this format, a stream of type $a$ is modeled by a function of type $\mathbb{N} \to a$. Each stream definition is translated into a list of constraints on such functions. For instance, the stream definition :
\begin{lstlisting}[frame=single]
fib = [1, 1] ++ (fib + drop 1 fib)
\end{lstlisting}
is translated into a function $f : \mathbb{N} \to \mathbb{N}$ with the constraints :
$$
\begin{array}{c}
f(0) = 1 \\
f(1) = 1 \\
\forall n . \; f(n + 2) = f(n + 1) + f(n)
\end{array}
$$
\medskip
Specifications in the \textbf{IL} format can be printed out in the SMTLib format.
\paragraph{The translation process}
The translation process is straightforward as the reification process~\cite{gill,pike-icfp-12} has previously transformed the copilot program such that the \texttt{(++)} operator only occurs at the top of a stream definition. Indeed, all stream definitions are written with the pattern
\begin{center}\texttt{ s = [$s_0$,...,$s_p$] ++ e}\end{center}
where \texttt{e} is an expression in which \texttt{(++)} does not occur. In \texttt{Core.Spec}, a stream is defined as :
\begin{lstlisting}[frame=single]
data Stream = forall a.Typed a => Stream
{ streamId :: Id
, streamBuffer :: [a]
, streamExpr :: Expr a
, streamExprType :: Type a }
\end{lstlisting}
where the field \texttt{streamBuffer} corresponds to \texttt{[$s_0$,...,$s_p$]} and the field \texttt{streamExpr} to \texttt{e}. Moreover, a simplified definition of the type \texttt{Expr} defined in \texttt{Core.Spec} is :
\begin{lstlisting}[frame=single]
data Expr a where
Const :: Type a -> a -> Expr a
Drop :: Type a -> DropIdx -> Id -> Expr a
Op2 :: Op2 a b c -> Expr a -> Expr b -> Expr c
\end{lstlisting}
Then, the translation algorithm is the following. For each stream equation \begin{center}\texttt{ s = [$s_0$,...,$s_p$] ++ e},\end{center}we introduce a new fresh function $f_s$ and the following constraints :
\begin{itemize}
\item $f_s(i) = s_i$ \quad for $0 \leq i \leq p$
\item $\forall n . \; f_s(n + p) = \texttt{expr}_p \; e$
\end{itemize}
where $\texttt{expr}_p$ is defined in this way on expressions :
\begin{itemize}
\item $\texttt{expr}_p \; (\texttt{Const } v) = v$
\item $\texttt{expr}_p \; (\texttt{Drop } g \; i) = f_g(n + p - i)$
\item $\texttt{expr}_p \; (\texttt{Op2 } \oplus \; x_1 \; x_2) = {expr}_p \; x_1 \; \oplus \; {expr}_p \; x_2 $
\end{itemize}
\paragraph{A formal specification of the IL format}
An \textbf{IL} specification is given by :
\begin{itemize}
\item Some identifiers for stream functions and some uninterpreted function symbols
\item A list of constraint between stream functions
\item A list of properties, that is boolean stream functions which should be proved to be constant to \texttt{true}.
Constraints are defined by the following grammar.
\end{itemize}
\bigskip
\begin{tabular}{llclr}
& \textbf{constr} & = & \textbf{expr}$_{Bool}$ & \\
$\forall \tau$ & \textbf{expr}$_{\tau}$ & = & $\left< \textit{const}\,_\tau \right>$ &\\
& & $\textbar$ & \textit{if} \textbf{expr}$_{Bool}$ \textit{then} \textbf{expr}$_{\tau}$ \textit{else} \textbf{expr}$_{\tau}$ \\
& & $\textbar$ &\textbf{op}$^1_{\alpha, \tau}$ \textbf{expr}$_{\alpha}$ \\
& & $\textbar$ & \textbf{expr}$_{\alpha}$ \textbf{op}$^2_{\alpha, \, \beta, \, \tau}$ \textbf{expr}$_{\beta}$ \\
& & $\textbar$ & $\left< \textit{sid} \, \right> \, \left(\,\textbf{sindex}\,\right)$ \\
& & $\textbar$ & $\left< \textit{unintid} \, \right> $ $\left( \exists \alpha . \, \textbf{expr}_\alpha \right)^{*}$
\\
& \textbf{sindex} & = & $\left< \textit{int} \, \right> $ & \\
& & $\textbar$ & $ \textit{n} + \left< \textit{int} \, \right> $ & \\
& $\cdots$ & & \\
&&& \\
& \textbf{op}$^2_{Int, Int, Int}$ & = & $+ \,\textbar \, - \,\textbar\, * \,\textbar\, mod \,\textbar\, div \,\textbar\, \cdots $ & \\
& \textbf{op}$^2_{Int, Int, Bool}$ & = & $< \,\textbar \, \leq \,\textbar\, \geq \,\textbar\, > \,\textbar\, = \,\textbar\, \cdots $ & \\
&&& \\
& $\cdots$ & & \\
&&& \\
\end{tabular}
\medskip
Each expression is parametrized by its type, which belongs to $\{
\texttt{Int}, \texttt{Real}, \texttt{Bool}\}$ and is comprised of a
constant, an \textit{if-then-else} construction or other expressions
glued by an operator, or one of the following :
\begin{itemize}
\item The value of a stream at a given time. The grammar for such
a constraint is defined by an identifier \textit{sid} and an index. The index is either an integer constant or an expression matching the pattern $ \textit{n} + \left< \textit{int} \, \right> $, $n$ being an integer variable by which each constraint is implicitly universally quantified.
\item The application of an uninterpreted function, whose identifier is given, to a list of arguments.
\end{itemize}
To access a full list of the operators supported by \textit{copilot-kind}, see the module \texttt{Kind.Misc.Operators}.
\subsection{The \textbf{TransSys} format}
Recall, a state transition system is a triple $(S,I,T),$
where $S$ is a set of states, $I \subseteq S$ is the set of initial
states and $T \subseteq S \times S $ is a transition relation over $S$.
Here, a state consists of the values of a finite set of variables, with types belonging to $\{ \texttt{Int}, \texttt{Real}, \texttt{Bool}\}$. $I$ is encoded by a logical formula whose free variables correspond to the state variables and that holds for a state $q$ if and only if $q$ is an initial state. Similarly, the transition relation is given by a formula $T$ such that $T\left[\, q, \, q' \,\right]$ holds if and only if $q \rightarrow q'$.
The \textbf{TransSys} format is a modular encoding of such a state transition system. Related variables are grouped into \textit{nodes}, each node providing a distinct namespace and expressing some constraints between its variables.
\paragraph{Formal definition}
Inside a node, a variable is referenced by a string identifier. Indeed, the corresponding \texttt{Var} type is defined as :
\begin{lstlisting}[frame=single]
data Var = Var {varName :: String}
\end{lstlisting}
Outside a node, a variable is referenced by a node identifier and an instance of \texttt{Var} :
\begin{lstlisting}[frame=single]
data ExtVar = ExtVar {extVarNode :: NodeId, extVarLocalPart :: Var}
\end{lstlisting}
where \texttt{NodeId = String}. Then, the \texttt{Node} type is defined by :
\begin{lstlisting}[frame=single]
data Node = Node
{ nodeId :: NodeId
, nodeDependencies :: [NodeId]
, nodeLocalVars :: Map Var VarDescr
, nodeImportedVars :: Bimap Var ExtVar
, nodeConstrs :: [Expr Bool] }
\end{lstlisting}
where :
\begin{itemize}
\item \texttt{nodeId} is the identifier of the node
\item \texttt{nodeImportedVars} contains the bijection between the external variables used in the node and their local aliases. Indeed, inside a node we cannot refer to an external variable (a variable belonging to another node). Therefore, a means to bind it to a local name is provided.
\item \texttt{nodeDependencies} is the list of the nodes from which a variable is given a local alias. This information is redundant.
\item \texttt{nodeLocalVars} is a dictionary which binds each local variable to its descriptor. The type of descriptors \texttt{VarDescr} will be described later.
\item \texttt{nodeConstrs} is a list of constraints on local variables.
\end{itemize}
A variable descriptor is comprised of a type and a definition :
\begin{lstlisting}[frame=single]
data VarDescr = forall t.VarDescr
{ varType :: Type t
, varDef :: VarDef t }
\end{lstlisting}
Note that in the same way as in \texttt{Copilot.Core}, {\sc
gadt}s~\cite{Xi2003,CheneyHinze03, Johann2008} are used to achieve some additional type safety. The \texttt{VarDef} type is defined as :
\begin{lstlisting}[frame=single]
data VarDef t =
Pre t Var
| Expr (Expr t)
| Constrs [Expr Bool]
\end{lstlisting}
As we can see, a local variable can be defined
\begin{itemize}
\item as the previous value of another variable, an initial value being given too (\texttt{Pre} constructor)
\item by an expression involving other variables (\texttt{Expr} constructor)
\item implicitly by a series of constraints (\texttt{Constrs} constructor)
\end{itemize}
Note that the last constructor can be used to achieve some non-determinism. For instance, a variable whose \texttt{varDef} field is \texttt{Constrs []} is left totally unconstrained~\footnote{In fact, this is not exact as each constraint which is put in a \texttt{Constrs} constructor could have been put in the \texttt{nodeConstrs} field of the current node instead. However, this is to be avoided for readability and the \texttt{nodeConstrs} field should be left empty as often as possible.
}. The expression type associated with the second constructor is defined as :
\begin{lstlisting}[frame=single]
data Expr t where
Const :: Type t -> t -> Expr t
Ite :: Type t -> Expr Bool -> Expr t -> Expr t -> Expr t
Op1 :: Type t -> Op1 x t -> Expr x -> Expr t
Op2 :: Type t -> Op2 x y t -> Expr x -> Expr y -> Expr t
VarE :: Type t -> Var -> Expr t
\end{lstlisting}
Therefore, an expression is a combination of constants, operators, and local variable names.
Finally, a specification in the \textbf{TransSys} format is comprised
of a list of nodes and a dictionary binding property names to boolean variables which have to be shown to be constantly equal to \texttt{true}.
\begin{lstlisting}[frame=single]
data Spec = Spec
{ specNodes :: [Node]
, specProps :: Map PropId ExtVar }
\end{lstlisting}
\paragraph{Semantics of a transition system}
We give a semantics to \textbf{TransSys} specifications by extracting from them a state transition system in the form of a list of variables and two boolean formulas $I$ and $T$.
The list of variables is simply a concatenation of the local variables of all nodes. Then, we define the formula $C$ as the concatenation of the following clauses, for each node :
\begin{itemize}
\item A series of equalities $a_i = b_i$ for each entry $(a_i, b_i)$ in \texttt{nodeImportedVars}
\item Each constraint defined in \texttt{nodeConstrs}
\item For each local variable $v$ defined in \texttt{nodeLocalVars} :
\begin{itemize}
\item If the \texttt{Expr} $e$ constructor is used, add the constraint $v = e$
\item If the \texttt{Constrs cs} constructor is used, add the constraints \texttt{cs}
\item If the \texttt{Pre} constructor is used, add no constraint
\end{itemize}
\end{itemize}
Then, we define a formula $T_0[\vec q, \vec q \,']$ where $\vec q$ is the vector of variables representing the current state. Moreover, $\vec q \,'$ is a vector containing the same variables, primed, and represents the \textit{next} state. $T_0$ is defined as the conjunction of the following clauses, for each node and each local variable $u$ which descriptor is \texttt{Pre \_} $v$ :
\begin{center} $u' = v$ \end{center}
After this, we define a formula $I_0[\vec q]$ that is the conjunction of the following clauses, for each node and each local variable $u$ which descriptor is \texttt{Pre} $a$ \_ :
\begin{center} $u = a$ \end{center}
Finally, we define $I$ and $T$ as :
\[ I[\vec q] = I_0[\vec q] \;\wedge\; C[\vec q] \qquad T[\vec q, \; \vec q \, '] = T_0[\vec q, \; \vec q \,'] \, \wedge \, C[\vec q \, '] \]
\paragraph{Translation from the \texttt{Core} format to the \texttt{TransSys} format}
This translation process is pretty straightforward : for each stream definition
\begin{center}\texttt{ s = [$s_0$,...,$s_p$] ++ e}\end{center}
we add a node with the same name as $s$ and the local variables $out_0, out_1, \cdots, \, out_p$ with the descriptors :
\begin{itemize}
\item $out_{i} = $ \texttt{Pre} $s_i$ $out_{i + 1}$
\item $out_{p} = $ \texttt{Expr} $\tilde e$ where $\tilde e$ is a translation of $e$ in the \textbf{TransSys} expression format, which is quite similar to the core expression format. Moreover :
\begin{itemize}
\item Local variables in the core format can be expressed by adding variables in the current node.
\item The core expression \texttt{Drop} $g$ $i$ is translated by $out_{p - i}$ if $g = s$. Otherwise, a local alias is defined for the variable $out_{p - i}$ of the node $g$ and is returned.
\end{itemize}
\end{itemize}
\paragraph{Transformations on modular transition systems}
%The transition system obtained by the previously discuddes translation process is perfectly consistent. However, it can't be directly translated into the Kind2 native file format. Indeed, it is natural to bind each node to a predicate but the Kind2 file format requires that each predicate only uses previously defined predicates. However, some nodes in our transition system could be mutually recursive.
The modular transition systems obtained after the translation process
defined in the previous paragraph cannot be printed out in the \texttt{Kind2} native format yet. For instance, some nodes could be mutually dependent whereas \texttt{Kind2} requires its predicates to be cycle-dependence free and written in topological order.
In fact, the actual implementation design is aimed at splitting an otherwise longer and more complex translation process in a series of simple program transformations. These transformations are defined in the \texttt{Kind.TransSys.Transform} module.
\bigskip
One of the most basic transformations is merging nodes, that is, replacing these nodes by a bigger one which contains the concatenation of their local variables. Some of them might have to be renamed to avoid name conflicts. This renaming process is made easier by the use of a specific monad defined in \texttt{Kind.Misc.Renaming}.
The \texttt{removeCycles} :: \texttt{Spec -> Spec} transformation turns a \texttt{TransSys} specification into the \textit{most modular} specification with the same semantics which is dependence-cycle free. This function relies on the \texttt{mergeNodes} :: \texttt{[NodeId] -> Spec -> Spec} function previously discussed. The \texttt{removeCycles} function computes the strongly connected components of the dependency graph of a specification and merges each one into a single node. The complexity of this process is high in the worst case (the square of the total size of the system times the size of the biggest node), but good in practice as few nodes are to be merged in most practical cases.
Note that, with the \texttt{mergeNodes} function, we can get for free the function
\begin{lstlisting}[frame=single]
inline :: Spec -> Spec
inline spec = mergeNodes [nodeId n | n <- specNodes spec] spec
\end{lstlisting}
which discards all the structure of a modular transition system and turns it into a non-modular transition system with only one node.
\bigskip
Once the dependence-cycles have been discarded in a specification, a last transformation is to be applied before an output in the Kind2 native format can be produced. This transformation, whose signature is \texttt{complete} :: \texttt{Spec -> Spec}, is a bit technical. To put it in a nutshell, it transforms a specification into a semantically equivalent one such that :
\begin{itemize}
\item The dependency graph is transitive, that is, if A depends on B, which depends on C, then A depends on C.
\item If a node depends on another one, it defines a local alias for all its variables.
\end{itemize}
This way, it is possible to see a node as a predicate on its variables whose definition makes a call to the predicates associated to the nodes of its dependency list. After this, a translation to the Kind2 native format is only a matter of syntax.
\subsection{Some examples}
\subsubsection{A simple property about the Fibonacci sequence}
The following specification :
\begin{lstlisting}[frame=single]
spec = do
prop "pos" (fib > 0)
where
fib :: Stream Word64
fib = [1, 1] ++ (fib + drop 1 fib)
\end{lstlisting}
can be translated into this IL specification :
\begin{code}
SEQUENCES
s0 : Int
MODEL INIT
s0[0] = 1
s0[1] = 1
MODEL REC
s0[n + 2] = s0[n] + s0[n + 1]
PROPERTIES
'pos' : s0[n] > 0
\end{code}
Then, it is possible to use a SMT solver to prove \textit{"pos"}. For instance, here is the SMTLib code produced for the verification of the induction step :
\begin{code}
<step> (set-logic QF_UFLIA)
<step> (declare-fun n () Int)
<step> (declare-fun s0 (Int) Int)
<step> (assert (= (s0 (+ n 2)) (+ (s0 (+ n 0)) (s0 (+ n 1)))))
<step> (assert (= (s0 (+ n 3)) (+ (s0 (+ n 1)) (s0 (+ n 2)))))
<step> (assert (> (s0 (+ n 0)) 0))
<step> (push 1)
<step> (assert (or false (not (> (s0 (+ n 1)) 0))))
<step> (check-sat)
<step> (pop 1)
<step> (assert (= (s0 (+ n 4)) (+ (s0 (+ n 2)) (s0 (+ n 3)))))
<step> (assert (> (s0 (+ n 1)) 0))
<step> (push 1)
<step> (assert (or false (not (> (s0 (+ n 2)) 0))))
<step> (check-sat)
unsat
<step> (pop 1)
\end{code}
\bigskip
Otherwise, the original Copilot specification could be translated into a modular transition system (shown here after the \texttt{removeCycles} and \texttt{complete} transformations are applied) :
\begin{code}
NODE 's0' DEPENDS ON []
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
1 -> pre out.2
out.2 : Int =
(out) + (out.1)
NODE 'prop-pos' DEPENDS ON [s0]
IMPORTS
(s0 : out) as 's0.out'
(s0 : out.1) as 's0.out.1'
(s0 : out.2) as 's0.out.2'
DEFINES
out : Bool =
(s0.out) > (0)
NODE 'top' DEPENDS ON [prop-pos, s0]
IMPORTS
(prop-pos : out) as 'pos'
(s0 : out) as 's0.out'
(s0 : out.1) as 's0.out.1'
(s0 : out.2) as 's0.out.2'
PROPS
'pos' is (top : pos)
\end{code}
This system is translated into
\begin{code}
(define-pred s0
((out Int)
(out.1 Int)
(out.2 Int))
(init
(and
(= out 1)
(= out.1 1)
(= out.2
(+ out out.1))))
(trans
(and
(= (prime out) out.1)
(= (prime out.1) out.2)
(= (prime out.2)
(+ (prime out) (prime out.1))))))
(define-pred prop-pos
((out Bool)
(s0.out Int)
(s0.out.1 Int)
(s0.out.2 Int))
(init
(and
(= out
(> s0.out 0))
(s0.init s0.out s0.out.1 s0.out.2)))
(trans
(and
(= (prime out)
(> (prime s0.out) 0))
(s0.trans s0.out s0.out.1 s0.out.2 (prime s0.out) (prime s0.out.1) (prime s0.out.2)))))
(define-pred top
((pos Bool)
(s0.out Int)
(s0.out.1 Int)
(s0.out.2 Int))
(init
(prop-pos.init pos s0.out s0.out.1 s0.out.2))
(trans
(prop-pos.trans pos s0.out s0.out.1 s0.out.2 (prime pos) (prime s0.out) (prime s0.out.1) (prime s0.out.2))))
(check-prop
((pos pos)))
\end{code}
\subsubsection{An example of transformations on transition systems}
Consider the following Copilot specification :
\begin{lstlisting}[frame=single]
spec :: Spec
spec = do
prop "prop" (a == c)
where
a :: Stream Word64
a = [1] ++ b
b :: Stream Word64
b = [1] ++ a
c :: Stream Word64
c = [1] ++ c
\end{lstlisting}
Translated to the TransSys format, we get :
\begin{code}
NODE 's2' DEPENDS ON []
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
out
NODE 's1' DEPENDS ON [s0]
IMPORTS
(s0 : out) as 's0.out'
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
s0.out
NODE 's0' DEPENDS ON [s1]
IMPORTS
(s1 : out) as 's1.out'
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
s1.out
NODE 'prop-prop' DEPENDS ON [s0, s2]
IMPORTS
(s0 : out) as 's0.out'
(s2 : out) as 's2.out'
DEFINES
out : Bool =
(s0.out) = (s2.out)
NODE 'top' DEPENDS ON [prop-prop]
IMPORTS
(prop-prop : out) as 'prop'
PROPS
'prop' is (top : prop)
\end{code}
As we can see, the nodes \textit{'s0'} and \textit{'s1'} are mutually dependent. A call to the \texttt{removeCycles} function merges them :
\begin{code}
NODE 's2' DEPENDS ON []
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
out
NODE 's0-s1' DEPENDS ON []
DEFINES
s0.out : Int =
1 -> pre s0.out.1
s0.out.1 : Int =
s1.out
s1.out : Int =
1 -> pre s1.out.1
s1.out.1 : Int =
s0.out
NODE 'prop-prop' DEPENDS ON [s0-s1, s2]
IMPORTS
(s0-s1 : s0.out) as 's0.out'
(s2 : out) as 's2.out'
DEFINES
out : Bool =
(s0.out) = (s2.out)
NODE 'top' DEPENDS ON [prop-prop]
IMPORTS
(prop-prop : out) as 'prop'
PROPS
'prop' is (top : prop)
\end{code}
Finally, if we apply the \texttt{complete} function, we get :
\begin{code}
NODE 's2' DEPENDS ON []
DEFINES
out : Int =
1 -> pre out.1
out.1 : Int =
out
NODE 's0-s1' DEPENDS ON []
DEFINES
s0.out : Int =
1 -> pre s0.out.1
s0.out.1 : Int =
s1.out
s1.out : Int =
1 -> pre s1.out.1
s1.out.1 : Int =
s0.out
NODE 'prop-prop' DEPENDS ON [s0-s1, s2]
IMPORTS
(s0-s1 : s0.out) as 's0.out'
(s0-s1 : s0.out.1) as 's0.out.1'
(s0-s1 : s1.out) as 's1.out'
(s0-s1 : s1.out.1) as 's1.out.1'
(s2 : out) as 's2.out'
(s2 : out.1) as 's2.out.1'
DEFINES
out : Bool =
(s0.out) = (s2.out)
NODE 'top' DEPENDS ON [prop-prop, s0-s1, s2]
IMPORTS
(prop-prop : out) as 'prop'
(s0-s1 : s0.out) as 's0.out'
(s0-s1 : s0.out.1) as 's0.out.1'
(s0-s1 : s1.out) as 's1.out'
(s0-s1 : s1.out.1) as 's1.out.1'
(s2 : out) as 's2.out'
(s2 : out.1) as 's2.out.1'
PROPS
'prop' is (top : prop)
\end{code}
Alternatively, if we had used \texttt{inline} instead of \texttt{complete . removeCycles}, we would have gotten :
\begin{code}
NODE 'top' DEPENDS ON []
DEFINES
prop-prop.out : Bool =
(s0.out) = (s2.out)
s0.out : Int =
1 -> pre s0.out.1
s0.out.1 : Int =
s1.out
s1.out : Int =
1 -> pre s1.out.1
s1.out.1 : Int =
s0.out
s2.out : Int =
1 -> pre s2.out.1
s2.out.1 : Int =
s2.out
PROPS
'prop' is (top : prop-prop.out)
\end{code}
\subsubsection{The Boyer-Moore majority vote algorithm}
Finally, we study a more ambitious example : the Boyer-Moore majority vote algorithm. If not familiar with it, the reader should consult \ref{?} first. Two versions of this algorithm were checked with \texttt{copilot-kind}. The first one comes from the original Copilot tutorial \ref{?} and is a parallel implementation : at each tick, the algorithm takes $n$ inputs from $n$ distinct streams and is fully executed. The second one is a sequential version, where the inputs are delivered one by one in time and where the result is updated at each clock tick. Both can be checked with the basic k-induction algorithm but the proofs involved are of very different natures.
\paragraph{The parallel version}
The core of the algorithm is the following :
\begin{code}
majorityVote :: forall a . (Typed a, Eq a) => [Stream a] -> Stream a
majorityVote [] = error "empty list"
majorityVote (x : xs) = aux x 1 xs
where
aux :: Stream a -> Stream Word8 -> [Stream a] -> Stream a
aux p _s [] = p
aux p s (l : ls) =
local (if s == 0 then l else p) $ \ p' ->
local (if s == 0 || l == p then s + 1 else s - 1) $ \ s' ->
aux p' s' ls
\end{code}
Let's denote $A$ the set of the elements that can be used as inputs for the algorithm. If $l$ is a list and $a \in A$, we denote $|l|_a$ the number of occurrences of $a$ in $l$. The total length of a list $l$ is simply written $|l|$. The \texttt{majorityVote} function takes a list of streams $l$ as its input and returns an output $maj$ such that : \[ \forall a \in A, \; \left( a \neq maj \right) \Longrightarrow \left(|l|_a \leq |l| / {2}\right) \] As said before, quantifiers are handled very poorly by SMT solvers and their use is restricted in most model-checking tools, including \texttt{copilot-kind}. Fortunately, in this case, we can use a very simple trick to write and check this property. Indeed, if $P(n)$ is a predicate of an integer $n$, we have $\forall n, \; P(n)$ if and only if $\neg P(n)$ is unsatisfiable, where $n$ is an unconstrained integer, which can be checked by an SMT solver. Using this trick, the corresponding Copilot specification can be written as :
\begin{code}
okWith ::
forall a . (Typed a, Eq a) =>
Stream a -> [Stream a] -> Stream a -> Stream Bool
okWith a l maj = (a /= maj) ==> ((2 * count a l) <= length l)
where
count :: Stream a -> [Stream a] -> Stream Word8
count _e [] = 0
count e (x : xs) = (if x == e then 1 else 0) + count e xs
spec :: Spec
spec = do
prop "OK" (okWith (arbitraryCst "n") ss maj)
where
ss = [ arbitrary ("s" ++ show i) | i <- [1..10]]
maj = majorityVote
\end{code}
The function \texttt{arbitrary} is provided by the copilot-kind standard library and introduces an arbitrary stream. In the same way, \texttt{arbitraryCst} introduces a stream taking an unconstrained but constant value.
Note that we prove the algorithm for a fixed number of $N$ inputs (here $N=10$). Therefore no induction is needed for the proof and the invariant of the Boyer-Moore algorithm doesn't need to be made explicit. However, the size of the problem discharged to the SMT solver grows in proportion to $N$.
\paragraph{The serial version} Now, we discuss an implementation of the algorithm where the inputs are read one by one in a single stream and the result is updated at each clock tick. As the number of inputs of the algorithm isn't bounded anymore, a proof by induction is necessary and the invariant of the Boyer-Moore algorithm, being non-trivial, has to be stated explicitly. As seen in \ref{?}, this invariant is :
\[ \begin{array}{c}
\forall m \in A, \;\;\; \left(m \neq p\right) \Longrightarrow \left( s + 2|l|_m \,\leq\, |l| \right) \;\; \wedge \;\; \left(m = p\right) \Longrightarrow \left( 2|l|_m \,\leq\, s + |l| \right)
\\
\end{array} \]
where $l$ is the list of processed inputs, $p$ is the intermediary result and $s$ is an internal state of the algorithm. The problem here is that the induction invariant needs universal quantification to be expressed. Unfortunately, this quantifier can't be removed by a similar trick like the one seen previously. Indeed, when an invariant is of the form $\forall x. P(x, s)$, $s$ denoting the current state of the world, the induction formula we have to prove is :
\[ \forall x. P(x, s) \,\wedge\, T\left(s, s' \right) \,\models\, \forall x. P(x, s') \]
Sometimes, the stronger entailment
\[ P(x, s) \,\wedge\, T\left(s, s' \right) \,\models\, P(x, s') \]
holds and the problem becomes tractable for the SMT solver, by replacing a universally quantified variable by an unconstrained one. In our current example, it is not the case.
The way we fix this is far from being perfect : we restrict ourselves to the case where $A$ is finite and replace each formula of the form $\forall x \in A \; P(x)$ by $\bigwedge_{x \in A} P(x)$. This can be done with the help of the \texttt{forAllCst} function provided by the copilot-kind standard library. It is defined as :
\begin{code}
forAllCst ::(Typed a) => [a] -> (Stream a -> Stream Bool) -> Stream Bool
forAllCst l f = conj \$ map (f . constant) l
where conj = foldl (&&) true
\end{code}
The code for the serial Boyer-Moore algorithm and its specification is then :
\begin{code}
allowed :: [Word8]
allowed = [1, 2]
majority :: Stream Word8 -> (Stream Word8, Stream Word8, Stream Bool)
majority l = (p, s, j)
where
p = [0] ++ if s <= 0 then l else p
s = [0] ++ if p == l || s <= 0 then s + 1 else s - 1
k = [0] ++ (1 + k)
count m = cnt
where cnt = [0] ++ if l == m then cnt + 1 else cnt
j = forAllCst allowed \$ \ m ->
local (count m) \$ \ cnt ->
let j0 = (m /= p) ==> ((s + 2 * cnt) <= k)
j1 = (m == p) ==> ((2 * cnt) <= (s + k))
in j0 && j1
spec = do
prop "J" j
prop "inRange" (existsCst allowed \$ \ a -> input == a)
where
input = externW8 "in" Nothing
(p, s, j) = majority input
scheme = do
assuming ["inRange"] \$ check "J"
\end{code}
As the reader can see, we make the hypothesis that all the elements manipulated by the algorithm are in the set \texttt{allowed}, which is finite. As this set grows, the proofs discharged to the SMT solver become ever larger and more redundant, which is why this solution is not scalable and has limited real-world interest.
|
{"hexsha": "deda03ca042cc519ede7c46b769a6c65a8c69af0", "size": 33522, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "co-kind-report/IL.tex", "max_stars_repo_name": "Copilot-Language/copilot-discussion", "max_stars_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-06-10T00:44:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T13:20:09.000Z", "max_issues_repo_path": "co-kind-report/IL.tex", "max_issues_repo_name": "Copilot-Language/copilot-discussion", "max_issues_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2019-04-01T20:24:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-07T22:34:17.000Z", "max_forks_repo_path": "co-kind-report/IL.tex", "max_forks_repo_name": "Copilot-Language/copilot-discussion", "max_forks_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8769633508, "max_line_length": 976, "alphanum_fraction": 0.6906509158, "num_tokens": 9973}
|
(* Copyright 2021 (C) Mihails Milehins *)
section\<open>Simple semicategories\<close>
theory CZH_SMC_Simple
imports
CZH_DG_Simple
CZH_SMC_NTSMCF
begin
subsection\<open>Background\<close>
text\<open>
The section presents a variety of simple semicategories, such as the empty
semicategory \<open>0\<close> and a semicategory with one object and one arrow \<open>1\<close>.
All of the entities presented in this section are generalizations of certain
simple categories, whose definitions can be found
in \<^cite>\<open>"mac_lane_categories_2010"\<close>.
\<close>
subsection\<open>Empty semicategory \<open>0\<close>\<close>
subsubsection\<open>Definition and elementary properties\<close>
text\<open>See Chapter I-2 in \<^cite>\<open>"mac_lane_categories_2010"\<close>.\<close>
(* The empty semicategory: all five components (Obj, Arr, Dom, Cod, Comp)
   are the empty set 0. *)
definition smc_0 :: "V"
  where "smc_0 = [0, 0, 0, 0, 0]\<^sub>\<circ>"
text\<open>Components.\<close>
(* Every component of the empty semicategory is the empty set. *)
lemma smc_0_components:
  shows "smc_0\<lparr>Obj\<rparr> = 0"
    and "smc_0\<lparr>Arr\<rparr> = 0"
    and "smc_0\<lparr>Dom\<rparr> = 0"
    and "smc_0\<lparr>Cod\<rparr> = 0"
    and "smc_0\<lparr>Comp\<rparr> = 0"
  unfolding smc_0_def dg_field_simps by (simp_all add: nat_omega_simps)
text\<open>Slicing.\<close>
lemma smc_dg_smc_0: "smc_dg smc_0 = dg_0"
unfolding smc_dg_def smc_0_def dg_0_def dg_field_simps
by (simp add: nat_omega_simps)
lemmas_with (in \<Z>) [folded smc_dg_smc_0, unfolded slicing_simps]:
smc_0_is_arr_iff = dg_0_is_arr_iff
subsubsection\<open>\<open>0\<close> is a semicategory\<close>
lemma (in \<Z>) semicategory_smc_0[smc_cs_intros]: "semicategory \<alpha> smc_0"
proof(intro semicategoryI)
show "vfsequence smc_0" unfolding smc_0_def by (simp add: nat_omega_simps)
show "vcard smc_0 = 5\<^sub>\<nat>" unfolding smc_0_def by (simp add: nat_omega_simps)
show "digraph \<alpha> (smc_dg smc_0)"
by (simp add: smc_dg_smc_0 \<Z>.digraph_dg_0 \<Z>_axioms)
qed (auto simp: smc_0_components smc_0_is_arr_iff)
lemmas [smc_cs_intros] = \<Z>.semicategory_smc_0
subsubsection\<open>Opposite of the semicategory \<open>0\<close>\<close>
lemma op_smc_smc_0[smc_op_simps]: "op_smc (smc_0) = smc_0"
proof(rule smc_dg_eqI)
define \<beta> where "\<beta> = \<omega> + \<omega>"
interpret \<beta>: \<Z> \<beta> unfolding \<beta>_def by (rule \<Z>_\<omega>\<omega>)
show "semicategory \<beta> (op_smc smc_0)"
by (cs_concl cs_shallow cs_intro: smc_cs_intros smc_op_intros)
show "semicategory \<beta> smc_0" by (cs_concl cs_shallow cs_intro: smc_cs_intros)
qed
(
simp_all add:
smc_0_components op_smc_components smc_dg_smc_0
slicing_commute[symmetric] dg_op_simps
)
subsubsection\<open>A semicategory without objects is empty\<close>
lemma (in semicategory) smc_smc_0_if_Obj_0:
assumes "\<CC>\<lparr>Obj\<rparr> = 0"
shows "\<CC> = smc_0"
by (rule smc_eqI[of \<alpha>])
(
auto simp:
smc_cs_intros
assms
semicategory_smc_0
smc_0_components
smc_Arr_vempty_if_Obj_vempty
smc_Cod_vempty_if_Arr_vempty
smc_Dom_vempty_if_Arr_vempty
smc_Comp_vempty_if_Arr_vempty
)
subsection\<open>Empty semifunctor\<close>
text\<open>
An empty semifunctor is defined as a semifunctor between an
empty semicategory and an arbitrary semicategory.
\<close>
subsubsection\<open>Definition and elementary properties\<close>
(* The empty semifunctor into \<AA>: empty object and arrow maps, with
   domain smc_0 and codomain \<AA>. *)
definition smcf_0 :: "V \<Rightarrow> V"
  where "smcf_0 \<AA> = [0, 0, smc_0, \<AA>]\<^sub>\<circ>"
text\<open>Components.\<close>
lemma smcf_0_components:
shows "smcf_0 \<AA>\<lparr>ObjMap\<rparr> = 0"
and "smcf_0 \<AA>\<lparr>ArrMap\<rparr> = 0"
and "smcf_0 \<AA>\<lparr>HomDom\<rparr> = smc_0"
and "smcf_0 \<AA>\<lparr>HomCod\<rparr> = \<AA>"
unfolding smcf_0_def dghm_field_simps by (simp_all add: nat_omega_simps)
text\<open>Slicing.\<close>
lemma smcf_dghm_smcf_0: "smcf_dghm (smcf_0 \<AA>) = dghm_0 (smc_dg \<AA>)"
unfolding
smcf_dghm_def smcf_0_def dg_0_def smc_0_def dghm_0_def smc_dg_def
dg_field_simps dghm_field_simps
by (simp add: nat_omega_simps)
text\<open>Opposite empty semicategory homomorphism.\<close>
lemma op_smcf_smcf_0: "op_smcf (smcf_0 \<CC>) = smcf_0 (op_smc \<CC>)"
unfolding
smcf_0_def op_smc_def op_smcf_def smc_0_def dghm_field_simps dg_field_simps
by (simp add: nat_omega_simps)
subsubsection\<open>Object map\<close>
lemma smcf_0_ObjMap_vsv[smc_cs_intros]: "vsv (smcf_0 \<CC>\<lparr>ObjMap\<rparr>)"
unfolding smcf_0_components by simp
subsubsection\<open>Arrow map\<close>
lemma smcf_0_ArrMap_vsv[smc_cs_intros]: "vsv (smcf_0 \<CC>\<lparr>ArrMap\<rparr>)"
unfolding smcf_0_components by simp
subsubsection\<open>Empty semifunctor is a faithful semifunctor\<close>
lemma (in \<Z>) smcf_0_is_ft_semifunctor:
assumes "semicategory \<alpha> \<AA>"
shows "smcf_0 \<AA> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>.\<^sub>f\<^sub>a\<^sub>i\<^sub>t\<^sub>h\<^sub>f\<^sub>u\<^sub>l\<^bsub>\<alpha>\<^esub> \<AA>"
proof(rule is_ft_semifunctorI)
show "smcf_0 \<AA> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA>"
proof(rule is_semifunctorI, unfold smc_dg_smc_0 smcf_dghm_smcf_0)
show "vfsequence (smcf_0 \<AA>)" unfolding smcf_0_def by simp
show "vcard (smcf_0 \<AA>) = 4\<^sub>\<nat>"
unfolding smcf_0_def by (simp add: nat_omega_simps)
show "dghm_0 (smc_dg \<AA>) : dg_0 \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<AA>"
by
(
simp add:
assms
dghm_0_is_ft_dghm
is_ft_dghm.axioms(1)
semicategory.smc_digraph
)
qed (auto simp: assms semicategory_smc_0 smcf_0_components smc_0_is_arr_iff)
show "smcf_dghm (smcf_0 \<AA>) : smc_dg smc_0 \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^sub>.\<^sub>f\<^sub>a\<^sub>i\<^sub>t\<^sub>h\<^sub>f\<^sub>u\<^sub>l\<^bsub>\<alpha>\<^esub> smc_dg \<AA>"
by
(
auto simp:
assms
\<Z>.dghm_0_is_ft_dghm
\<Z>_axioms
smc_dg_smc_0
semicategory.smc_digraph
smcf_dghm_smcf_0
)
qed
lemma (in \<Z>) smcf_0_is_ft_semifunctor'[smcf_cs_intros]:
assumes "semicategory \<alpha> \<AA>"
and "\<BB>' = \<AA>"
and "\<AA>' = smc_0"
shows "smcf_0 \<AA> : \<AA>' \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>.\<^sub>f\<^sub>a\<^sub>i\<^sub>t\<^sub>h\<^sub>f\<^sub>u\<^sub>l\<^bsub>\<alpha>\<^esub> \<BB>'"
using assms(1) unfolding assms(2,3) by (rule smcf_0_is_ft_semifunctor)
lemmas [smcf_cs_intros] = \<Z>.smcf_0_is_ft_semifunctor'
lemma (in \<Z>) smcf_0_is_semifunctor:
assumes "semicategory \<alpha> \<AA>"
shows "smcf_0 \<AA> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<AA>"
using smcf_0_is_ft_semifunctor[OF assms] by auto
lemma (in \<Z>) smcf_0_is_semifunctor'[smc_cs_intros]:
assumes "semicategory \<alpha> \<AA>"
and "\<BB>' = \<AA>"
and "\<AA>' = smc_0"
shows "smcf_0 \<AA> : \<AA>' \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>'"
using assms(1) unfolding assms(2,3) by (rule smcf_0_is_semifunctor)
lemmas [smc_cs_intros] = \<Z>.smcf_0_is_semifunctor'
subsubsection\<open>Further properties\<close>
lemma is_semifunctor_is_smcf_0_if_smc_0:
assumes "\<FF> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows "\<FF> = smcf_0 \<CC>"
proof(rule smcf_dghm_eqI)
interpret \<FF>: is_semifunctor \<alpha> smc_0 \<CC> \<FF> by (rule assms(1))
show "\<FF> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" by (rule assms(1))
then have dom_lhs: "\<D>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) = 0" "\<D>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) = 0"
by (cs_concl cs_simp: smc_cs_simps smc_0_components)+
show "smcf_0 \<CC> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" by (cs_concl cs_intro: smc_cs_intros)
show "smcf_dghm \<FF> = smcf_dghm (smcf_0 \<CC>)"
unfolding smcf_dghm_smcf_0
by
(
rule is_dghm_is_dghm_0_if_dg_0,
rule \<FF>.smcf_is_dghm[unfolded slicing_simps smc_dg_smc_0]
)
qed simp_all
subsection\<open>Empty natural transformation of semifunctors\<close>
subsubsection\<open>Definition and elementary properties\<close>
(* The empty natural transformation: empty NT map between two copies of
   the empty semifunctor smcf_0 \<CC>, over smc_0 and \<CC>. *)
definition ntsmcf_0 :: "V \<Rightarrow> V"
  where "ntsmcf_0 \<CC> = [0, smcf_0 \<CC>, smcf_0 \<CC>, smc_0, \<CC>]\<^sub>\<circ>"
text\<open>Components.\<close>
lemma ntsmcf_0_components:
shows "ntsmcf_0 \<CC>\<lparr>NTMap\<rparr> = 0"
and [smc_cs_simps]: "ntsmcf_0 \<CC>\<lparr>NTDom\<rparr> = smcf_0 \<CC>"
and [smc_cs_simps]: "ntsmcf_0 \<CC>\<lparr>NTCod\<rparr> = smcf_0 \<CC>"
and [smc_cs_simps]: "ntsmcf_0 \<CC>\<lparr>NTDGDom\<rparr> = smc_0"
and [smc_cs_simps]: "ntsmcf_0 \<CC>\<lparr>NTDGCod\<rparr> = \<CC>"
unfolding ntsmcf_0_def nt_field_simps by (simp_all add: nat_omega_simps)
text\<open>Slicing.\<close>
lemma ntsmcf_tdghm_ntsmcf_0: "ntsmcf_tdghm (ntsmcf_0 \<AA>) = tdghm_0 (smc_dg \<AA>)"
unfolding
ntsmcf_tdghm_def ntsmcf_0_def tdghm_0_def smcf_dghm_def
smcf_0_def smc_dg_def smc_0_def dghm_0_def dg_0_def
dg_field_simps dghm_field_simps nt_field_simps
by (simp add: nat_omega_simps)
text\<open>Duality.\<close>
lemma op_ntsmcf_ntsmcf_0: "op_ntsmcf (ntsmcf_0 \<CC>) = ntsmcf_0 (op_smc \<CC>)"
by
(
simp_all add:
op_ntsmcf_def ntsmcf_0_def op_smc_def op_smcf_smcf_0 smc_0_def
nt_field_simps dg_field_simps nat_omega_simps
)
subsubsection\<open>Natural transformation map\<close>
lemma ntsmcf_0_NTMap_vsv[smc_cs_intros]: "vsv (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>)"
unfolding ntsmcf_0_components by simp
lemma ntsmcf_0_NTMap_vdomain[smc_cs_simps]: "\<D>\<^sub>\<circ> (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>) = 0"
unfolding ntsmcf_0_components by simp
lemma ntsmcf_0_NTMap_vrange[smc_cs_simps]: "\<R>\<^sub>\<circ> (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>) = 0"
unfolding ntsmcf_0_components by simp
subsubsection\<open>
Empty natural transformation of semifunctors
is a natural transformation of semifunctors
\<close>
lemma (in semicategory) smc_ntsmcf_0_is_ntsmcfI:
"ntsmcf_0 \<CC> : smcf_0 \<CC> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F smcf_0 \<CC> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
proof(intro is_ntsmcfI)
show "vfsequence (ntsmcf_0 \<CC>)" unfolding ntsmcf_0_def by simp
show "vcard (ntsmcf_0 \<CC>) = 5\<^sub>\<nat>"
unfolding ntsmcf_0_def by (simp add: nat_omega_simps)
show "ntsmcf_tdghm (ntsmcf_0 \<CC>) :
smcf_dghm (smcf_0 \<CC>) \<mapsto>\<^sub>D\<^sub>G\<^sub>H\<^sub>M smcf_dghm (smcf_0 \<CC>) :
smc_dg smc_0 \<mapsto>\<mapsto>\<^sub>D\<^sub>G\<^bsub>\<alpha>\<^esub> smc_dg \<CC>"
unfolding ntsmcf_tdghm_ntsmcf_0 smcf_dghm_smcf_0 smc_dg_smc_0
by (cs_concl cs_shallow cs_intro: dg_cs_intros slicing_intros)
show
"ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>\<lparr>b\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> smcf_0 \<CC>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> =
smcf_0 \<CC>\<lparr>ArrMap\<rparr>\<lparr>f\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>\<lparr>a\<rparr>"
if "f : a \<mapsto>\<^bsub>smc_0\<^esub> b" for a b f
using that by (elim is_arrE) (auto simp: smc_0_components)
qed
(
cs_concl cs_shallow
cs_simp: smc_cs_simps smc_0_components(1) cs_intro: smc_cs_intros
)+
lemma (in semicategory) smc_ntsmcf_0_is_ntsmcfI'[smc_cs_intros]:
assumes "\<FF>' = smcf_0 \<CC>"
and "\<GG>' = smcf_0 \<CC>"
and "\<AA>' = smc_0"
and "\<BB>' = \<CC>"
and "\<FF>' = \<FF>"
and "\<GG>' = \<GG>"
shows "ntsmcf_0 \<CC> : \<FF>' \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG>' : \<AA>' \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>'"
unfolding assms by (rule smc_ntsmcf_0_is_ntsmcfI)
lemmas [smc_cs_intros] = semicategory.smc_ntsmcf_0_is_ntsmcfI'
lemma is_ntsmcf_is_ntsmcf_0_if_smc_0:
assumes "\<NN> : \<FF> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows "\<NN> = ntsmcf_0 \<CC>" and "\<FF> = smcf_0 \<CC>" and "\<GG> = smcf_0 \<CC>"
proof-
interpret \<NN>: is_ntsmcf \<alpha> smc_0 \<CC> \<FF> \<GG> \<NN> by (rule assms(1))
note is_tdghm_is_tdghm_0_if_dg_0 = is_tdghm_is_tdghm_0_if_dg_0
[
OF \<NN>.ntsmcf_is_tdghm[unfolded smc_dg_smc_0],
folded smcf_dghm_smcf_0 ntsmcf_tdghm_ntsmcf_0
]
show \<FF>_def: "\<FF> = smcf_0 \<CC>" and \<GG>_def: "\<GG> = smcf_0 \<CC>"
by (all\<open>intro is_semifunctor_is_smcf_0_if_smc_0\<close>)
(cs_concl cs_shallow cs_intro: smc_cs_intros)+
show "\<NN> = ntsmcf_0 \<CC>"
proof(rule ntsmcf_tdghm_eqI)
show "\<NN> : \<FF> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>" by (rule assms(1))
show "ntsmcf_0 \<CC> : \<FF> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
by (cs_concl cs_simp: \<FF>_def \<GG>_def cs_intro: smc_cs_intros)
qed (simp_all add: \<FF>_def \<GG>_def is_tdghm_is_tdghm_0_if_dg_0)
qed
subsubsection\<open>Further properties\<close>
lemma ntsmcf_vcomp_ntsmcf_ntsmcf_0[smc_cs_simps]:
assumes "\<NN> : \<FF> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows "\<NN> \<bullet>\<^sub>N\<^sub>T\<^sub>S\<^sub>M\<^sub>C\<^sub>F ntsmcf_0 \<CC> = ntsmcf_0 \<CC>"
proof-
interpret \<NN>: is_ntsmcf \<alpha> smc_0 \<CC> \<FF> \<GG> \<NN> by (rule assms(1))
show ?thesis
unfolding is_ntsmcf_is_ntsmcf_0_if_smc_0[OF assms]
proof(rule ntsmcf_eqI)
show "ntsmcf_0 \<CC> \<bullet>\<^sub>N\<^sub>T\<^sub>S\<^sub>M\<^sub>C\<^sub>F ntsmcf_0 \<CC> :
smcf_0 \<CC> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F smcf_0 \<CC> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
by (cs_concl cs_intro: smc_cs_intros)
then have dom_lhs: "\<D>\<^sub>\<circ> ((ntsmcf_0 \<CC> \<bullet>\<^sub>N\<^sub>T\<^sub>S\<^sub>M\<^sub>C\<^sub>F ntsmcf_0 \<CC>)\<lparr>NTMap\<rparr>) = 0"
by
(
cs_concl
cs_simp: smc_cs_simps smc_0_components cs_intro: smc_cs_intros
)
show "ntsmcf_0 \<CC> : smcf_0 \<CC> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F smcf_0 \<CC> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
by (cs_concl cs_intro: smc_cs_intros)
then have dom_rhs: "\<D>\<^sub>\<circ> (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>) = 0"
by
(
cs_concl
cs_simp: smc_cs_simps smc_0_components cs_intro: smc_cs_intros
)
show "(ntsmcf_0 \<CC> \<bullet>\<^sub>N\<^sub>T\<^sub>S\<^sub>M\<^sub>C\<^sub>F ntsmcf_0 \<CC>)\<lparr>NTMap\<rparr> = ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>"
by (rule vsv_eqI, unfold dom_lhs dom_rhs) (auto intro: smc_cs_intros)
qed simp_all
qed
lemma ntsmcf_vcomp_ntsmcf_0_ntsmcf[smc_cs_simps]:
assumes "\<NN> : \<FF> \<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<GG> : smc_0 \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows "ntsmcf_0 \<CC> \<bullet>\<^sub>N\<^sub>T\<^sub>S\<^sub>M\<^sub>C\<^sub>F \<NN> = ntsmcf_0 \<CC>"
proof-
interpret \<NN>: is_ntsmcf \<alpha> smc_0 \<CC> \<FF> \<GG> \<NN> by (rule assms(1))
show ?thesis
unfolding is_ntsmcf_is_ntsmcf_0_if_smc_0[OF assms]
by (cs_concl cs_simp: smc_cs_simps cs_intro: smc_cs_intros)
qed
subsection\<open>\<open>10\<close>: semicategory with one object and no arrows\<close>
subsubsection\<open>Definition and elementary properties\<close>
(* The semicategory 10: a single object \<aa> and no arrows. *)
definition smc_10 :: "V \<Rightarrow> V"
  where "smc_10 \<aa> = [set {\<aa>}, 0, 0, 0, 0]\<^sub>\<circ>"
text\<open>Components.\<close>
lemma smc_10_components:
shows "smc_10 \<aa>\<lparr>Obj\<rparr> = set {\<aa>}"
and "smc_10 \<aa>\<lparr>Arr\<rparr> = 0"
and "smc_10 \<aa>\<lparr>Dom\<rparr> = 0"
and "smc_10 \<aa>\<lparr>Cod\<rparr> = 0"
and "smc_10 \<aa>\<lparr>Comp\<rparr> = 0"
unfolding smc_10_def dg_field_simps by (auto simp: nat_omega_simps)
text\<open>Slicing.\<close>
lemma smc_dg_smc_10: "smc_dg (smc_10 \<aa>) = (dg_10 \<aa>)"
unfolding smc_dg_def smc_10_def dg_10_def dg_field_simps
by (simp add: nat_omega_simps)
lemmas_with (in \<Z>) [folded smc_dg_smc_10, unfolded slicing_simps]:
smc_10_is_arr_iff = dg_10_is_arr_iff
subsubsection\<open>\<open>10\<close> is a semicategory\<close>
lemma (in \<Z>) semicategory_smc_10:
assumes "\<aa> \<in>\<^sub>\<circ> Vset \<alpha>"
shows "semicategory \<alpha> (smc_10 \<aa>)"
proof(intro semicategoryI)
show "vfsequence (smc_10 \<aa>)"
unfolding smc_10_def by (simp add: nat_omega_simps)
show "vcard (smc_10 \<aa>) = 5\<^sub>\<nat>"
unfolding smc_10_def by (simp add: nat_omega_simps)
show "digraph \<alpha> (smc_dg (smc_10 \<aa>))"
unfolding smc_dg_smc_10 by (rule digraph_dg_10[OF assms])
qed (auto simp: smc_10_components smc_10_is_arr_iff vsubset_vsingleton_leftI)
subsubsection\<open>Arrow with a domain and a codomain\<close>
lemma smc_10_is_arr_iff: "\<FF> : \<AA> \<mapsto>\<^bsub>smc_10 \<aa>\<^esub> \<BB> \<longleftrightarrow> False"
unfolding is_arr_def smc_10_components by simp
subsection\<open>\<open>1\<close>: semicategory with one object and one arrow\<close>
subsubsection\<open>Definition and elementary properties\<close>
(* The semicategory 1: one object \<aa>, one arrow \<ff> with \<ff> \<circ> \<ff> = \<ff>. *)
definition smc_1 :: "V \<Rightarrow> V \<Rightarrow> V"
  where "smc_1 \<aa> \<ff> =
    [set {\<aa>}, set {\<ff>}, set {\<langle>\<ff>, \<aa>\<rangle>}, set {\<langle>\<ff>, \<aa>\<rangle>}, set {\<langle>[\<ff>, \<ff>]\<^sub>\<circ>, \<ff>\<rangle>}]\<^sub>\<circ>"
text\<open>Components.\<close>
lemma smc_1_components:
shows "smc_1 \<aa> \<ff>\<lparr>Obj\<rparr> = set {\<aa>}"
and "smc_1 \<aa> \<ff>\<lparr>Arr\<rparr> = set {\<ff>}"
and "smc_1 \<aa> \<ff>\<lparr>Dom\<rparr> = set {\<langle>\<ff>, \<aa>\<rangle>}"
and "smc_1 \<aa> \<ff>\<lparr>Cod\<rparr> = set {\<langle>\<ff>, \<aa>\<rangle>}"
and "smc_1 \<aa> \<ff>\<lparr>Comp\<rparr> = set {\<langle>[\<ff>, \<ff>]\<^sub>\<circ>, \<ff>\<rangle>}"
unfolding smc_1_def dg_field_simps by (simp_all add: nat_omega_simps)
text\<open>Slicing.\<close>
lemma dg_smc_1: "smc_dg (smc_1 \<aa> \<ff>) = dg_1 \<aa> \<ff>"
unfolding smc_dg_def smc_1_def dg_1_def dg_field_simps
by (simp add: nat_omega_simps)
lemmas_with [folded dg_smc_1, unfolded slicing_simps]:
smc_1_is_arrI = dg_1_is_arrI
and smc_1_is_arrD = dg_1_is_arrD
and smc_1_is_arrE = dg_1_is_arrE
and smc_1_is_arr_iff = dg_1_is_arr_iff
subsubsection\<open>Composition\<close>
lemma smc_1_Comp_app[simp]: "\<ff> \<circ>\<^sub>A\<^bsub>smc_1 \<aa> \<ff>\<^esub> \<ff> = \<ff>"
unfolding smc_1_components by simp
subsubsection\<open>\<open>1\<close> is a semicategory\<close>
lemma (in \<Z>) semicategory_smc_1:
assumes "\<aa> \<in>\<^sub>\<circ> Vset \<alpha>" and "\<ff> \<in>\<^sub>\<circ> Vset \<alpha>"
shows "semicategory \<alpha> (smc_1 \<aa> \<ff>)"
proof(intro semicategoryI, unfold dg_smc_1)
show "vfsequence (smc_1 \<aa> \<ff>)"
unfolding smc_1_def by (simp add: nat_omega_simps)
show "vcard (smc_1 \<aa> \<ff>) = 5\<^sub>\<nat>"
unfolding smc_1_def by (simp add: nat_omega_simps)
qed
(
auto simp:
assms
digraph_dg_1
smc_1_is_arr_iff
smc_1_components
vsubset_vsingleton_leftI
)
text\<open>\newpage\<close>
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/CZH_Foundations/czh_semicategories/CZH_SMC_Simple.thy"}
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Copyright 2019 Pivotal Software Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
"""
Name: transactionGnerator.py
Author: Jarrod Vawdrey
Description:
"""
import time
import json
import random
import numpy as np
import pandas as pd
import csv
import math
import sys
import logging
from datetime import timedelta,datetime
from kafka import KafkaProducer, KafkaClient
import geopy.distance
import enlighten
import redis
import fraudSignatures as fs
class myDataFiles:
    """Load and index the static reference data used by the generator.

    Reads ``locations.json`` and ``accounts.json`` from the current working
    directory and precomputes, for every account, the list of merchant
    locations within that account's ``transaction_radius``.

    Attributes set by __init__:
        locations          -- list of merchant-location dicts
        uniqueStatesList   -- merchant states in first-seen order
        uniqueStates       -- state -> list of locations in that state
        accounts           -- list of account dicts
        accounts_location  -- account_id -> locations within the account's radius
    """
    def __init__(self):
        # import locations data
        with open('locations.json') as data_file:
            locations = json.load(data_file)
        # prep locations data in with state for key
        uniqueStatesList = []
        for loc in locations:
            if(loc['merchant_state'] not in uniqueStatesList):
                uniqueStatesList.append(loc['merchant_state'])
        uniqueStates = {}
        for state in uniqueStatesList:
            uniqueStates[state] = [loc for loc in locations if loc['merchant_state'] == state]
        # import accounts data
        with open('accounts.json') as data_file:
            accounts = json.load(data_file)
        self.locations = locations
        self.uniqueStatesList = uniqueStatesList
        self.uniqueStates = uniqueStates
        self.accounts = accounts
        # Setup progress bar
        manager = enlighten.get_manager()
        N = len(self.accounts)
        msg = "Mapping {} locations to {} accounts".format(len(self.locations), N)
        logging.info(msg)
        pbar = manager.counter(total=N, desc='Progress', unit='account')
        # build list of locations within "distance" of account holders home address
        # NOTE(review): this is O(accounts * locations); geopy.distance.vincenty
        # was removed in geopy 2.0 (replaced by geodesic) -- confirm pinned version.
        self.accounts_location = {}
        for a in self.accounts:
            self.accounts_location[a['account_id']] = []
            for l in self.locations:
                dist = geopy.distance.vincenty((a['lat'], a['long']), (l['merchant_lat'],l['merchant_long'])).miles
                if (dist < a['transaction_radius']):
                    # side effect: the shared location dict keeps the distance
                    # from the *last* in-range account processed
                    l['merchant_distance'] = dist
                    self.accounts_location[a['account_id']].append(l)
            pbar.update()
        manager.stop()
class myTimestamp():
    """Simulated wall clock that advances by small random steps.

    Each newTimestamp() call moves the clock forward a few seconds, clamps
    the hour into [minOpen, maxClose], then checks the merchant's opening
    hours for the current weekday. It returns the time as a Unix epoch
    float on success, or False when the merchant is closed (the clock is
    nudged forward a few minutes so the caller can retry).
    """
    def __init__(self, minOpen=2, maxClose=23):
        self.timestamp = datetime.now()
        self.minOpen = minOpen    # earliest simulated hour of day
        self.maxClose = maxClose  # latest simulated hour of day
    # To Do: Make this location specific
    def newTimestamp(self, loc):
        """Advance the clock and validate it against loc's opening hours.

        loc must carry '<day>_open'/'<day>_close' integer hours, with -1
        meaning closed all day. Returns float epoch seconds, or False.
        """
        rnd = int(random.random() * 10)
        self.timestamp = self.timestamp + timedelta(seconds=rnd)
        dow = self.timestamp.weekday()
        days = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
        open_hour = loc['{}{}'.format(days[dow], '_open')]
        close_hour = loc['{}{}'.format(days[dow], '_close')]
        if self.timestamp.hour < self.minOpen:
            # BUG FIX: datetime objects are immutable; the original called
            # .replace() and discarded the result, so this clamp never
            # actually took effect.
            self.timestamp = self.timestamp.replace(hour=self.minOpen)
        if self.timestamp.hour > self.maxClose:
            # past closing for the day: roll over to minOpen next morning
            N = 24 - self.timestamp.hour + self.minOpen
            self.timestamp = self.timestamp.replace(minute=0, second=0) + timedelta(hours=N)
        if open_hour == -1 or close_hour == -1 or self.timestamp.hour < open_hour or self.timestamp.hour > close_hour:
            # merchant closed at this simulated time: nudge clock and reject
            rnd = int(random.random() * 10)
            self.timestamp = self.timestamp + timedelta(minutes=rnd)
            return False
        return time.mktime(self.timestamp.timetuple())
def output_file(filename, records, type='json'):
    """Write *records* (a list of dicts) to *filename*.

    Args:
        filename: destination path.
        records: list of transaction dicts.
        type: 'csv' (single CSV via pandas) or 'json' (one JSON object per line).

    Errors are logged rather than raised, matching the generator's
    best-effort batch-writing behaviour.
    """
    try:
        if type == 'csv':
            df = pd.read_json(json.dumps(records), orient='list')
            df.to_csv(filename, index=False, quoting=csv.QUOTE_NONNUMERIC)
        elif type == 'json':
            # context manager guarantees the handle is closed even if a
            # write fails (the original left the file open on error)
            with open(filename, 'w') as f:
                for rec in records:
                    f.write(json.dumps(rec) + '\n')
    except Exception as e:
        logging.error(e)
def iterate_transaction_id(datafiles, transaction_id):
    """Bump the transaction_id counter of every location currently at *transaction_id*."""
    for location in datafiles.locations:
        if location['transaction_id'] == transaction_id:
            location['transaction_id'] = location['transaction_id'] + 1
def random_location(datafiles, acct):
    """Choose a merchant location for *acct*.

    Preference order:
      1. a location within the account's transaction_radius (precomputed);
      2. any location in the account's home state;
      3. any location at all.
    """
    candidates = datafiles.accounts_location[acct['account_id']]
    logging.debug("{} total location found within {} miles".format(len(candidates), acct['transaction_radius']))
    if candidates:
        return random.choice(candidates)
    # no locations found - looks within state
    if acct['state'] in datafiles.uniqueStatesList:
        logging.debug("No merchant found within {} miles - choosing location within state".format(acct['transaction_radius']))
        return random.choice(datafiles.uniqueStates[acct['state']])
    # final option - pick location at random
    logging.debug("No merchant found within {} miles or state {} - choosing random location".format(acct['transaction_radius'], acct['state']))
    return random.choice(datafiles.locations)
def random_account(datafiles):
    """Return a uniformly random account record from the loaded accounts."""
    accounts = datafiles.accounts
    return random.choice(accounts)
def generate_transaction(datafiles, ts, fraud, storeFraudFlag, target):
    """Build one synthetic card transaction.

    Args:
        datafiles: myDataFiles instance with accounts/locations loaded.
        ts: myTimestamp instance advancing the simulated clock.
        fraud: when True, apply a fraud signature via fs.transform.
        storeFraudFlag: when True, include a 'fraud_flag' field (initially False).
        target: 'file' (numeric epoch timestamps) or 'kafka' (formatted strings).

    Returns:
        The transaction dict, or False when the drawn timestamp fell outside
        the merchant's opening hours (caller should retry).
    """
    # Grab random account, then a merchant location plausible for it
    acct = random_account(datafiles)
    loc = random_location(datafiles, acct)
    iterate_transaction_id(datafiles, loc['transaction_id'])
    trxnTS = ts.newTimestamp(loc)
    if trxnTS is False:
        return False
    # Create transaction (account dependent amount) - 20%
    if (np.random.rand() < 0.2):
        trxn_amount = str(round(np.random.normal(acct['trxn_mean'], acct['trxn_std']), 2))
    # Create transaction (merchant dependent amount) - 80%
    else:
        trxn_amount = str(round(np.random.normal(float(loc['merchant_trxn_mean']), float(loc['merchant_trxn_std'])), 2))
    # Target-specific timestamp encoding: numeric epochs for files,
    # '%Y-%m-%d %H:%M:%S' strings for Kafka
    if (target == "file"):
        posting_date = time.time()
        transaction_date = trxnTS
    elif (target == "kafka"):
        posting_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        transaction_date = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(trxnTS))
    else:
        # the original silently fell through to a NameError; fail loudly
        raise ValueError("unknown target: {}".format(target))
    # Single shared payload: the original duplicated this dict (and the
    # fraud handling below) per target, differing only in the date fields.
    trxn = {
        'rlb_location_key': loc['rlb_location_key'],
        'account_id': acct['account_id'],
        'account_number': acct['account_number'],
        'account_lat': acct['lat'],
        'account_long': acct['long'],
        'card_type': acct['card_type'],
        'location_id': loc['location_id'],
        'merchant_city': loc['merchant_city'],
        'merchant_city_alias': loc['merchant_city_alias'],
        'merchant_name': loc['merchant_name'],
        'merchant_state': loc['merchant_state'],
        'merchant_long': loc['merchant_long'],
        'merchant_lat': loc['merchant_lat'],
        'posting_date': posting_date,
        'transaction_amount': trxn_amount,
        'transaction_date': transaction_date,
        'transaction_id': loc['transaction_id'],
    }
    if (storeFraudFlag == True):
        trxn['fraud_flag'] = False
    # Update transaction if fraud case
    if (fraud == True):
        logging.debug("***** Generating fraud transaction *****")
        trxn = fs.transform(trxn, acct, loc)
    return trxn
def generate_kafka_data(myConfigs):
    """Stream synthetic transactions to a Kafka topic.

    Reads generator/redis/kafka settings from *myConfigs*, produces
    ``transactionNumber`` transactions (one in every ``FraudEveryNTransactions``
    carries a fraud signature), publishes each as UTF-8 JSON, increments the
    ``TransactionsCount`` Redis counter per message, and sleeps between
    100-message batches.
    """
    redis_host = myConfigs['redis']['host']
    redis_port = myConfigs['redis']['port']
    redis_pwd = myConfigs['redis']['passwd']
    redis_db = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd)
    transactionNumber = myConfigs['generator']['transactionNumber']
    everyNFraud = myConfigs['generator']['FraudEveryNTransactions']
    sleepBetweenIterations = myConfigs['generator']['sleepBetweenIterations']
    storeFraudFlag = myConfigs['generator']['storeFraudFlag']
    datafiles = myDataFiles()
    ts = myTimestamp()
    logging.debug("Transaction Generator: Applying fraud signature every {} transactions".format(everyNFraud))
    iter_counter = 0
    bootstrapServers = myConfigs['target']['kafka']
    topic = myConfigs['target']['topic']
    batch = 100
    producer = KafkaProducer(bootstrap_servers=bootstrapServers)
    for i in range(0, transactionNumber):
        iter_counter += 1
        fraud = False
        if (iter_counter % everyNFraud == 0):
            logging.debug("***** Generating fraud record *****")
            fraud = True
        msg = generate_transaction(datafiles, ts, fraud, storeFraudFlag, 'kafka')
        if msg == False:
            # rejected timestamp (merchant closed) -- retry this slot
            iter_counter -= 1
        else:
            logging.debug(msg)
            logging.debug(json.dumps(msg).encode('utf-8'))
            # BUG FIX: the original called flush() *before* send(), so the
            # last message(s) produced could remain unflushed forever.
            producer.send(topic, json.dumps(msg).encode('utf-8'))
            producer.flush()
            redis_db.incr("TransactionsCount")
        if (iter_counter % batch == 0):
            time.sleep(sleepBetweenIterations)
    # make sure nothing is left buffered before returning
    producer.flush()
def generate_file_data(myConfigs):
    """Generate synthetic transactions and write them to batched output files.

    Transactions accumulate in memory and are flushed to a new timestamp-named
    file (CSV or JSON, per ``myConfigs['target']['type']``) once
    ``transactionPerFile`` transactions are collected or the run ends; every
    ``FraudEveryNTransactions``-th accepted transaction carries a fraud
    signature.
    """
    transactionNumber = myConfigs['generator']['transactionNumber']
    everyNFraud = myConfigs['generator']['FraudEveryNTransactions']
    transactionPerFile = myConfigs['target']['transactionPerFile']
    storeFraudFlag = myConfigs['generator']['storeFraudFlag']
    datafiles = myDataFiles()
    ts = myTimestamp()
    iter_counter = 0   # accepted transactions in the current file batch
    batch_counter = 0  # number of files written so far
    results = []
    logging.info("start generating transactions .............")
    logging.info("Applying fraud signature every {} transactions ........".format(everyNFraud))
    # Setup progress bar
    manager = enlighten.get_manager()
    pbar = manager.counter(total=transactionNumber, desc='Progress', unit='transaction')
    for i in range(0,transactionNumber):
        iter_counter += 1
        # MOD
        fraud = False
        if ((iter_counter % everyNFraud) == 0):
            fraud = True
        msg = generate_transaction(datafiles, ts, fraud, storeFraudFlag, 'file')
        if msg == False:
            # timestamp fell outside merchant opening hours -- retry this slot
            iter_counter -= 1
        else:
            results.append(msg)
        if (iter_counter == transactionPerFile or i == transactionNumber-1):
            # flush the accumulated batch to a timestamp-named file
            # (NOTE: the config key 'transactionsFileLoctation' is spelled this
            # way in the config schema -- do not "fix" it here)
            filename = 'transactions_{}.{}'.format((str(time.time())).replace('.', ''),myConfigs['target']['type'])
            locationFilename = '{}{}'.format(myConfigs['target']['transactionsFileLoctation'],filename)
            output_file(locationFilename, results, myConfigs['target']['type'])
            iter_counter = 0
            results = []
            batch_counter += 1
            time.sleep(myConfigs['generator']['sleepBetweenFiles'])
        time.sleep(myConfigs['generator']['sleepBetweenIterations'])
        pbar.update()
if __name__ == '__main__':
ts = myTimestamp()
|
{"hexsha": "4c51d992304c0daab3a140d65867d8cb5107e8fc", "size": 12608, "ext": "py", "lang": "Python", "max_stars_repo_path": "RTS4MADlib/samples/CreditCardTransactionGenerator/transactionGenerator.py", "max_stars_repo_name": "pivotal/Realtime-scoring-for-MADlib", "max_stars_repo_head_hexsha": "4ed6500bdd3422c3d090d8c6c679d940c2b7c4f4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-08-13T11:02:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-29T17:10:33.000Z", "max_issues_repo_path": "RTS4MADlib/samples/CreditCardTransactionGenerator/transactionGenerator.py", "max_issues_repo_name": "pivotal/Realtime-scoring-for-MADlib", "max_issues_repo_head_hexsha": "4ed6500bdd3422c3d090d8c6c679d940c2b7c4f4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-12T16:11:28.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-12T16:11:43.000Z", "max_forks_repo_path": "RTS4MADlib/samples/CreditCardTransactionGenerator/transactionGenerator.py", "max_forks_repo_name": "pivotal/Realtime-scoring-for-MADlib", "max_forks_repo_head_hexsha": "4ed6500bdd3422c3d090d8c6c679d940c2b7c4f4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-10-22T04:07:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T18:28:36.000Z", "avg_line_length": 36.0228571429, "max_line_length": 138, "alphanum_fraction": 0.6228585025, "include": true, "reason": "import numpy", "num_tokens": 2800}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function
import os
import requests
from numpy import testing as npt
from astropy.tests.helper import pytest
from astropy.table import Table
import astropy.coordinates as coord
import astropy.units as u
from ...exceptions import RemoteServiceError
from ...utils.testing_tools import MockResponse
from ... import ned
from ...utils import commons
from ...ned import conf
# Map from NED search type (plus the internal keys 'image', 'error' and
# 'extract_urls') to the canned-response fixture file served from the local
# tests/data directory by get_mockreturn below.
DATA_FILES = {
    'object': 'query_object.xml',
    'Near Name Search': 'query_near_name.xml',
    'Near Position Search': 'query_near_position.xml',
    'IAU Search': 'query_iau_format.xml',
    'Diameters': 'query_diameters.xml',
    'image': 'query_images.fits',
    'Photometry': 'query_photometry.xml',
    'Positions': 'query_positions.xml',
    'Redshifts': 'query_redshifts.xml',
    'Reference': 'query_references.xml',
    'Search': 'query_refcode.xml',
    'error': 'error.xml',
    'extract_urls': 'image_extract.html',
    'Notes': 'query_notes.xml'
}
def data_path(filename):
    """Return the path of *filename* inside this test module's data/ dir."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_get(request):
    """Monkeypatch ``requests.get`` so queries are served from local fixtures.

    Uses ``request.getfixturevalue``: ``getfuncargvalue`` was deprecated in
    pytest 3.0 and removed in pytest 4.0.
    """
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(requests, 'get', get_mockreturn)
    return mp
@pytest.fixture
def patch_get_readable_fileobj(request):
    """Monkeypatch ``commons.get_readable_fileobj`` to serve the FITS fixture."""
    def get_readable_fileobj_mockreturn(filename, cache=True, encoding=None):
        # Need to read FITS files with binary encoding: should raise error
        # otherwise
        assert encoding == 'binary'
        return open(data_path(DATA_FILES['image']), 'rb')
    # getfuncargvalue was deprecated in pytest 3.0 and removed in 4.0.
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(commons, 'get_readable_fileobj',
               get_readable_fileobj_mockreturn)
    return mp
def get_mockreturn(url, params=None, timeout=10, **kwargs):
    """Stand-in for ``requests.get`` returning canned NED responses.

    Chooses the fixture from the ``search_type`` HTTP parameter, falling
    back to the image-extraction page for image-data URLs and to the plain
    object query otherwise.
    """
    # Guard against the documented default: params=None previously crashed
    # on params.get(...).
    search_type = params.get('search_type') if params else None
    if search_type is not None:
        filename = data_path(DATA_FILES[search_type])
    elif 'imgdata' in url:
        filename = data_path(DATA_FILES['extract_urls'])
    else:
        filename = data_path(DATA_FILES['object'])
    # Close the fixture file promptly instead of leaking the handle.
    with open(filename, "rb") as fixture:
        content = fixture.read()
    return MockResponse(content, **kwargs)
@pytest.mark.parametrize(('radius', 'expected'),
                         [(5 * u.deg, 300),
                          ('0d5m0s', 5),
                          (5 * u.arcsec, 0.0833)
                          ])
def test_parse_radius(radius, expected):
    # Whatever unit the caller supplies, the parsed radius must come out
    # in arcminutes.
    npt.assert_approx_equal(ned.core._parse_radius(radius), expected,
                            significant=3)
def test_get_references_async(patch_get):
    # get_query_payload=True short-circuits the HTTP call and returns the
    # request parameters, so the payload can be checked offline.
    payload = ned.core.Ned.get_table_async(
        "m1", table='references', from_year=2010, to_year=2013,
        get_query_payload=True)
    assert payload['objname'] == 'm1'
    assert payload['ref_extend'] == 'no'
    assert payload['begin_year'] == 2010
    assert payload['end_year'] == 2013
    assert payload['search_type'] == 'Reference'
def test_get_references(patch_get):
    async_response = ned.core.Ned.get_table_async(
        "m1", table='references', from_year=2010)
    assert async_response is not None
    table = ned.core.Ned.get_table(
        "m1", table='references', to_year=2012, extended_search=True)
    assert isinstance(table, Table)
def test_get_positions_async(patch_get):
    payload = ned.core.Ned.get_table_async(
        "m1", table='positions', get_query_payload=True)
    assert payload['objname'] == 'm1'
    assert ned.core.Ned.get_table_async("m1", table='positions') is not None
def test_get_positions(patch_get):
    assert isinstance(ned.core.Ned.get_table("m1", table='positions'), Table)
def test_get_redshifts_async(patch_get):
    payload = ned.core.Ned.get_table_async(
        "3c 273", table='redshifts', get_query_payload=True)
    assert payload['objname'] == '3c 273'
    assert payload['search_type'] == 'Redshifts'
    assert ned.core.Ned.get_table_async("3c 273",
                                        table='redshifts') is not None
def test_get_redshifts(patch_get):
    assert isinstance(ned.core.Ned.get_table("3c 273", table='redshifts'),
                      Table)
def test_get_photometry_async(patch_get):
    payload = ned.core.Ned.get_table_async(
        "3c 273", table='photometry', get_query_payload=True)
    assert payload['objname'] == '3c 273'
    assert payload['meas_type'] == 'bot'
    assert payload['search_type'] == 'Photometry'
    assert ned.core.Ned.get_table_async("3C 273",
                                        table='photometry') is not None
def test_photometry(patch_get):
    assert isinstance(ned.core.Ned.get_table("3c 273", table='photometry'),
                      Table)
def test_extract_image_urls():
    page = open(data_path(DATA_FILES['extract_urls']), 'r').read()
    url_list = ned.core.Ned.extract_image_urls(page)
    assert len(url_list) == 5
    for link in url_list:
        assert link.endswith('fits.gz')
def test_get_image_list(patch_get):
    payload = ned.core.Ned.get_image_list('m1', get_query_payload=True)
    assert payload['objname'] == 'm1'
    image_urls = ned.core.Ned.get_image_list('m1')
    assert len(image_urls) == 5
def test_get_images_async(patch_get, patch_get_readable_fileobj):
    assert ned.core.Ned.get_images_async('m1') is not None
def test_get_images(patch_get, patch_get_readable_fileobj):
    assert ned.core.Ned.get_images('m1') is not None
def test_query_refcode_async(patch_get):
    # The full payload for a refcode search must match the configured
    # cosmology/output settings exactly.
    payload = ned.core.Ned.query_refcode_async('1997A&A...323...31K', True)
    expected = {'search_type': 'Search',
                'refcode': '1997A&A...323...31K',
                'hconst': conf.hubble_constant,
                'omegam': 0.27,
                'omegav': 0.73,
                'corr_z': conf.correct_redshift,
                'out_csys': conf.output_coordinate_frame,
                'out_equinox': conf.output_equinox,
                'obj_sort': conf.sort_output_by,
                'extend': 'no',
                'img_stamp': 'NO',
                'list_limit': 0,
                'of': 'xml_main'
                }
    assert payload == expected
    assert ned.core.Ned.query_refcode_async(
        '1997A&A...323...31K') is not None
def test_query_refcode(patch_get):
    assert isinstance(ned.core.Ned.query_refcode('1997A&A...323...31K'),
                      Table)
def test_query_region_iau_async(patch_get):
    payload = ned.core.Ned.query_region_iau_async(
        '1234-423', get_query_payload=True)
    assert payload['search_type'] == 'IAU Search'
    assert payload['iau_name'] == '1234-423'
    assert payload['in_csys'] == 'Equatorial'
    assert payload['in_equinox'] == 'B1950.0'
    assert ned.core.Ned.query_region_iau_async('1234-423') is not None
def test_query_region_iau(patch_get):
    assert isinstance(ned.core.Ned.query_region_iau('1234-423'), Table)
def mock_check_resolvable(name):
    # Pretend the name resolver only knows 'm1'.
    if name == 'm1':
        return
    raise coord.name_resolve.NameResolveError
def test_query_region_async(monkeypatch, patch_get):
    monkeypatch.setattr(
        coord.name_resolve, 'get_icrs_coordinates', mock_check_resolvable)
    # Query by resolvable object name.
    payload = ned.core.Ned.query_region_async("m1", get_query_payload=True)
    assert payload['objname'] == "m1"
    assert payload['search_type'] == "Near Name Search"
    # Query by Galactic coordinates.
    payload = ned.core.Ned.query_region_async(
        commons.GalacticCoordGenerator(l=-67.02084, b=-29.75447,
                                       unit=(u.deg, u.deg)),
        get_query_payload=True)
    assert payload['search_type'] == 'Near Position Search'
    npt.assert_approx_equal(
        payload['lon'] % 360, -67.02084 % 360, significant=5)
    npt.assert_approx_equal(payload['lat'], -29.75447, significant=5)
    assert ned.core.Ned.query_region_async(
        "05h35m17.3s +22d00m52.2s") is not None
def test_query_region(monkeypatch, patch_get):
    monkeypatch.setattr(
        coord.name_resolve, 'get_icrs_coordinates', mock_check_resolvable)
    assert isinstance(ned.core.Ned.query_region("m1"), Table)
def test_query_object_async(patch_get):
    payload = ned.core.Ned.query_object_async('m1', get_query_payload=True)
    assert payload['objname'] == 'm1'
    assert ned.core.Ned.query_object_async('m1') is not None
def test_query_object(patch_get):
    assert isinstance(ned.core.Ned.query_object('m1'), Table)
def test_get_object_notes_async(patch_get):
    payload = ned.core.Ned.get_table_async(
        'm1', table='object_notes', get_query_payload=True)
    assert payload['objname'] == 'm1'
    assert payload['search_type'] == 'Notes'
    assert ned.core.Ned.get_table_async('m1',
                                        table='object_notes') is not None
def test_get_object_notes(patch_get):
    assert isinstance(ned.core.Ned.get_table('3c 273', table='object_notes'),
                      Table)
def test_parse_result(capsys):
    # An error response from the service must surface as RemoteServiceError
    # carrying the server's message.
    error_body = open(data_path(DATA_FILES['error']), 'rb').read()
    mock_response = MockResponse(error_body)
    with pytest.raises(RemoteServiceError) as excinfo:
        ned.core.Ned._parse_result(mock_response)
    expected = ("The remote service returned the "
                "following error message.\nERROR: "
                "No note found.")
    # Python 2/old-style exceptions expose .message; fall back to .args.
    if hasattr(excinfo.value, 'message'):
        assert excinfo.value.message == expected
    else:
        assert excinfo.value.args == (expected,)
|
{"hexsha": "91bb00fc01a33b363a273f6e3b54bfeba82876d3", "size": 10110, "ext": "py", "lang": "Python", "max_stars_repo_path": "astroquery/ned/tests/test_ned.py", "max_stars_repo_name": "hamogu/astroquery", "max_stars_repo_head_hexsha": "9a2d1a2ecc4dbfafa6a39cf7a180bcf831a6266a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-18T23:47:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-18T23:47:09.000Z", "max_issues_repo_path": "astroquery/ned/tests/test_ned.py", "max_issues_repo_name": "astrocatalogs/astroquery", "max_issues_repo_head_hexsha": "9919a32cb027febcd73cd743efaae6754061a534", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-01-15T14:46:02.000Z", "max_issues_repo_issues_event_max_datetime": "2016-01-15T14:46:02.000Z", "max_forks_repo_path": "astroquery/ned/tests/test_ned.py", "max_forks_repo_name": "hamogu/astroquery", "max_forks_repo_head_hexsha": "9a2d1a2ecc4dbfafa6a39cf7a180bcf831a6266a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4736842105, "max_line_length": 77, "alphanum_fraction": 0.6591493571, "include": true, "reason": "from numpy,import astropy,from astropy", "num_tokens": 2407}
|
from dolfin import *  # @UnusedWildImport
import logging
import numpy as np
# Quiet the very chatty FFC/UFL form compilers down to warnings only.
# BUG FIX: `logging.warnings` resolved to the stdlib `warnings` module that
# logging imports internally, and Logger.setLevel raises TypeError for a
# non-int/non-str level.  The intended constant is logging.WARNING.
logging.getLogger('FFC').setLevel(logging.WARNING)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('UFL').setLevel(logging.WARNING)
set_log_level(WARNING)
def kl(y, N, order=1):
    """Solve a parametric diffusion problem for each parameter sample.

    For every row of ``y`` (one sample of the expansion coefficients) the
    problem -div(D grad u) = -6 with homogeneous Dirichlet boundary
    conditions is solved on an N x N unit-square mesh, where D is the
    parametric coefficient built by :func:`diffusion_coefficient`.  The
    integral of each solution over the domain is returned.

    :param y: 2-D array of samples; y.shape[1] is the parametric dimension.
    :param N: number of mesh cells per direction (cast to int).
    :param order: polynomial degree of the CG function space.
    :return: array of shape (y.shape[0], 1) with the integrated solutions.
    """
    deg = order
    mesh = UnitSquareMesh(int(N), int(N))
    V = FunctionSpace(mesh, 'CG', deg)
    # Homogeneous Dirichlet condition on the whole boundary.
    bc = DirichletBC(V, Constant(0.0), lambda x, on_boundary: on_boundary)
    output = np.zeros((y.shape[0], 1))
    ut = TrialFunction(V)
    v = TestFunction(V)
    f = Constant(-6.0)
    L = f * v * dx
    d = y.shape[1]
    # C++ expression string for the parametric diffusion coefficient.
    expression_string = diffusion_coefficient(d)
    for j in range(y.shape[0]):
        # Compute solution
        # Bind this sample's values to the y1..yd placeholders.
        # NOTE(review): the comprehension's loop variable `v` does not leak in
        # Python 3, so the TestFunction `v` above stays intact; on Python 2 it
        # would be clobbered -- confirm this module is Python-3 only.
        arg_dict = { 'y%d' % (i + 1): v for i, v in enumerate(y[j, :])}
        arg_dict['degree'] = 2 * deg
        D = Expression(expression_string, **arg_dict)
        a = dot(D * grad(ut), grad(v)) * dx
        u = Function(V)
        problem = LinearVariationalProblem(a, L, u, bc)
        solver = LinearVariationalSolver(problem)
        # AMG-preconditioned CG with tight tolerances.
        solver.parameters['linear_solver'] = 'cg'
        solver.parameters['preconditioner'] = 'amg'
        cg_prm = solver.parameters['krylov_solver']
        cg_prm['absolute_tolerance'] = 1E-14
        cg_prm['relative_tolerance'] = 1E-14
        cg_prm['maximum_iterations'] = 10000
        solver.solve()
        # Quantity of interest: integral of u over the domain.
        integrand = u * dx
        a = assemble(integrand)
        # print(a)
        output[j] = a
    return output
def diffusion_coefficient(d):
    """Build the C++ expression string for a d-term parametric diffusion
    coefficient of the form exp(sum_k pow(k,-4) * y_k * phi_k(x)), where the
    2-D modes phi_k are tensorized sine/cosine factors enumerated along the
    diagonals of the (s1, s2) index grid.
    """
    pieces = ['exp( 0 ']
    for k in range(1, d + 1):
        # Map the linear index k to the diagonal (row) it lies on and its
        # offset within that diagonal.
        row = int(np.ceil(-1. / 2 + np.sqrt(1. / 4 + 2 * k) - 1))
        rem = int(k - (row ** 2 + row) / 2.)
        if rem > 0:
            s1 = row + 2 - rem
            s2 = rem
        pieces.append('+')
        pieces.append('pow({},-4)*y{}*'.format(k, k))
        # Even 1-D indices become sine modes, odd ones cosine modes.
        if s1 % 2 == 0:
            pieces.append('sin({}*pi*x[0])'.format(int(s1 / 2.)))
        else:
            pieces.append('cos({}*pi*x[0])'.format(int((s1 + 1) / 2.)))
        if s2 % 2 == 0:
            pieces.append('*sin({}*pi*x[1])'.format(int(s2 / 2.)))
        else:
            pieces.append('*cos({}*pi*x[1])'.format(int((s2 + 1) / 2.)))
    pieces.append(')')
    return ''.join(pieces)
|
{"hexsha": "4248c56cd0bf1ef03712df9325ce0828b110508f", "size": 2247, "ext": "py", "lang": "Python", "max_stars_repo_path": "smolyak/applications/pde/kl.py", "max_stars_repo_name": "mbaudin47/smolyak", "max_stars_repo_head_hexsha": "c12d7cbae57d9b1cf2e026547b78bcd9c8d63a6c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-23T21:54:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-23T21:54:43.000Z", "max_issues_repo_path": "smolyak/applications/pde/kl.py", "max_issues_repo_name": "mbaudin47/smolyak", "max_issues_repo_head_hexsha": "c12d7cbae57d9b1cf2e026547b78bcd9c8d63a6c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smolyak/applications/pde/kl.py", "max_forks_repo_name": "mbaudin47/smolyak", "max_forks_repo_head_hexsha": "c12d7cbae57d9b1cf2e026547b78bcd9c8d63a6c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-02-03T19:57:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-11T21:26:29.000Z", "avg_line_length": 35.109375, "max_line_length": 74, "alphanum_fraction": 0.5531820205, "include": true, "reason": "import numpy", "num_tokens": 672}
|
# Smoke test for the Jones-Morari benchmark problem: the generated state-space
# system must expose the expected dimensions.
using ControlBenchmarks
using ControlSystems
benchmarkProb = controlbenchmark( JonesMorari() )
# 4 states and 2 inputs.
@test ControlSystems.nstates( benchmarkProb.sys ) == 4
@test ControlSystems.ninputs( benchmarkProb.sys ) == 2
|
{"hexsha": "6532e32e4da0aa34c6fb63f16ca3f51ace5dd29b", "size": 207, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/benchmarks/jonesMorari.jl", "max_stars_repo_name": "imciner2/ControlBenchmarks.jl", "max_stars_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/benchmarks/jonesMorari.jl", "max_issues_repo_name": "imciner2/ControlBenchmarks.jl", "max_issues_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/benchmarks/jonesMorari.jl", "max_forks_repo_name": "imciner2/ControlBenchmarks.jl", "max_forks_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.875, "max_line_length": 54, "alphanum_fraction": 0.7971014493, "num_tokens": 49}
|
import torch
import os
import argparse
import numpy as np
import sys
sys.path.append('./')
from pipelines import config
from pipelines.utils.point_utils import read_point_ply
# ---- Command-line interface ----
parser = argparse.ArgumentParser(description='Extract meshes from occupancy process.')
parser.add_argument('--config', default='configs/lig/lig_pretrained.yaml', type=str, help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--input_ply', type=str, help='Input object file')
parser.add_argument('--output_ply', type=str, help='Output object file')
parser.add_argument('--gen', action='store_true', help='to generate mesh, no training')
parser.add_argument('--continue_training', action='store_true', help='whether to continue training')
parser.add_argument('--model', type=str, default='pretrained_models/lig/model_best.pt', help='pretrained model path')
parser.add_argument('--debug', action='store_true', help='whether it is debug mode')
parser.add_argument('--normalized', action='store_true', help='whether normalize the input')
args = parser.parse_args()
print(str(args))
cfg = config.load_config(args.config, 'configs/default.yaml')
# --gen and --continue_training are mutually exclusive modes.
assert not np.logical_and(args.gen, args.continue_training), "Cannot be generation mode and training mode at the same time"
if args.gen:
    assert args.model != '', "Pretrained model path shouldn't be empty in generation mode"
    # Pure generation: skip all latent-optimization steps.
    cfg['generation']['optimizer_kwargs']['optim_steps'] = 0
if args.continue_training:
    assert args.model != '', "Pretrained model path shouldn't be empty in continue training mode"
    cfg['generation']['optimizer_kwargs']['continue_training'] = args.continue_training
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Fix seed of numpy and torch to make results reproducable
np.random.seed(0)
torch.manual_seed(0)
# Model
model = config.get_model(cfg, device=device)
print('!!! Model Loaded !!! ')
out_dir = cfg['generation']['out_dir']
# Initialize generation directory
file_path, file_name = os.path.split(args.input_ply)
obj_name, ext = os.path.splitext(file_name)
generation_dir = os.path.join(out_dir, obj_name + '_debug' if args.debug else obj_name)
if not os.path.exists(generation_dir):
    os.makedirs(generation_dir)
output_path = './'
# Set pretrained path and output path of the model
model_path = os.path.join(output_path, 'model')
cfg['model']['model_path'] = model_path
cfg['model']['pretrained_path'] = args.model
# Load pretrained weight
if args.model != '':
    pretrained_dict = torch.load(args.model)
    model_dict = model.state_dict()
    # Keep only weights whose names exist in the current model.
    update_dict = {k : v for k, v in pretrained_dict.items() if k in model_dict.keys()}
    model_dict.update(update_dict)
    model.load_state_dict(model_dict)
    print(f"Total {len(model_dict)} parameters, updated {len(update_dict)} parameters")
# Generator
generator = config.get_generator(model, cfg, device=device, output_path=output_path)
v, n = read_point_ply(args.input_ply)
v = v.astype(np.float32)
n = n.astype(np.float32)
# Normalize to unit sphere
if args.normalized:
    v = v - v.mean(axis=0)
    v = v / (np.linalg.norm(v, ord=2, axis=1).max() + 1e-12)
mesh = generator.generate_single_obj_mesh(v, n)
# Write output
mesh.export(args.output_ply)
|
{"hexsha": "6472385358bc1c46afc0f566ed1ad6ca79ae58a5", "size": 3313, "ext": "py", "lang": "Python", "max_stars_repo_path": "main/run_lig.py", "max_stars_repo_name": "wnbzhao/Local-Implicit-Grid-Pytorch", "max_stars_repo_head_hexsha": "d45da37beda52653f0066f9ba0f0500c54402e13", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-21T13:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T01:16:43.000Z", "max_issues_repo_path": "main/run_lig.py", "max_issues_repo_name": "wnbzhao/Local-Implicit-Grid-Pytorch", "max_issues_repo_head_hexsha": "d45da37beda52653f0066f9ba0f0500c54402e13", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main/run_lig.py", "max_forks_repo_name": "wnbzhao/Local-Implicit-Grid-Pytorch", "max_forks_repo_head_hexsha": "d45da37beda52653f0066f9ba0f0500c54402e13", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9012345679, "max_line_length": 123, "alphanum_fraction": 0.7476607305, "include": true, "reason": "import numpy", "num_tokens": 773}
|
import numpy
import pickle
from tensorflow.python.framework import dtypes
from sklearn.model_selection import train_test_split
import tensorflow as tf
from enum import Enum
import scipy.ndimage
import scipy.misc
from sklearn.utils import shuffle
class TrafficDataProvider(object):
    """Bundle of train/validation/test feature and label arrays that are
    handed to the neural network."""
    def __init__(self,
                 X_train_array, y_train_array, X_validation_array, y_validation_array, X_test_array, y_test_array):
        self.X_train = X_train_array
        self.y_train = y_train_array
        self.X_validation = X_validation_array
        self.y_validation = y_validation_array
        self.X_test = X_test_array
        self.y_test = y_test_array
    def to_other_provider(self, X_train_overwrite=None, y_train_overwrite=None):
        """Clone this provider, optionally swapping in new training data."""
        new_X = self.X_train if X_train_overwrite is None else X_train_overwrite
        new_y = self.y_train if y_train_overwrite is None else y_train_overwrite
        return TrafficDataProvider(new_X, new_y,
                                   self.X_validation, self.y_validation,
                                   self.X_test, self.y_test)
    def save_to_file(self, file_name):
        """Pickle all six arrays to *file_name*."""
        payload = {
            "train_features": self.X_train,
            "train_labels": self.y_train,
            "validation_features": self.X_validation,
            "validation_labels": self.y_validation,
            "test_features": self.X_test,
            "test_labels": self.y_test
        }
        with open(file_name, mode='wb') as f:
            pickle.dump(payload, f)
    @staticmethod
    def load_from_file(file_name):
        """Recreate a provider from a pickle written by save_to_file."""
        with open(file_name, mode='rb') as f:
            payload = pickle.load(f)
        return TrafficDataProvider(
            X_train_array=payload["train_features"],
            y_train_array=payload["train_labels"],
            X_validation_array=payload["validation_features"],
            y_validation_array=payload["validation_labels"],
            X_test_array=payload["test_features"],
            y_test_array=payload["test_labels"]
        )
    @classmethod
    def from_other_provider(cls, data_provider):
        """Build an instance (possibly of a subclass) from another provider."""
        return cls(data_provider.X_train, data_provider.y_train,
                   data_provider.X_validation, data_provider.y_validation,
                   data_provider.X_test, data_provider.y_test)
class TrafficDataProviderAutoSplitValidationData(TrafficDataProvider):
    """Provider that derives its validation set from the training set."""
    def __init__(self, X_train_array, y_train_array, X_test_array, y_test_array,
                 split_validation_from_train=False, validation_size=0.20):
        """
        Take X_train and X_test and compute a validation set from X_train.
        :param X_train_array:
        :param y_train_array:
        :param X_test_array:
        :param y_test_array:
        :param split_validation_from_train: when True, shuffle and carve off
            a validation_size fraction of the training data; otherwise keep
            all training data and reuse its first 1000 samples as validation.
        :param validation_size: fraction of training data used for validation.
        """
        if split_validation_from_train:
            # Reproducible shuffled split via the fixed random_state.
            X_train, X_validation, y_train, y_validation = train_test_split(
                X_train_array, y_train_array,
                test_size=validation_size, random_state=42)
        else:
            X_train, y_train = X_train_array, y_train_array
            X_validation = X_train_array[0:1000]
            y_validation = y_train_array[0:1000]
        super().__init__(X_train, y_train, X_validation, y_validation,
                         X_test_array, y_test_array)
class TrafficDataRealFileProviderAutoSplitValidationData(TrafficDataProviderAutoSplitValidationData):
    """Provider that loads pickled train/test files from disk."""
    def __init__(self, training_file="train.p", testing_file="test.p",
                 split_validation_from_train=True, validation_size=0.20):
        with open(training_file, mode='rb') as f:
            train = pickle.load(f)
        with open(testing_file, mode='rb') as f:
            test = pickle.load(f)
        super().__init__(train['features'], train['labels'],
                         test['features'], test['labels'],
                         split_validation_from_train, validation_size)
class TrafficDataSets(object):
    """Wrap a provider's arrays into train/validation/test DataSet objects,
    optionally one-hot encoding the labels."""
    # Number of traffic-sign classes used for one-hot encoding.
    NUMBER_OF_CLASSES = 43
    def __init__(self, data_provider, one_hot_encode=True,
                 training_dataset_factory=lambda X, y: DataSet(X, y),
                 test_dataset_factory=lambda X, y: DataSet(X, y)):
        y_train = data_provider.y_train
        y_validation = data_provider.y_validation
        y_test = data_provider.y_test
        if one_hot_encode:
            n = TrafficDataSets.NUMBER_OF_CLASSES
            y_train = self.dense_to_one_hot(y_train, n)
            y_validation = self.dense_to_one_hot(y_validation, n)
            y_test = self.dense_to_one_hot(y_test, n)
        self.train = training_dataset_factory(data_provider.X_train, y_train)
        self.validation = test_dataset_factory(data_provider.X_validation, y_validation)
        self.test = test_dataset_factory(data_provider.X_test, y_test)
    @staticmethod
    def dense_to_one_hot(labels_dense, num_classes):
        """Convert class labels from scalars to one-hot vectors."""
        # NOTE(review): creates a fresh TensorFlow session per call just to
        # evaluate tf.one_hot -- confirm this cost is acceptable at call sites.
        return tf.one_hot(labels_dense, num_classes).eval(session=tf.Session())
class DataSet(object):
    """In-memory dataset with epoch-aware mini-batch iteration."""
    def __init__(self,
                 images,
                 labels):
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images
    @property
    def labels(self):
        return self._labels
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def shuffle(self):
        # sklearn.utils.shuffle permutes images and labels in unison.
        self._images, self._labels = shuffle(self._images, self._labels)
    @property
    def is_grayscale(self):
        # Channel axis of size 1 means single-channel images.
        return self._images.shape[3] == 1
    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Epoch exhausted: reshuffle in place and restart from zero.
            self._epochs_completed += 1
            permutation = numpy.arange(self._num_examples)
            numpy.random.shuffle(permutation)
            self._images = self._images[permutation]
            self._labels = self._labels[permutation]
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
class DataSetType(Enum):
    """Selects which ImageDataGenerator configuration DataSetWithGenerator
    builds: the augmenting one for Training, the pass-through one otherwise."""
    Training = 1
    # (sic) misspelling kept -- renaming would break existing callers
    TestAndValudation = 2
class DataSetWithGenerator(DataSet):
    """
    DataSet variant that serves batches through a Keras ImageDataGenerator
    (augmentation for training, plain pass-through otherwise).
    NOTE: training with these generators has not worked yet -- accuracy
    stays around 0.03.
    """
    def __init__(self,
                 images,
                 labels,
                 dataset_type,
                 save_to_dir=None, save_prefix=None):
        super().__init__(images, labels)
        if dataset_type == DataSetType.Training:
            self.datagen = self._training_data_generator_factory()
        else:
            self.datagen = self._test_data_generator_factory()
        self.datagen.fit(self._images)
        # The flow iterator is created lazily on the first next_batch call.
        self.iterator = None
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
    @staticmethod
    def _training_data_generator_factory():
        # Imported lazily so keras is only needed when generators are used.
        from keras.preprocessing.image import ImageDataGenerator
        return ImageDataGenerator(
            rotation_range=20,
            width_shift_range=0.1,
            height_shift_range=0.1,
            featurewise_center=False,
            featurewise_std_normalization=False,
            zca_whitening=False,
            shear_range=0.1,
            zoom_range=0.2,
            horizontal_flip=False,
            vertical_flip=False,
            fill_mode='nearest',
            dim_ordering='tf')
    @staticmethod
    def _test_data_generator_factory():
        from keras.preprocessing.image import ImageDataGenerator
        return ImageDataGenerator(
            featurewise_center=False,
            featurewise_std_normalization=False,
            horizontal_flip=False,
            vertical_flip=False,
            fill_mode='nearest',
            dim_ordering='tf')
    def next_batch(self, batch_size):
        """Return the next (possibly augmented) batch from the generator."""
        if self.iterator is None:
            self.iterator = self.datagen.flow(
                self._images, self._labels, batch_size=batch_size,
                shuffle=True, seed=1234,
                save_to_dir=self.save_to_dir, save_prefix=self.save_prefix,
                save_format='jpeg')
        return self.iterator.next()
|
{"hexsha": "1247c724e4a13c7e240a8b423429cf4cd01cd03b", "size": 9579, "ext": "py", "lang": "Python", "max_stars_repo_path": "traffic/traffic_data.py", "max_stars_repo_name": "JamesLuoau/Traffic-Sign-Recognition-with-Deep-Learning-CNN", "max_stars_repo_head_hexsha": "e73a892a17db71121861b746861d57643d34ea30", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2017-01-10T05:35:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T07:36:04.000Z", "max_issues_repo_path": "traffic/traffic_data.py", "max_issues_repo_name": "jcheng1602/Traffic-Sign-Recognition-with-Deep-Learning-CNN", "max_issues_repo_head_hexsha": "e73a892a17db71121861b746861d57643d34ea30", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-18T13:26:07.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-18T13:26:07.000Z", "max_forks_repo_path": "traffic/traffic_data.py", "max_forks_repo_name": "jcheng1602/Traffic-Sign-Recognition-with-Deep-Learning-CNN", "max_forks_repo_head_hexsha": "e73a892a17db71121861b746861d57643d34ea30", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-04-25T03:15:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T09:46:36.000Z", "avg_line_length": 37.7125984252, "max_line_length": 131, "alphanum_fraction": 0.64390855, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2002}
|
#!/usr/bin/env python
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import numpy as np
import network
# NOTE(review): `slim` is not referenced anywhere below in this file --
# possibly kept for the imported `network` module or interactive use;
# confirm before removing.
slim = tf.contrib.slim
import os
import json
import cv2
import signal
import sys
class Inferer:
    """TF1 wrapper around a trained deeplab_v3 segmentation checkpoint.

    Restores the model saved under ``tboard_logs/<model_num>/train`` and
    exposes ``infer`` to turn a 180x480 RGB frame into a binary
    "unsafe" mask.
    """
    def __init__(self, model_num):
        model_name = str(model_num)
        log_folder = '/home/nvidia/wilcove/ros_workspace/src/deep_segmentation/src/tboard_logs'
        # Hyper-parameters were serialized next to the checkpoint at
        # training time.
        with open(log_folder + '/' + model_name + '/train/data.json', 'r') as fp:
            args = json.load(fp)
        class Dotdict(dict):
            # Allows attribute-style access (args.foo) to the loaded JSON.
            __getattr__ = dict.get
            __setattr__ = dict.__setitem__
            __delattr__ = dict.__delitem__
        args = Dotdict(args)
        tf.set_random_seed(1)
        # Single-image batch of 180x480 RGB frames.
        self.holder = tf.placeholder(dtype=tf.float32, shape=[1, 180, 480, 3])
        logits_tf = network.deeplab_v3(self.holder, args, is_training=False, reuse=False)
        # Per-pixel class id = argmax over the channel axis.
        # (Removed the unused softmax op and unused test_folder variable.)
        self.predictions_tf = tf.argmax(logits_tf, axis=3)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())
        train_folder = os.path.join(log_folder, model_name, "train")
        saver = tf.train.Saver()
        saver.restore(self.sess, os.path.join(train_folder, "model.ckpt"))
        print("Model", model_name, "restored.")
    def infer(self, image):
        """Segment one 180x480x3 frame and return the binary unsafe mask.

        cv2.inRange bounds are inclusive: label 0 (``bg``) and labels
        3..255 (``stairs``) are marked unsafe; labels 1-2 are safe.
        """
        out = self.sess.run(self.predictions_tf, feed_dict={self.holder: [image]})
        out = np.squeeze(out)
        bg = cv2.inRange(out, 0, 0)
        stairs = cv2.inRange(out, 3, 255)
        unsafe = cv2.bitwise_or(stairs, bg)
        return unsafe
'''if __name__ == "__main__":
inferer = Inferer()
cam = cv2.VideoCapture(1)
ret, frame = cam.read()
frame = frame[240:, :]
frame = cv2.resize(frame, (480, 180))
frame = cv2.GaussianBlur(frame, (3, 3), 0)
out = inferer.infer(frame)
cv2.imshow('yo', out)
cv2.waitKey(0)'''
|
{"hexsha": "94a82be4f453680103ce86fa486955ca721ad2b9", "size": 2131, "ext": "py", "lang": "Python", "max_stars_repo_path": "ros_workspace/src/deep_segmentation/src/inferer.py", "max_stars_repo_name": "NVIDIA-Jetson/Foursee-Navigation", "max_stars_repo_head_hexsha": "673b4a8bcf5774cf23d2564bada68709d28c850e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2018-11-01T06:05:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T02:48:03.000Z", "max_issues_repo_path": "ros_workspace/src/deep_segmentation/src/inferer.py", "max_issues_repo_name": "NVIDIA-Jetson/Foursee-Navigation", "max_issues_repo_head_hexsha": "673b4a8bcf5774cf23d2564bada68709d28c850e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-03T18:54:19.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-03T18:55:37.000Z", "max_forks_repo_path": "ros_workspace/src/deep_segmentation/src/inferer.py", "max_forks_repo_name": "NVIDIA-Jetson/Foursee-Navigation", "max_forks_repo_head_hexsha": "673b4a8bcf5774cf23d2564bada68709d28c850e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-12-17T10:56:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T06:45:28.000Z", "avg_line_length": 35.5166666667, "max_line_length": 95, "alphanum_fraction": 0.6428906617, "include": true, "reason": "import numpy", "num_tokens": 545}
|
%----------------------------------------------------------------------------------------
% VARIOUS REQUIRED PACKAGES
%----------------------------------------------------------------------------------------
\usepackage{titlesec} % Allows customization of titles
\usepackage[top=3cm,bottom=3cm,left=3.2cm,right=3.2cm,headsep=10pt,a4paper]{geometry} % Page margins
\usepackage{xcolor} % Required for specifying colors by name
\usepackage{graphicx} % Required for including pictures
% Font Settings
\usepackage{avant} % Use the Avantgarde font for headings
%\usepackage{times} % Use the Times font for headings
\usepackage{mathptmx} % Use the Adobe Times Roman as the default text font together with math symbols from the Symbol, Chancery and Computer Modern fonts
\usepackage{marvosym}
\usepackage{microtype} % Slightly tweak font spacing for aesthetics
\usepackage[utf8]{inputenc} % Required for including letters with accents
\usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs
\usepackage{pifont}
% Index
\usepackage{calc} % For simpler calculation - used for spacing the index letter headings correctly
\usepackage{makeidx} % Required to make an index
\graphicspath{{pictures/}} % Specifies the directory where pictures are stored
\usepackage{lipsum} % Inserts dummy text
\usepackage{tikz} % Required for drawing custom shapes
\usepackage[english]{babel} % English language/hyphenation
\usepackage{enumitem} % Customize lists
\setlist{nolistsep} % Reduce spacing between bullet points and numbered lists
\usepackage{booktabs} % Required for nicer horizontal rules in tables
\usepackage{eso-pic} % Required for specifying an image background in the title page
\usepackage{listings} % Required for inserting code snippets
\definecolor{ocre}{RGB}{243,102,25} % Define the orange color used for highlighting throughout the book
\definecolor{DarkGreen}{rgb}{0.0,0.4,0.0} % Comment color
\definecolor{highlight}{RGB}{255,251,204} % Code highlight color
\lstdefinestyle{Style1}{ % Define a style for your code snippet, multiple definitions can be made if, for example, you wish to insert multiple code snippets using different programming languages into one document
language=C, % Detects keywords, comments, strings, functions, etc for the language specified
backgroundcolor=\color{highlight}, % Set the background color for the snippet - useful for highlighting
basicstyle=\footnotesize\ttfamily, % The default font size and style of the code
breakatwhitespace=false, % If true, only allows line breaks at white space
breaklines=true, % Automatic line breaking (prevents code from protruding outside the box)
captionpos=b, % Sets the caption position: b for bottom; t for top
commentstyle=\usefont{T1}{pcr}{m}{sl}\color{DarkGreen}, % Style of comments within the code - dark green courier font
deletekeywords={}, % If you want to delete any keywords from the current language separate them by commas
%escapeinside={\%}, % This allows you to escape to LaTeX using the character in the bracket
firstnumber=1, % Line numbers begin at line 1
frame=single, % Frame around the code box, value can be: none, leftline, topline, bottomline, lines, single, shadowbox
frameround=tttt, % Rounds the corners of the frame for the top left, top right, bottom left and bottom right positions
keywordstyle=\color{blue}\bf, % Functions are bold and blue
        morekeywords={}, % Add any functions not included by default here, separated by commas
numbers=left, % Location of line numbers, can take the values of: none, left, right
numbersep=10pt, % Distance of line numbers from the code box
numberstyle=\tiny\color{gray}, % Style used for line numbers
rulecolor=\color{black}, % Frame border color
showstringspaces=false, % Don't put marks in string spaces
showtabs=false, % Display tabs in the code as lines
stepnumber=5, % The step distance between line numbers, i.e. how often will lines be numbered
stringstyle=\color{purple}, % Strings are purple
tabsize=2, % Number of spaces per tab in the code
}
%----------------------------------------------------------------------------------------
% MAIN TABLE OF CONTENTS
%----------------------------------------------------------------------------------------
\usepackage{titletoc} % Required for manipulating the table of contents
\contentsmargin{0cm} % Removes the default margin
% Chapter text styling
\titlecontents{chapter}[1.25cm] % Indentation
{\addvspace{15pt}\large\sffamily\bfseries} % Spacing and font options for chapters
{\color{ocre!60}\contentslabel[\Large\thecontentslabel]{1.25cm}\color{ocre}} % Chapter number
{}
{\color{ocre!60}\normalsize\sffamily\bfseries\;\titlerule*[.5pc]{.}\;\thecontentspage} % Page number
% Section text styling
\titlecontents{section}[1.25cm] % Indentation
{\addvspace{5pt}\sffamily\bfseries} % Spacing and font options for sections
{\contentslabel[\thecontentslabel]{1.25cm}} % Section number
{}
{\sffamily\hfill\color{black}\thecontentspage} % Page number
[]
% Subsection text styling
\titlecontents{subsection}[1.25cm] % Indentation
{\addvspace{1pt}\sffamily\small} % Spacing and font options for subsections
{\contentslabel[\thecontentslabel]{1.25cm}} % Subsection number
{}
{\sffamily\;\titlerule*[.5pc]{.}\;\thecontentspage} % Page number
[]
%----------------------------------------------------------------------------------------
% MINI TABLE OF CONTENTS IN CHAPTER HEADS
%----------------------------------------------------------------------------------------
% Section text styling
\titlecontents{lsection}[0em] % Indentation
{\footnotesize\sffamily} % Font settings
{}
{}
{}
% Subsection text styling
\titlecontents{lsubsection}[.5em] % Indentation
{\normalfont\footnotesize\sffamily} % Font settings
{}
{}
{}
%----------------------------------------------------------------------------------------
% PAGE HEADERS
%----------------------------------------------------------------------------------------
\usepackage{fancyhdr} % Required for header and footer configuration
\pagestyle{fancy}
\renewcommand{\chaptermark}[1]{\markboth{\sffamily\normalsize\bfseries #1}{}} % Chapter text font settings
\renewcommand{\sectionmark}[1]{\markright{\sffamily\large\thesection\hspace{5pt} #1}{}} % Section text font settings
\fancyhf{} \fancyhead[LE,RO]{\sffamily\normalsize\thepage} % Font setting for the page number in the header
\fancyhead[LO]{\rightmark} % Print the nearest section name on the left side of odd pages
\fancyhead[RE]{\leftmark} % Print the current chapter name on the right side of even pages
\renewcommand{\headrulewidth}{0.5pt} % Width of the rule under the header
\addtolength{\headheight}{2.5pt} % Increase the spacing around the header slightly
\renewcommand{\footrulewidth}{0pt} % Removes the rule in the footer
\fancypagestyle{plain}{\fancyhead{}\renewcommand{\headrulewidth}{0pt}} % Style for when a plain pagestyle is specified
% Removes the header from odd empty pages at the end of chapters
\makeatletter
\renewcommand{\cleardoublepage}{
\clearpage\ifodd\c@page\else
%\hbox{}
%\vspace*{\fill}
%\thispagestyle{empty}
%\newpage
\fi}
%----------------------------------------------------------------------------------------
% THEOREM STYLES
%----------------------------------------------------------------------------------------
\usepackage{amsmath,amsfonts,amssymb,amsthm} % For including math equations, theorems, symbols, etc
\newcommand{\intoo}[2]{\mathopen{]}#1\,;#2\mathclose{[}}
\newcommand{\ud}{\mathop{\mathrm{{}d}}\mathopen{}}
\newcommand{\intff}[2]{\mathopen{[}#1\,;#2\mathclose{]}}
\newtheorem{notation}{Notation}[chapter]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%% dedicated to boxed/framed environements %%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtheoremstyle{ocrenumbox}% % Theorem style name
{0pt}% Space above
{0pt}% Space below
{\normalfont}% % Body font
{}% Indent amount
{\small\bf\sffamily\color{ocre}}% % Theorem head font
{\;}% Punctuation after theorem head
{0.25em}% Space after theorem head
{\small\sffamily\color{ocre}\thmname{#1}\nobreakspace\thmnumber{\@ifnotempty{#1}{}\@upn{#2}}% Theorem text (e.g. Theorem 2.1)
\thmnote{\nobreakspace\the\thm@notefont\sffamily\bfseries\color{black}---\nobreakspace#3.}} % Optional theorem note
\renewcommand{\qedsymbol}{$\blacksquare$}% Optional qed square
\newtheoremstyle{blacknumex}% Theorem style name
{5pt}% Space above
{5pt}% Space below
{\normalfont}% Body font
{} % Indent amount
{\small\bf\sffamily}% Theorem head font
{\;}% Punctuation after theorem head
{0.25em}% Space after theorem head
{\small\sffamily{\tiny\ensuremath{\blacksquare}}\nobreakspace\thmname{#1}\nobreakspace\thmnumber{\@ifnotempty{#1}{}\@upn{#2}}% Theorem text (e.g. Theorem 2.1)
\thmnote{\nobreakspace\the\thm@notefont\sffamily\bfseries---\nobreakspace#3.}}% Optional theorem note
\newtheoremstyle{blacknumbox} % Theorem style name
{0pt}% Space above
{0pt}% Space below
{\normalfont}% Body font
{}% Indent amount
{\small\bf\sffamily}% Theorem head font
{\;}% Punctuation after theorem head
{0.25em}% Space after theorem head
{\small\sffamily\thmname{#1}\nobreakspace\thmnumber{\@ifnotempty{#1}{}\@upn{#2}}% Theorem text (e.g. Theorem 2.1)
\thmnote{\nobreakspace\the\thm@notefont\sffamily\bfseries---\nobreakspace#3.}}% Optional theorem note
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%% dedicated to non-boxed/non-framed environements %%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newtheoremstyle{ocrenum}% % Theorem style name
{5pt}% Space above
{5pt}% Space below
{\normalfont}% % Body font
{}% Indent amount
{\small\bf\sffamily\color{ocre}}% % Theorem head font
{\;}% Punctuation after theorem head
{0.25em}% Space after theorem head
{\small\sffamily\color{ocre}\thmname{#1}\nobreakspace\thmnumber{\@ifnotempty{#1}{}\@upn{#2}}% Theorem text (e.g. Theorem 2.1)
\thmnote{\nobreakspace\the\thm@notefont\sffamily\bfseries\color{black}---\nobreakspace#3.}} % Optional theorem note
\renewcommand{\qedsymbol}{$\blacksquare$}% Optional qed square
\makeatother
% Defines the theorem text style for each type of theorem to one of the three styles above
\newcounter{dummy}
\numberwithin{dummy}{section}
\theoremstyle{ocrenumbox}
\newtheorem{theoremeT}[dummy]{Theorem}
\newtheorem{problem}{Problem}[chapter]
\newtheorem{question}{Question}[chapter]
\newtheorem{Warning}{Warning}[chapter]
\newtheorem{reminder}[question]{Reminder}
\newtheorem{exerciseT}{Exercise}[chapter]
\theoremstyle{blacknumex}
\newtheorem{exampleT}{Example}[chapter]
\theoremstyle{blacknumbox}
\newtheorem{vocabulary}{Vocabulary}[chapter]
\newtheorem{definitionT}{Definition}[section]
\newtheorem{corollaryT}[dummy]{Corollary}
\theoremstyle{ocrenum}
\newtheorem{proposition}[dummy]{Proposition}
%----------------------------------------------------------------------------------------
% DEFINITION OF COLORED BOXES
%----------------------------------------------------------------------------------------
\newcommand\Loadedframemethod{TikZ}
\RequirePackage[framemethod=\Loadedframemethod]{mdframed}
%\RequirePackage[framemethod=default]{mdframed} % Required for creating the theorem, definition, exercise and corollary boxes
% Theorem box
\newmdenv[skipabove=7pt,
skipbelow=7pt,
backgroundcolor=black!5,
linecolor=ocre,
innerleftmargin=5pt,
innerrightmargin=5pt,
innertopmargin=5pt,
leftmargin=0cm,
rightmargin=0cm,
innerbottommargin=5pt]{tBox}
% Exercise box
\newmdenv[skipabove=7pt,
skipbelow=7pt,
rightline=false,
leftline=true,
topline=false,
bottomline=false,
backgroundcolor=ocre!10,
linecolor=ocre,
innerleftmargin=5pt,
innerrightmargin=5pt,
innertopmargin=5pt,
innerbottommargin=5pt,
leftmargin=0cm,
rightmargin=0cm,
linewidth=4pt]{eBox}
% Definition box
\newmdenv[skipabove=7pt,
skipbelow=7pt,
rightline=false,
leftline=true,
topline=false,
bottomline=false,
linecolor=ocre,
innerleftmargin=5pt,
innerrightmargin=5pt,
innertopmargin=0pt,
leftmargin=0cm,
rightmargin=0cm,
linewidth=4pt,
innerbottommargin=0pt]{dBox}
% Corollary box
\newmdenv[skipabove=7pt,
skipbelow=7pt,
rightline=false,
leftline=true,
topline=false,
bottomline=false,
linecolor=gray,
backgroundcolor=black!5,
innerleftmargin=5pt,
innerrightmargin=5pt,
innertopmargin=5pt,
leftmargin=0cm,
rightmargin=0cm,
linewidth=4pt,
innerbottommargin=5pt]{cBox}
% Creates an environment for each type of theorem and assigns it a theorem text style from the "Theorem Styles" section above and a colored box from above
\newenvironment{theorem}{\begin{tBox}\begin{theoremeT}}{\end{theoremeT}\end{tBox}}
\newenvironment{exercise}{\begin{eBox}\begin{exerciseT}}{\hfill{\color{ocre}\tiny\ensuremath{\blacksquare}}\end{exerciseT}\end{eBox}}
\newenvironment{definition}{\begin{dBox}\begin{definitionT}}{\end{definitionT}\end{dBox}}
\newenvironment{example}{\begin{exampleT}}{\hfill{\tiny\ensuremath{\blacksquare}}\end{exampleT}}
\newenvironment{corollary}{\begin{cBox}\begin{corollaryT}}{\end{corollaryT}\end{cBox}}
%----------------------------------------------------------------------------------------
% REMARK ENVIRONMENT
%----------------------------------------------------------------------------------------
\newenvironment{remark}{\par\vskip10pt\small % Vertical white space above the remark and smaller font size
\begin{list}{}{
\leftmargin=35pt % Indentation on the left
\rightmargin=25pt}\item\ignorespaces % Indentation on the right
\makebox[-2.5pt]{\begin{tikzpicture}[overlay]
\node[draw=ocre!60,line width=1pt,circle,fill=ocre!25,font=\sffamily\bfseries,inner sep=2pt,outer sep=0pt] at (-15pt,0pt){\textcolor{ocre}{R}};\end{tikzpicture}} % Orange R in a circle
\advance\baselineskip -1pt}{\end{list}\vskip5pt} % Tighter line spacing and white space after remark
%----------------------------------------------------------------------------------------
% SECTION NUMBERING IN THE MARGIN
%----------------------------------------------------------------------------------------
\makeatletter
\renewcommand{\@seccntformat}[1]{\llap{\textcolor{ocre}{\csname the#1\endcsname}\hspace{1em}}}
\renewcommand{\section}{\@startsection{section}{1}{\z@}
{-4ex \@plus -1ex \@minus -.4ex}
{1ex \@plus.2ex }
{\normalfont\large\sffamily\bfseries}}
\renewcommand{\subsection}{\@startsection {subsection}{2}{\z@}
{-3ex \@plus -0.1ex \@minus -.4ex}
{0.5ex \@plus.2ex }
{\normalfont\sffamily\bfseries}}
\renewcommand{\subsubsection}{\@startsection {subsubsection}{3}{\z@}
{-2ex \@plus -0.1ex \@minus -.2ex}
{0.2ex \@plus.2ex }
{\normalfont\small\sffamily\bfseries}}
\renewcommand\paragraph{\@startsection{paragraph}{4}{\z@}
{-2ex \@plus-.2ex \@minus .2ex}
{0.1ex}
{\normalfont\small\sffamily\bfseries}}
%----------------------------------------------------------------------------------------
% CHAPTER HEADINGS
%----------------------------------------------------------------------------------------
\newcommand{\thechapterimage}{}
\newcommand{\chapterimage}[1]{\renewcommand{\thechapterimage}{#1}}
\def\thechapter{\arabic{chapter}}
\def\@makechapterhead#1{
\thispagestyle{empty}
{\centering \normalfont\sffamily
\ifnum \c@secnumdepth >\m@ne
\if@mainmatter
\startcontents
\begin{tikzpicture}[remember picture,overlay]
\node at (current page.north west)
{\begin{tikzpicture}[remember picture,overlay]
\node[anchor=north west,inner sep=0pt] at (0,0) {\includegraphics[width=\paperwidth]{\thechapterimage}};
%Commenting the 3 lines below removes the small contents box in the chapter heading
\draw[fill=white,opacity=.6] (1cm,0) rectangle (7.7cm,-4.7cm);
\node[anchor=north west] at (1cm,.25cm) {\parbox[t][8cm][t]{6.6cm}{\huge\bfseries\flushleft \printcontents{l}{1}{\setcounter{tocdepth}{2}}}};
\draw[anchor=west] (5cm,-9cm) node [rounded corners=25pt,fill=white,fill opacity=.6,text opacity=1,draw=ocre,draw opacity=1,line width=2pt,inner sep=15pt]{\huge\sffamily\bfseries\textcolor{black}{\thechapter\ ---\ #1\vphantom{plPQq}\makebox[22cm]{}}};
\end{tikzpicture}};
\end{tikzpicture}}\par\vspace*{230\p@}
\fi
\fi
}
\def\@makeschapterhead#1{
\thispagestyle{empty}
{\centering \normalfont\sffamily
\ifnum \c@secnumdepth >\m@ne
\if@mainmatter
\startcontents
\begin{tikzpicture}[remember picture,overlay]
\node at (current page.north west)
{\begin{tikzpicture}[remember picture,overlay]
\node[anchor=north west] at (-4pt,4pt) {\includegraphics[width=\paperwidth]{\thechapterimage}};
\draw[anchor=west] (5cm,-9cm) node [rounded corners=25pt,fill=white,opacity=.7,inner sep=15.5pt]{\huge\sffamily\bfseries\textcolor{black}{\vphantom{plPQq}\makebox[22cm]{}}};
\draw[anchor=west] (5cm,-9cm) node [rounded corners=25pt,draw=ocre,line width=2pt,inner sep=15pt]{\huge\sffamily\bfseries\textcolor{black}{#1\vphantom{plPQq}\makebox[22cm]{}}};
\end{tikzpicture}};
\end{tikzpicture}}\par\vspace*{230\p@}
\fi
\fi
}
\makeatother
|
{"hexsha": "52305bea64ac3ac0ddd2d29ee58e8ee00a2f691a", "size": 16911, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "usr/src/uts/common/io/qede/579xx/drivers/ecore/documentation/structure.tex", "max_stars_repo_name": "AsahiOS/gate", "max_stars_repo_head_hexsha": "283d47da4e17a5871d9d575e7ffb81e8f6c52e51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "usr/src/uts/common/io/qede/579xx/drivers/ecore/documentation/structure.tex", "max_issues_repo_name": "AsahiOS/gate", "max_issues_repo_head_hexsha": "283d47da4e17a5871d9d575e7ffb81e8f6c52e51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "usr/src/uts/common/io/qede/579xx/drivers/ecore/documentation/structure.tex", "max_forks_repo_name": "AsahiOS/gate", "max_forks_repo_head_hexsha": "283d47da4e17a5871d9d575e7ffb81e8f6c52e51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-30T00:04:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-30T00:04:16.000Z", "avg_line_length": 43.1403061224, "max_line_length": 251, "alphanum_fraction": 0.6741765715, "num_tokens": 4723}
|
function keep(varargin);
%KEEP keeps the caller workspace variables of your choice and clears the rest.
%   Its usage is just like "clear" but only for variables.
%
%   KEEP(V1, V2, ...) clears every variable in the caller's workspace
%   EXCEPT those named. If any named variable does not exist, nothing
%   is cleared and a warning is printed instead.
%
%   Xiaoning (David) Yang xyang@lanl.gov 1998
%   Revision based on comments from Michael McPartland,
%   michael@gaitalf.mgh.harvard.edu, 1999

% Keep all: with no arguments there is nothing to filter, so do nothing.
if isempty(varargin)
	return
end

% See what variables exist in the caller workspace.
wh = evalin('caller','who');

% Check workspace variables: nothing to keep if the workspace is empty.
if isempty(wh)
	error('  There is nothing to keep!')
end

% Construct a single string of all workspace variable names delimited by
% ":" (e.g. ':a:b:c:'). Each requested name is then removed from this
% string, leaving only the names that should be cleared.
variable = [];
for i = 1:length(wh)
	variable = [variable,':',wh{i}];
end
variable = [variable,':'];

% Remove each requested (kept) variable name from the delimited string.
% The ':name:' search pattern guarantees whole-name matches only.
flag = 0;
for i = 1:length(varargin)
	I = findstr(variable,[':',varargin{i},':']);
	if isempty(I)
		% Requested variable not present in the workspace.
		disp(['  ',varargin{i}, ' does not exist!'])
		flag = 1;
	elseif I == 1
		% Match at the very start of the string.
		variable = variable(1+length(varargin{i})+1:length(variable));
	elseif I+length(varargin{i})+1 == length(variable)
		% Match at the very end of the string.
		variable = variable(1:I);
	else
		% Match in the middle: splice it out, keeping one ':' delimiter.
		variable = [variable(1:I),variable(I+length(varargin{i})+2:length(variable))];
	end
end

% Abort without deleting anything if any requested variable was missing.
if flag == 1
	disp('  No variables are deleted!')
	return
end

% Convert the remaining delimited string back into a cell array of names
% and clear them in the caller. If only the delimiter ':' remains, every
% workspace variable was kept and there is nothing to clear.
I = findstr(variable,':');
if length(I) ~= 1
	for i = 1:length(I)-1
		if i ~= length(I)-1
			% Append a space so the names concatenate into a valid
			% space-separated 'clear' argument list.
			del(i) = {[variable(I(i)+1:I(i+1)-1),' ']};
		else
			del(i) = {variable(I(i)+1:length(variable)-1)};
		end
	end
	evalin('caller',['clear ',del{:}])
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/181-keep/keep.m"}
|
import math
import numpy as np
from itertools import chain
import load_subjects as ls
# Given a list X, returns a list of changepoints
def get_changepoints(X):
    """Return a boolean array marking where consecutive elements of X differ.

    Element i of the result is True iff X[i] != X[i + 1], so the output is
    one element shorter than X.
    """
    previous, current = X[:-1], X[1:]
    return previous != current
# Construct numpy array from jagged data by filling ends of short rows with NaNs
def jagged_to_numpy(jagged):
    """Construct a rectangular numpy array from jagged data.

    Short rows are right-padded with NaNs so that every row has the length
    of the longest input row.

    Args:
        jagged: list of sequences of numbers, possibly of unequal lengths.

    Returns:
        A 2-D float array of shape (len(jagged), max row length); an empty
        (0, 0) array when `jagged` is empty (the original raised ValueError
        from max() on an empty sequence).
    """
    if not jagged:
        return np.empty((0, 0))
    width = max(len(row) for row in jagged)
    aligned = np.full((len(jagged), width), np.nan)  # pre-fill with NaN padding
    for i, row in enumerate(jagged):  # populate each row's leading columns
        aligned[i, :len(row)] = row
    return aligned
def __interpolate_to_length_labels(X, N):
    """Resample an integer label sequence X to length N, preserving the
    relative positions of its change points.

    Each constant segment of X is stretched by the factor N / len(X)
    (segment boundaries rounded up), so nearest-label semantics are kept
    rather than numeric interpolation.

    Args:
        X: 1-D array of integer labels.
        N: desired output length; must be a whole number.

    Returns:
        Length-N integer array of resampled labels.

    Raises:
        ValueError: if N is not an integer.
    """
    if not N == int(N):
        raise ValueError('New length must be an integer, but is ' + str(N))
    N = int(N)
    change_points = np.where(X[:-1] != X[1:])[0]
    X_new = np.zeros(N, dtype = int)
    upsample_rate = float(N) / len(X)
    new_segment_end = 0  # needed for the edge case where there are no change points
    prev_change_point = 0
    for change_point in change_points + 1:  # +1: index of first element AFTER the change
        new_segment_start = int(math.ceil(prev_change_point * upsample_rate))
        new_segment_end = int(math.ceil(change_point * upsample_rate))
        # Fix: the original executed this assignment twice in a row (a
        # duplicated, redundant statement); once is sufficient.
        X_new[new_segment_start:new_segment_end] = X[prev_change_point]
        prev_change_point = change_point
    X_new[new_segment_end:] = X[-1]  # manually fill-in after the last change point
    return X_new
# X = np.array([0, 0, 0, 0, 1, 1, 0, 1])
# print X
# print __interpolate_to_length_labels(X, len(X))
# print __interpolate_to_length_labels(X, 2*len(X))
# print __interpolate_to_length_labels(X, 2.5*len(X))
# Given a K x D x N array of numbers, encoding the positions of each of K D-dimensional objects over N time points,
# performs interpolate_to_length_D (independently) on each object in X
def interpolate_to_length_distractors(X, new_len):
    """Resample each tracked object's trajectory to a common length.

    X is a K x D x N array encoding K D-dimensional object positions over N
    time points; interpolate_to_length_D is applied independently to each of
    the K objects. Returns a K x D x new_len array.
    """
    num_objects, num_dims = X.shape[0], X.shape[1]
    resampled = np.zeros((num_objects, num_dims, new_len))
    for obj in range(num_objects):
        resampled[obj] = interpolate_to_length_D(X[obj], new_len)
    return resampled
# Given a D-dimensional sequence X of numbers, performs interpolate_to_length (independently) on each dimension of X
# X is D x N, where D is the dimensionality and N is the sample length
def interpolate_to_length_D(X, new_len):
    """Linearly resample a multi-dimensional sequence to a new length.

    X is D x N (D dimensions, N samples); each dimension is interpolated
    independently. Returns a D x new_len array.
    """
    resampled = np.zeros((X.shape[0], new_len))
    for dim, row in enumerate(X):
        resampled[dim, :] = __interpolate_to_length(row, new_len)
    return resampled
# Given a sequence X of numbers, returns the length-new_len linear interpolant of X
def __interpolate_to_length(X, new_len):
    """Return the length-new_len linear interpolant of the 1-D sequence X."""
    old_len = X.shape[0]
    # Evenly spaced sample positions n * old_len / new_len for n in [0, new_len).
    sample_points = np.arange(new_len, dtype=float) * old_len / new_len
    return np.interp(sample_points, range(old_len), X)
# Given a D-dimensional sequence X of numbers, performs impute_missing_data_D (independently) on each dimension of X
# X is D x N, where D is the dimensionality and N is the sample length
def impute_missing_data_D(X, max_len = 10):
    """Impute short NaN gaps (up to max_len) in each dimension of X, in place.

    X is D x N (D dimensions, N samples); __impute_missing_data is applied
    independently to each dimension. Returns X for convenience.
    """
    for dim in range(X.shape[0]):
        X[dim, :] = __impute_missing_data(X[dim, :], max_len)
    return X
# Given a sequence X of floats, replaces short streches (up to length max_len) of NaNs with linear interpolation
# For example, if
# X = np.array([1, NaN, NaN, 4, NaN, 6])
# then
# impute_missing_data(X, max_len = 1) == np.array([1, NaN, NaN, 5, 6])
# and
# impute_missing_data(X, max_len = 2) == np.array([1, 2, 3, 4, 5, 6])
def __impute_missing_data(X, max_len):
    """Replace NaN gaps of length <= max_len in X with linear interpolation, in place.

    Scans left to right; each time a valid (non-NaN) sample is found after a
    gap, the gap is filled by interpolating between the previous valid sample
    and the current one. A gap at the start is filled by propagating the first
    valid value backwards; a gap at the end by propagating the last valid value
    forwards. Gaps longer than max_len are left as NaN. Returns X (mutated).
    """
    last_valid_idx = -1
    for n in range(len(X)):
        if not math.isnan(X[n]):
            if last_valid_idx < n - 1: # there is missing data before index n
                if n - (max_len + 1) <= last_valid_idx: # amount of missing data is at most max_len
                    if last_valid_idx == -1: # No previous valid data (i.e., first timepoint is missing)
                        X[0:n] = X[n] # Just propagate the first valid data point backwards
                    else:
                        first_last = np.array([X[last_valid_idx], X[n]]) # initial and final values from which to linearly interpolate
                        new_len = n - last_valid_idx + 1
                        # Fill the gap (endpoints included; they keep their values).
                        X[last_valid_idx:(n + 1)] = np.interp([float(x)/(new_len - 1) for x in range(new_len)], [0, 1], first_last)
            last_valid_idx = n
        elif n == len(X) - 1: # if n is the last index of X and X[n] is NaN
            if n - (max_len + 1) <= last_valid_idx: # amount of missing data is at most max_len
                X[last_valid_idx:] = X[last_valid_idx] # propagate the last valid value forwards
    return X
# Some test cases:
# X = np.array([1, 2, 3])
# print __impute_missing_data(X, 10)
# X = np.array([0, 1])
# print __interpolate_fixed_length(X, 5)
# X = np.array([1, float('nan'), float('nan'), float('nan'), 5, float('nan'), float('nan'), 8])
# print X
# print __impute_missing_data(X, 3)
# Given two binary sequences xs and ys of equal length, computes a confusion between xs and ys,
# but allows for some slack in detecting positives (i.e., positives with distance max_dist can be counted correct)
def generalized_2x2_confusion_matrix(xs, ys, max_dist):
    """Confusion matrix for binary sequences with slack in positive matching.

    A positive in xs counts as a true positive if ys has a positive within
    max_dist indices of it. Given equal-length binary sequences xs
    (predictions) and ys (reference), returns the 2x2 matrix
    [[TN, FN], [FP, TP]].
    """
    # Dilate ys: mark every index within max_dist of a positive in ys.
    # Comparing xs against this dilated mask yields the slack-tolerant TPs;
    # the rest of the matrix follows from TPs, xs, and ys.
    dilated = np.zeros(np.shape(ys), dtype=bool)
    for idx in np.flatnonzero(ys):
        dilated[max(0, idx - max_dist):idx + max_dist + 1] = True
    true_positives = np.sum(np.logical_and(xs, dilated))
    false_positives = np.sum(xs) - true_positives
    false_negatives = max(0, np.sum(ys) - true_positives)
    true_negatives = len(xs) - (true_positives + false_positives + false_negatives)
    return np.array([[true_negatives, false_negatives], [false_positives, true_positives]])
# A simple test:
# Compare to reference confusion matrix implementation for max_dist=0,
# And check that classification performance improves with larger max_dist
# >>> from sklearn.metrics import confusion_matrix
# >>> xs = np.random.randint(low=0,high=2,size=10000)
# >>> ys = np.random.randint(low=0,high=2,size=10000)
# >>> confusion_matrix(xs, ys, labels=[False, True])
# array([[2479, 2531],
# [2455, 2535]])
# >>> util.generalized_2x2_confusion_matrix(xs, ys, max_dist=0)
# array([[2479, 2531],
# [2455, 2535]])
# >>> util.generalized_2x2_confusion_matrix(xs, ys, max_dist=1)
# array([[4337, 673],
# [ 597, 4393]])
# >>> util.generalized_2x2_confusion_matrix(xs, ys, max_dist=1000)
# array([[4934, 76],
# [ 0, 4990]])
# Given a confusion matrix CM from a binary classification task; CM should be formatted as
# [[True Negatives, False Negatives],
# [False Positives, True Positives]]
def classification_performance(CM):
    """Compute precision, recall, F1, and Matthews correlation from a 2x2 matrix.

    CM is formatted as [[True Negatives, False Negatives],
                        [False Positives, True Positives]].
    Returns the tuple (precision, recall, F1, MCC).
    """
    TN, FN = float(CM[0, 0]), float(CM[0, 1])
    FP, TP = float(CM[1, 0]), float(CM[1, 1])
    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    F1 = 2.0 * precision * recall / (precision + recall)
    MCC = (TN * TP - FN * FP) / math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
    return precision, recall, F1, MCC
# Interpolate missing eyetracking data and store new imputed data proportion
def impute_missing_data(experiment, max_len = 10):
    """Interpolate short gaps in the experiment's eyetracking data and record stats.

    Mutates experiment.datatypes['eyetrack'].raw_data in place (gaps up to
    max_len frames are linearly interpolated) and stores the proportion of
    frames still missing afterwards, plus the proportion that were imputed.
    No-op if the experiment has no eyetracking data.
    """
    if not 'eyetrack' in experiment.datatypes:
        return # If eyetracking data is missing, nothing to do
    eyetrack = experiment.datatypes['eyetrack']
    # impute_missing_data_D mutates its argument in place and .T is a view of
    # raw_data, so discarding the return value here is harmless.
    impute_missing_data_D(eyetrack.raw_data.T, max_len=max_len).T
    # Column 1 is presumably the first gaze coordinate — TODO confirm against the loader.
    eyetrack.proportion_missing_frames_after_imputation = np.mean(np.isnan(eyetrack.raw_data[:, 1]))
    # NOTE(review): proportion_total_missing_frames is set outside this view; also,
    # (after - total) is <= 0 when imputation removes NaNs — confirm the intended sign.
    eyetrack.proportion_imputed_frames = eyetrack.proportion_missing_frames_after_imputation - eyetrack.proportion_total_missing_frames
# Break experiment's eyetracking data into trials
def break_eyetracking_into_trials(experiment):
    """Split the experiment's raw eyetracking stream into per-trial segments.

    Uses each TrackIt trial's first and last timestamps to window the raw
    eyetracking frames, wraps each window in an Eyetrack_Trial_Data, and
    records the per-trial proportion of missing (NaN) frames. Sets
    experiment.has_all_experiment_data accordingly.
    """
    if 'trackit' not in experiment.datatypes or 'eyetrack' not in experiment.datatypes:
        # Either TrackIt or eyetracking data is missing; nothing to do.
        experiment.has_all_experiment_data = False
        return
    experiment.has_all_experiment_data = True
    trackit = experiment.datatypes['trackit']
    eyetrack = experiment.datatypes['eyetrack']
    eyetrack.trials = []
    for trial in trackit.trials:
        start, end = trial.timestamps[0], trial.timestamps[-1]
        # Keep only eyetracking frames whose timestamp (column 0) falls strictly inside the trial window.
        in_window = np.asarray([frame for frame in eyetrack.raw_data if start < frame[0] < end])
        new_trial = ls.Eyetrack_Trial_Data(in_window)
        new_trial.proportion_missing_frames = np.mean(np.isnan(in_window[:, 1]))
        eyetrack.trials.append(new_trial)
# Interpolate the TrackIt data points to be synchronized with the Eyetracking data
def interpolate_trackit_to_eyetracking(experiment):
    """Resample TrackIt object positions to align with the eyetracking frames.

    For each trial, the K x T x 2 object-position array is linearly
    interpolated (x and y independently) so that its time axis matches the
    number of eyetracking frames in that trial. Mutates
    trackit_trial.object_positions in place; no-op if either data source is
    missing.
    """
    if not experiment.has_all_experiment_data:
        return # If either TrackIt or eyetracking data is missing, nothing to do
    trackit = experiment.datatypes['trackit']
    eyetrack = experiment.datatypes['eyetrack']
    for trackit_trial, eyetrack_trial in zip(trackit.trials, eyetrack.trials):
        num_frames = eyetrack_trial.data.shape[0]
        num_objects = trackit_trial.object_positions.shape[0]
        resampled = np.zeros((num_objects, num_frames, 2))
        for coord in range(2):  # 0: x coordinates, 1: y coordinates
            resampled[:, :, coord] = interpolate_to_length_D(
                trackit_trial.object_positions[:, :, coord], new_len=num_frames)
        trackit_trial.object_positions = resampled
# Annotates experiment with the trials to be filtered, as well as whether the entire experiment should be filtered.
# Also excludes practice trials.
def filter_experiment(experiment, min_prop_data_per_trial=0.5, min_prop_trials_per_subject=0.5):
    """Annotate the experiment with which trials to keep and whether to keep the subject.

    A trial is kept if at least min_prop_data_per_trial of its eyetracking
    frames are present; trial 0 (the practice trial) is always excluded. The
    whole experiment is kept if at least min_prop_trials_per_subject of its
    trials survive. Results are stored on experiment.trials_to_keep and
    experiment.keep_experiment; no-op if eyetracking data is absent.
    """
    try:
        eyetrack = experiment.datatypes['eyetrack']
    except KeyError: # If the experiment doesn't have eyetracking data, nothing to do
        return
    except AttributeError as e:
        print("AttributeError: " + str(e))
        print('Perhaps, the eyetracking data has not yet been broken into trials. Run break_eyetracking_into_trials(experiment) first.')
        return
    trials = eyetrack.trials
    # NOTE(review): trial.proportion_missing is not set anywhere in this file —
    # break_eyetracking_into_trials sets proportion_missing_frames instead.
    # Confirm the attribute name against ls.Eyetrack_Trial_Data.
    experiment.trials_to_keep = [idx for (idx, trial) in enumerate(trials) \
            if 1 - trial.proportion_missing >= min_prop_data_per_trial and idx > 0]
    experiment.keep_experiment = (len(experiment.trials_to_keep) >= len(trials) * min_prop_trials_per_subject)
|
{"hexsha": "13a18f70287406317fca507d451a086a2908f71a", "size": 10479, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis_code/util.py", "max_stars_repo_name": "sss1/behavioral_eyetracking", "max_stars_repo_head_hexsha": "945268e564b9e9031be0afb40054d6ab35059633", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis_code/util.py", "max_issues_repo_name": "sss1/behavioral_eyetracking", "max_issues_repo_head_hexsha": "945268e564b9e9031be0afb40054d6ab35059633", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis_code/util.py", "max_forks_repo_name": "sss1/behavioral_eyetracking", "max_forks_repo_head_hexsha": "945268e564b9e9031be0afb40054d6ab35059633", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3798076923, "max_line_length": 142, "alphanum_fraction": 0.7131405668, "include": true, "reason": "import numpy", "num_tokens": 2984}
|
""" Import libraries """
import math
import matplotlib.pyplot as plt
import keras
import pandas as pd
import numpy as np
import getopt
from decimal import Decimal
from keras.models import Model
from keras.layers import LSTM
from keras.layers import *
from sklearn.preprocessing import MinMaxScaler
import os
import random
import tensorflow.random
os.environ['PYTHONHASHSEED'] = str(1)
random.seed(1)
tensorflow.random.set_seed(1)
np.random.seed(1)
import sys
pd.options.display.max_colwidth = None
def plot_examples(stock_input, stock_decoded):
    """Plot up to ``n`` input windows (top row) against their autoencoder
    reconstructions (bottom row) in a single matplotlib figure.

    Args:
        stock_input: indexable collection of input windows.
        stock_decoded: matching collection of reconstructed windows.
    """
    n = 10
    plt.figure(figsize=(20, 4))
    # BUG FIX: the original iterated np.arange(0, 1, 200), which is just
    # [0], so only the first of the n subplot columns was ever drawn.
    # Sample every 200th window instead (n windows total), stopping early
    # if fewer windows are available.
    for i, idx in enumerate(np.arange(0, n * 200, 200)):
        if idx >= len(stock_input):
            break
        # display original
        ax = plt.subplot(2, n, i + 1)
        if i == 0:
            ax.set_ylabel("Input", fontweight=600)
        else:
            ax.get_yaxis().set_visible(False)
        plt.plot(stock_input[idx])
        ax.get_xaxis().set_visible(False)
        # display reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        if i == 0:
            ax.set_ylabel("Output", fontweight=600)
        else:
            ax.get_yaxis().set_visible(False)
        plt.plot(stock_decoded[idx])
        ax.get_xaxis().set_visible(False)
def check_input(argv, dataset):
    """Parse command-line arguments of the form ``-d <dataset>``.

    On success the dataset path is stored in ``dataset[0]`` (the list is
    mutated in place). On any usage error a usage message is printed and
    the process exits with status 1.

    Args:
        argv: argument list (typically ``sys.argv[1:]``).
        dataset: single-element list used as an output parameter.
    """
    try:
        opts, args = getopt.getopt(argv, "d:")
    except getopt.GetoptError:
        print("Error: python reduce -d <dataset>")
        sys.exit(1)
    # No bare positional arguments are accepted.
    if len(args) != 0:
        print("Error: python reduce -d <dataset>")
        sys.exit(1)
    for opt, arg in opts:
        if opt == "-d":
            dataset[0] = arg
        else:
            print("Error: python reduce -d <dataset>")
            sys.exit(1)
    # BUG FIX: the original wrote `if(dataset == '')`, comparing the *list*
    # to a string (always False), so a missing -d option was never caught.
    # Check the stored value instead (it is still 0/'' when -d was absent).
    if dataset[0] in (0, ''):
        print("Error: python reduce -d <dataset>")
        sys.exit(1)
def main(argv):
    """Train a 1-D convolutional autoencoder on windowed time series and
    plot reconstructions of the held-out test series.

    Args:
        argv: command-line arguments (expects ``-d <dataset>``, a
            tab-separated CSV with one series per row).
    """
    dataset = [0]
    check_input(argv, dataset)
    print(dataset[0])
    if os.path.isfile(dataset[0]) == False:
        print("Error: Enter a valid CSV file")
        sys.exit(1)
    # Series are stored in rows; transpose so each series becomes a column.
    df=pd.read_csv(dataset[0], delimiter='\t', header=None)
    df = df.T
    print(f'Number of rows and columns: {df.shape}')
    df.head(5)
    # Convolutional-autoencoder hyper-parameters.
    window_length=10
    epochs= 50
    x_train_cnn_list = []
    x_test_cnn_list = []
    s=1
    # One MinMaxScaler per series, so each is scaled to [0, 1] independently
    # (and could later be inverted per series).
    sc_cnn = []
    training_set_scaled_cnn = []
    # Columns 0..339 form the training set.
    for i in range (0,340):
     sc_cnn.append(MinMaxScaler(feature_range = (0, 1)))
     temp=df[i][1:]
     temp=np.array(temp)
     temp=temp.reshape(-1,1)
     temp = sc_cnn[i].fit_transform(temp)
     training_set_scaled_cnn.append(temp)
     s=0
     # Slice the scaled series into non-overlapping windows of 10 samples.
     while(s<len(df[0])-1):
      tempo_x_train = temp[s:s+10]
      tempo_x_train=np.asarray(tempo_x_train).astype(np.float32)
      x_train_cnn_list.append(tempo_x_train)
      s=s+10
    x_train_final = np.asarray(x_train_cnn_list).astype(np.float32)
    # Columns 340..358 form the test set.
    s=1
    s=0
    x_test_final_cnn_list = []
    x_test_final_cnn_list_temp = []
    for i in range (340,359):
     sc_cnn.append(MinMaxScaler(feature_range = (0, 1)))
     temp=df[i][1:]
     temp=np.array(temp)
     temp=temp.reshape(-1,1)
     temp = sc_cnn[i].fit_transform(temp)
     training_set_scaled_cnn.append(temp)
     s=0
     while(s<len(df[i])-1):
      tempo_x_test = temp[s:s+10]
      tempo_x_test=np.asarray(tempo_x_test).astype(np.float32)
      x_test_cnn_list.append(tempo_x_test)
      x_test_final_cnn_list_temp.append(tempo_x_test)
      s=s+10
     # Keep the windows of each test series grouped, one list per series.
     x_test_final_cnn_list.append(list(x_test_final_cnn_list_temp))
     x_test_final_cnn_list_temp = []
    x_test_final = np.asarray(x_test_cnn_list).astype(np.float32)
    for i in range(0,len(x_test_final_cnn_list)):
     x_test_final_cnn_list[i]=np.asarray(x_test_final_cnn_list[i]).astype(np.float32)
    # Encoder: two conv + pooling stages compress each 10-sample window.
    input_window = Input(shape=(window_length,1))
    x = Conv1D(16, 3, activation="relu", padding="same")(input_window) # 10 dims
    x = MaxPooling1D(2, padding="same")(x) # 5 dims
    x = Conv1D(1, 3, activation="relu", padding="same")(x) # 5 dims
    encoded = MaxPooling1D(2, padding="same")(x) # 3 dims
    encoder = Model(input_window, encoded)
    # 3 dimensions in the encoded layer
    # Decoder: mirror the encoder back up to the original window length.
    x = Conv1D(1, 3, activation="relu", padding="same")(encoded) # 3 dims
    x = UpSampling1D(2)(x) # 6 dims
    x = Conv1D(16, 2, activation='relu')(x) # 5 dims
    x = UpSampling1D(2)(x) # 10 dims
    decoded = Conv1D(1, 3, activation='sigmoid', padding='same')(x) # 10 dims
    autoencoder = Model(input_window, decoded)
    autoencoder.summary()
    autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
    history = autoencoder.fit(x_train_final, x_train_final,
                    epochs=epochs, batch_size=256, shuffle=True)
    # Persist only the encoder half for later dimensionality reduction.
    encoder.save("encoder_C_question.h5")
    # Plot reconstructions for each held-out test series.
    for i in range(0,len(x_test_final_cnn_list)):
     decoded_stocks = autoencoder.predict(x_test_final_cnn_list[i])
     plot_examples(x_test_final_cnn_list[i],decoded_stocks)
if __name__ == "__main__":
    main(sys.argv[1:])
|
{"hexsha": "24f24056aebd19d8a9539d8a9e9e35e0336216c6", "size": 5055, "ext": "py", "lang": "Python", "max_stars_repo_path": "Convolutional_autoencoder/reduce.py", "max_stars_repo_name": "EPantelaios/LSTM-Neural-Networks", "max_stars_repo_head_hexsha": "fc0490a79a237f12d56d52d9b33585c79d15d9ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Convolutional_autoencoder/reduce.py", "max_issues_repo_name": "EPantelaios/LSTM-Neural-Networks", "max_issues_repo_head_hexsha": "fc0490a79a237f12d56d52d9b33585c79d15d9ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Convolutional_autoencoder/reduce.py", "max_forks_repo_name": "EPantelaios/LSTM-Neural-Networks", "max_forks_repo_head_hexsha": "fc0490a79a237f12d56d52d9b33585c79d15d9ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2402234637, "max_line_length": 88, "alphanum_fraction": 0.6261127596, "include": true, "reason": "import numpy", "num_tokens": 1375}
|
from collections import defaultdict
import tensorflow_hub as hub
import cv2
from matplotlib import pyplot as plt
from matplotlib import patches
from pathlib import Path
import numpy as np
label_map = { 1: "person", 2: "bicycle", 3: "car", 4: "motorcycle", 5: "airplane", 6: "bus", 7: "train", 8: "truck", 9: "boat", 10: "traffic light", 11: "fire hydrant", 13: "stop sign", 14: "parking meter", 15: "bench", 16: "bird", 17: "cat", 18: "dog", 19: "horse", 20: "sheep", 21: "cow", 22: "elephant", 23: "bear", 24: "zebra", 25: "giraffe", 27: "backpack", 28: "umbrella", 31: "handbag", 32: "tie", 33: "suitcase", 34: "frisbee", 35: "skis", 36: "snowboard", 37: "sports ball", 38: "kite", 39: "baseball bat", 40: "baseball glove", 41: "skateboard", 42: "surfboard", 43: "tennis racket", 44: "bottle", 46: "wine glass", 47: "cup", 48: "fork", 49: "knife", 50: "spoon", 51: "bowl", 52: "banana", 53: "apple", 54: "sandwich", 55: "orange", 56: "broccoli", 57: "carrot", 58: "hot dog", 59: "pizza", 60: "donut", 61: "cake", 62: "chair", 63: "couch", 64: "potted plant", 65: "bed", 67: "dining table", 70: "toilet", 72: "tv", 73: "laptop", 74: "mouse", 75: "remote", 76: "keyboard", 77: "cell phone", 78: "microwave", 79: "oven", 80: "toaster", 81: "sink", 82: "refrigerator", 84: "book", 85: "clock", 86: "vase", 87: "scissors", 88: "teddy bear", 89: "hair drier", 90: "toothbrush",}
def intersection(self, other):
    """Return the overlap of two ``[ymin, xmin, ymax, xmax]`` boxes.

    Corner ordering within each box does not matter (min/max normalise it).
    Returns the intersection rectangle in the same format, or the all-zero
    box when the rectangles do not overlap.
    """
    ay1, ax1, ay2, ax2 = self
    by1, bx1, by2, bx2 = other
    left = max(min(ax1, ax2), min(bx1, bx2))
    top = max(min(ay1, ay2), min(by1, by2))
    right = min(max(ax1, ax2), max(bx1, bx2))
    bottom = min(max(ay1, ay2), max(by1, by2))
    overlaps = left < right and top < bottom
    if not overlaps:
        return np.array([0, 0, 0, 0])
    return np.array([top, left, bottom, right])
def area(a):
    """Area of a ``[ymin, xmin, ymax, xmax]`` box (no orientation check)."""
    ay1, ax1, ay2, ax2 = a
    height = ay2 - ay1
    width = ax2 - ax1
    return height * width
# Detector choice: EfficientDet-Lite3; heavier alternatives left commented
# out for comparison.
# detector = hub.load("https://tfhub.dev/tensorflow/faster_rcnn/resnet101_v1_640x640/1")
# detector = hub.load("https://tfhub.dev/tensorflow/efficientdet/d5/1")
detector = hub.load("https://tfhub.dev/tensorflow/efficientdet/lite3/detection/1")
# Per-label count of accepted detections across all images.
n_boxes = defaultdict(int)
for path in Path("24").glob("*.jpg"):
    if path.name.startswith("."):
        continue
    # Load as RGB (OpenCV reads BGR) and show the image we will annotate.
    img = cv2.imread(str(path))
    height, width, _ = img.shape
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    fig, ax = plt.subplots()
    ax.imshow(img)
    # img = cv2.resize(img, dsize=(640, 640), interpolation=cv2.INTER_CUBIC)
    # Model expects a batched uint8 tensor: add a leading batch dimension.
    img = np.array(img)
    img = np.expand_dims(img, 0)
    # results = detector(img)
    # results = {key:value.numpy() for key,value in results.items()}
    # boxes, scores, classes = results['detection_boxes'][0], results['detection_scores'][0], results['detection_classes'][0]
    # boxes = [np.array([b[0] * height, b[1] * width, b[2] * height, b[3] * width]) for b in boxes]
    results = detector(img)
    results = [x.numpy() for x in results]
    # This TF-hub detection signature returns (boxes, scores, classes,
    # num_detections); take the first (only) batch element of each.
    boxes, scores, classes, num_detections = results
    boxes, scores, classes = boxes[0], scores[0], classes[0]
    print(boxes)
    print(scores)
    print(classes)
    box_history = []
    for box, score, cl in zip(boxes, scores, classes):
        # Drop low-confidence detections.
        if score < 0.2:
            continue
        # Disabled duplicate suppression based on box overlap ratio:
        # dup = False
        # for h in box_history:
        #     intersect_ratio = area(intersection(h, box))/(max(area(box), area(h)))
        #     if intersect_ratio > 0.8:
        #         dup = True
        #         break
        # if dup:
        #     continue
        label = label_map[int(cl)]
        # Draw the detection box and its label on the current axes.
        ymin, xmin, ymax, xmax = box
        rect = patches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
        ax.text(xmin, ymin, f"{label}", color="red")
        box_history.append(box)
        n_boxes[label] += 1
    plt.show()
print(n_boxes)
|
{"hexsha": "4f7a2cedd4f60ebb2e5d4d8405a992429397767f", "size": 4084, "ext": "py", "lang": "Python", "max_stars_repo_path": "question3/main.py", "max_stars_repo_name": "wechat-haibei/3D-deeplearning-processing", "max_stars_repo_head_hexsha": "9acf5900b24e29b03cb358c992d3d1e429288279", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-17T01:54:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T01:54:06.000Z", "max_issues_repo_path": "question3/main.py", "max_issues_repo_name": "wechat-haibei/3D-deeplearning-processing", "max_issues_repo_head_hexsha": "9acf5900b24e29b03cb358c992d3d1e429288279", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "question3/main.py", "max_forks_repo_name": "wechat-haibei/3D-deeplearning-processing", "max_forks_repo_head_hexsha": "9acf5900b24e29b03cb358c992d3d1e429288279", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3777777778, "max_line_length": 1426, "alphanum_fraction": 0.5609696376, "include": true, "reason": "import numpy", "num_tokens": 1501}
|
const ignorefirst = 10 # cm; knots this close to the track start are ignored
const bigturn = π/3 # 60°; tangent change that counts as a turning point
# const smallturn = π/93 # 60°
const s = 500 # smoothing factor passed to ParametricSpline
const Point = SVector{2, Float64}
# Convert various sample types to a Point (`missing` passes through).
point(::Missing) = missing
point(x::Instantaneous)= Point(x.data[1], x.data[2])
point(x::Point) = x
# Tangent (derivative) of the spline at parameter k, as a static 2-vector.
_getv(spl, k) = SVector{2, Float64}(derivative(spl, k))
function gettpindex(spl, ks)
    # Walk the knots comparing each tangent with the previous one; stop at
    # the first knot whose direction change exceeds `bigturn` and return it
    # (falls back to the last knot when no big turn is found).
    turning_knot = ks[1]
    previous_dir = _getv(spl, ks[1])
    for knot in ks[2:end]
        current_dir = _getv(spl, knot)
        turning_knot = knot
        if angle(previous_dir, current_dir) > bigturn
            break
        end
        previous_dir = current_dir
    end
    return turning_knot
end
function gettpknot(spl)
    # Candidate knots: discard everything within `ignorefirst` of the start.
    ks = Dierckx.get_knots(spl)
    filter!(k -> norm(spl(k) - spl(0)) > ignorefirst, ks)
    isempty(ks) && return 0.0
    # Coarse estimate: first knot where the tangent turns by > `bigturn`.
    tp2 = gettpindex(spl, ks)
    # return tp2
    # tp1 = knot immediately preceding tp2 (nudged forward slightly).
    tp1 = copy(tp2)
    for k in ks
        k == tp2 && break
        tp1 = k
    end
    tp1 += 0.1
    # Refine: step backwards from tp2 towards tp1 and return the first
    # parameter whose tangent no longer deviates by a big turn from the
    # direction at tp1 — i.e. where the turn actually begins.
    if tp1 < tp2
        main = _getv(spl, tp1)
        for t in tp2:-0.3:tp1
            v = _getv(spl, t)
            Δ = angle(main, v)
            Δ < bigturn && return t
        end
    end
    return tp2
end
# A 2-D position stamped with a time (seconds since the start of the track).
mutable struct TimedPoint
    xy::Point
    t::Float64
end
# Column-oriented container for a sequence of timed points.
const PointCollection = StructVector{TimedPoint}
# Build a PointCollection from raw (x, y, t) rows, re-basing time to t₀;
# a missing source yields an empty collection.
pointcollection(x::Missing, t₀) = StructVector{TimedPoint}(undef, 0)
pointcollection(x, t₀) = StructVector(TimedPoint(Point(i[1], i[2]), i[3] - t₀) for i in eachrow(x.data))
# A smoothed track: resampled coordinates, their (uniform) time stamps, the
# index of the turning point within `coords`, and the raw unsmoothed samples.
mutable struct Track
    coords::Vector{Point}
    t::StepRangeLen{Float64,Base.TwicePrecision{Float64},Base.TwicePrecision{Float64}}
    tp::Int
    rawcoords::StructArray{TimedPoint}
end
function filterdance(xy, Δt)
    # Collapse consecutive samples that stay within 4 units of the last
    # kept point (the animal "dancing" on the spot). Time keeps
    # accumulating either way, so kept points carry the dwell time.
    kept = [xy[1,:]]
    times = [0.0]
    for row in eachrow(xy)
        moved_enough = norm(row - kept[end]) > 4
        if moved_enough
            push!(kept, row)
            push!(times, times[end] + Δt)
        else
            times[end] += Δt
        end
    end
    # Re-base time so the first kept sample is at t = 0.
    times .-= times[1]
    return times, hcat(kept...)
end
function Track(x::Prolonged)
    # Ensure samples are ordered by time (column 3) before smoothing.
    xyt = !issorted(x.data[:, 3]) ? sortslices(x.data, dims = 1, lt = (x, y) -> isless(x[3], y[3])) : x.data
    # Robust mean sampling interval: trim 10% outliers from the time diffs.
    Δt = mean(trim(diff(xyt[:, 3]), prop = 0.1))
    # Remove on-the-spot "dancing" before fitting the spline.
    t, xy = filterdance(xyt[:,1:2], Δt)
    # Quadratic parametric spline with global smoothing factor `s`,
    # resampled on a uniform time grid.
    spl = ParametricSpline(t, xy; s = s, k = 2)
    tl = range(0.0, step = Δt, stop = t[end])
    xyl = Point.(spl.(tl))
    # Locate the turning point and convert it to an index into `tl`
    # (defaults to the last sample when past the end of the grid).
    tp = gettpknot(spl)
    i = findfirst(≥(tp), tl)
    if isnothing(i)
        i = length(tl)
    end
    # Keep the raw samples, with time re-based to the first sample.
    raw = pointcollection((data = xyt, ), xyt[1,3])
    Track(xyl, tl, i, raw)
end
# Homing segment: everything up to and including the turning point.
function homing(t::Track)
    return t.coords[1:t.tp]
end
# Search segment: from the turning point to the end of the track.
function searching(t::Track)
    return t.coords[t.tp:end]
end
# Centroid of the search segment.
function searchcenter(t::Track)
    return mean(searching(t))
end
# Coordinates of the turning point itself.
function turningpoint(t::Track)
    return t.coords[t.tp]
end
# Quantities shared by all experiment analyses: landmark positions, the
# smoothed track, pellet samples, and key event locations. `missing`
# marks data that was not recorded for a given run.
mutable struct Common
    feeder::Union{Missing, Point}
    nest::Union{Missing, Point}
    track::Track
    pellet::PointCollection
    fictive_nest::Point
    pickup::Union{Missing, Point}
    dropoff::Point
end
|
{"hexsha": "f7ebbc166036c16d487c3682e1fab1f276829033", "size": 2726, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/track.jl", "max_stars_repo_name": "yakir12/DungBase.jl", "max_stars_repo_head_hexsha": "1144c221d8e3552e1d4bfac23059f2ed61688959", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/track.jl", "max_issues_repo_name": "yakir12/DungBase.jl", "max_issues_repo_head_hexsha": "1144c221d8e3552e1d4bfac23059f2ed61688959", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-11-11T14:55:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-25T06:25:40.000Z", "max_forks_repo_path": "src/track.jl", "max_forks_repo_name": "yakir12/DungBase.jl", "max_forks_repo_head_hexsha": "1144c221d8e3552e1d4bfac23059f2ed61688959", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T10:52:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T10:52:46.000Z", "avg_line_length": 24.3392857143, "max_line_length": 108, "alphanum_fraction": 0.5781364637, "num_tokens": 958}
|
.SH "Installing \*(PN"
.PP
.II installation
To install \*(PN on your system, follow the directions in
the appropriate sub-section:
either for the VAX, or for the Intel development system.
.Sh "Installing \*(PN on the VAX"
.PP
To install \*(PN on the VAX, do the following:
.nr l1 0
\*i
Create a directory in which the compiler and its associated support libraries
can reside.
\*i
Copy the distribution tape into the directory via the VMS \fBBACKUP\fR
utility.
\*i
.II INSTAL86C.COM
Invoke the \fBINSTAL86C.COM\fR command file to build the compiler,
including your system run-time library.
\*i
Invoke the \fBSETUP86C.COM\fR command file to make the logical assignments
used by the compiler. As shipped, each user must invoke the command file.
The compiler can also be made available for system-wide use.
.PP
The following lists the contents of
\fBINSTAL86C.COM\fR:
.DM
.ta 4.0i
$
$ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
$ ! !
$ ! MWC86 Version 2.5.2 !
$ ! Copyright 1982 - 1986 by Mark Williams Company, Chicago. !
$ ! All rights reserved may not be copied or disclosed !
$ ! without permission. !
$ ! !
$ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
$
$ say ::= write sys$output
$ say "Linking the cross compiler"
$
$
.ta 2.0i
$ link CCSCAN.obj,-
cc86.olb/include=dope,-
cc86.olb/include=macros,-
cc86.olb/include=paternf,-
cc86.olb/include=paternb,-
cc86.olb/include=table0,-
cc86.olb/include=table1,-
cc86.olb/include=optab,-
cc86.olb/include=regnam,-
cc86.olb/lib
$
$ rename CCSCAN.exe cc86.exe
$
.DE
The following gives the contents of
.BR SETUP86C.COM :
.DM
$! File: setup86c.com
$! Last edit: 31-JAN-1986
$!+
.ta 1.5i
$! MWC86 VERSION 2.5.2
$! COPYRIGHT (C) 1982 - 86
$! BY MARK WILLIAMS COMPANY, CHICAGO.
$! ALL RIGHTS RESERVED.
$! MAY NOT BE COPIED OR DISCLOSED WITHOUT PERMISSION.
$!-
$!
$! This is the command file to set up the Mark Williams Co.
$! iAPX 86 C (VAX/VMS native mode) cross-compiler to run on VMS.
$! It is invoked by including the command
$!
$! $ @mwc86c:setup86c
$!
$! in one's login.com file. Here, "mwc" must be
$! the logical name
$! of the device containing this command file.
$! Note that this command file
$! assumes that the executable image of the compiler will reside on
$! DQA0: in directory [mwc86c]. This assignment should be
$! changed to reflect your installation realities.
$!
$ assign dqa0: mwc
$ assign mwc:[mwc86c.include] cc86$include
$ assign mwc:[mwc86c] mwc86c
$ cc86 :== $mwc86c:cc86
$ size :== $mwc86c:size
$!
$! Invoking the compiler:
$!
$! The command line is identical to that on the Intel Series III
$! development system. You say:
$! cc86 file [options]
$!
$! The output file is in the same directory and has the
$! same name as the input file; the file type is changed
$! to "o86" (for object) or "a86" (for pseudo-assembly code).
.DE
.Sh "Installing on the Intel development system"
.PP
To install \*(PN onto the Intel development system, simply
copy all of the files from the distribution disks onto your
hard disk.
Because its operating system does not use directories,
no special organization of the files is necessary.
|
{"hexsha": "ee2c1279e078a029b5f0b496f229d6bbeaf36c85", "size": 3161, "ext": "r", "lang": "R", "max_stars_repo_path": "doc/mwc/doc/mwc/text/isisinstall.r", "max_stars_repo_name": "gspu/Coherent", "max_stars_repo_head_hexsha": "299bea1bb52a4dcc42a06eabd5b476fce77013ef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2019-10-10T14:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T02:54:38.000Z", "max_issues_repo_path": "doc/mwc/doc/mwc/text/isisinstall.r", "max_issues_repo_name": "gspu/Coherent", "max_issues_repo_head_hexsha": "299bea1bb52a4dcc42a06eabd5b476fce77013ef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/mwc/doc/mwc/text/isisinstall.r", "max_forks_repo_name": "gspu/Coherent", "max_forks_repo_head_hexsha": "299bea1bb52a4dcc42a06eabd5b476fce77013ef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-25T18:38:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T18:38:37.000Z", "avg_line_length": 28.7363636364, "max_line_length": 77, "alphanum_fraction": 0.6915533059, "num_tokens": 967}
|
# Join per-model test-result CSVs into one frame and print qualitative
# examples where the models disagree (Python 2 script).
import pandas as pd
import numpy as np
import os
from sklearn.metrics import *
results_directory = '/sb-personal/cvqa/results/c2vqa-verbs-results-final'
output_directory = '/sb-personal/cvqa/src/c2vqa-verbs/analysis'
output_joined_file = os.path.join(output_directory, "all_models_test_results.csv")
# Reuse a previously joined results file when present; otherwise rebuild it
# from the per-model CSVs below.
if os.path.exists(output_joined_file):
    all_df = pd.read_csv(output_joined_file)
else:
    all_df = None
for f in os.listdir(results_directory):
    if not f.endswith('test_results.csv'):
        continue
    print f
    # File names look like "<model>-<trial>-...test_results.csv".
    parts = f.split('-')
    model = parts[0]
    trial = parts[1]
    path = os.path.join(results_directory, f)
    df = pd.read_csv(path)
    # Normalise the relevance prediction to 0/1: some result files store a
    # stringified bool list; otherwise derive it from the answer text.
    if 'y_predict_relevance' in df:
        df['y_predict_relevance'] = df['y_predict_relevance'].apply(lambda x: 0 if x == "[False]" else 1)
    else:
        df['y_predict_relevance'] = df['y_predict'].apply(lambda x: 0 if x.startswith("no ") else 1)
    # NOTE(review): y_true/y_predict are computed but never used below —
    # presumably left over from metric computation; confirm before removing.
    y_true = np.asarray(df['relevant'].tolist())
    y_predict = np.asarray(df['y_predict_relevance'].tolist())
    df['model'] = model
    df['trial'] = trial
    if all_df is None:
        all_df = df
    else:
        all_df = all_df.append(df)
print all_df
all_df.to_csv(output_joined_file)
# Restrict to trial 0 and compare the attention model ('qcatt') against the
# averaging baseline ('avg').
# NOTE(review): 'trial' was assigned from a filename substring (a str), so
# `== 0` only matches after a CSV round-trip coerces it to int — verify.
all_df = all_df[all_df['trial'] == 0]
lstm_model_df = all_df[all_df['model'] == 'avg']
qcatt_model_df = all_df[all_df['model'] == 'qcatt']
# Questions qcatt got right but the baseline got wrong.
qcatt_correct_df = qcatt_model_df[qcatt_model_df['y_predict'] == qcatt_model_df['y_true']]
lstm_incorrect_df = lstm_model_df[lstm_model_df['y_predict'] != lstm_model_df['y_true']]
print len(qcatt_correct_df)
print len(lstm_incorrect_df)
best_df = qcatt_correct_df[qcatt_correct_df['qa_id'].isin(lstm_incorrect_df['qa_id'])]
sample_df = best_df.sample(5)
print 'qcatt correct'
for _,row in sample_df.iterrows():
    print ''
    print row['question']
    print row['image_file']
    print row['caption']
    print '\ttruth: [%s]' % (row['y_true'])
    print '\tqcatt: [%s]' % (row['y_predict'])
    # Matching baseline prediction for the same question id.
    lstm_row = lstm_incorrect_df[lstm_incorrect_df['qa_id'] == row['qa_id']].iloc[0]
    # print '\tqclstm: [%s]' % (lstm_row['question'])
    # print '\tqclstm: [%s]' % (lstm_row['y_true'])
    print '\tqclstm: [%s]' % (lstm_row['y_predict'])
# The reverse comparison: questions the baseline got right but qcatt missed.
qcatt_incorrect_df = qcatt_model_df[qcatt_model_df['y_predict'] != qcatt_model_df['y_true']]
lstm_correct_df = lstm_model_df[lstm_model_df['y_predict'] == lstm_model_df['y_true']]
print len(qcatt_incorrect_df)
print len(lstm_correct_df)
best_df = qcatt_incorrect_df[qcatt_incorrect_df['qa_id'].isin(lstm_correct_df['qa_id'])]
sample_df = best_df.sample(5)
print 'qcatt incorrect'
for _,row in sample_df.iterrows():
    print ''
    print row['question']
    print row['image_file']
    print row['caption']
    print '\ttruth: [%s]' % (row['y_true'])
    print '\tqcatt: [%s]' % (row['y_predict'])
    lstm_row = lstm_correct_df[lstm_correct_df['qa_id'] == row['qa_id']].iloc[0]
    # print '\tqclstm: [%s]' % (lstm_row['question'])
    # print '\tqclstm: [%s]' % (lstm_row['y_true'])
    print '\tqclstm: [%s]' % (lstm_row['y_predict'])
|
{"hexsha": "65a721719034cbf82de5983f99f50dbdf11f9ea6", "size": 2935, "ext": "py", "lang": "Python", "max_stars_repo_path": "c2vqa-verbs/analysis/relevance-example-results.py", "max_stars_repo_name": "andeeptoor/qar-qae", "max_stars_repo_head_hexsha": "60d61b0c6b5686dda471c727227cee2cc365f836", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "c2vqa-verbs/analysis/relevance-example-results.py", "max_issues_repo_name": "andeeptoor/qar-qae", "max_issues_repo_head_hexsha": "60d61b0c6b5686dda471c727227cee2cc365f836", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "c2vqa-verbs/analysis/relevance-example-results.py", "max_forks_repo_name": "andeeptoor/qar-qae", "max_forks_repo_head_hexsha": "60d61b0c6b5686dda471c727227cee2cc365f836", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-06-09T01:05:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T06:17:32.000Z", "avg_line_length": 29.9489795918, "max_line_length": 100, "alphanum_fraction": 0.7052810903, "include": true, "reason": "import numpy", "num_tokens": 894}
|
import os
import numpy as np
from nobos_commons.data_structures.constants.dataset_part import DatasetPart
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.utils.file_helper import get_create_path
from nobos_torch_lib.datasets.action_recognition_datasets.ehpi_dataset import NormalizeEhpi, \
RemoveJointsOutsideImgEhpi
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import transforms
from ehpi_action_recognition.config import data_dir, models_dir, ehpi_dataset_path
from ehpi_action_recognition.tester_ehpi import TesterEhpi
from ehpi_action_recognition.paper_reproduction_code.datasets.ehpi_lstm_dataset import EhpiLSTMDataset
from ehpi_action_recognition.paper_reproduction_code.models.ehpi_lstm import EhpiLSTM
def get_test_set_lab(dataset_path: str, image_size: ImageSize):
    """Build the lab test set: both 30 FPS camera views, concatenated.

    Each view gets the same preprocessing (drop joints outside the image,
    then normalise to the image size). Label statistics are printed for
    every sub-dataset before concatenation.
    """
    num_joints = 15
    view_dirs = [
        "JOURNAL_2019_03_TEST_VUE01_30FPS",
        "JOURNAL_2019_03_TEST_VUE02_30FPS",
    ]
    datasets = [
        EhpiLSTMDataset(os.path.join(dataset_path, view_dir),
                        transform=transforms.Compose([
                            RemoveJointsOutsideImgEhpi(image_size),
                            NormalizeEhpi(image_size)
                        ]), num_joints=num_joints, dataset_part=DatasetPart.TEST)
        for view_dir in view_dirs
    ]
    for dataset in datasets:
        dataset.print_label_statistics()
    return ConcatDataset(datasets)
def get_test_set_office(dataset_path: str, image_size: ImageSize):
    """Build the office test set: a single 30 FPS recording.

    Uses the same preprocessing as the lab set (drop joints outside the
    image, then normalise); no augmentation is applied at test time.
    Prints label statistics before returning.
    """
    num_joints = 15
    preprocessing = transforms.Compose([
        RemoveJointsOutsideImgEhpi(image_size),
        NormalizeEhpi(image_size)
    ])
    record_dir = os.path.join(dataset_path, "JOURNAL_2019_04_TEST_EVAL2_30FPS")
    dataset = EhpiLSTMDataset(record_dir,
                              transform=preprocessing,
                              num_joints=num_joints,
                              dataset_part=DatasetPart.TEST)
    dataset.print_label_statistics()
    return dataset
if __name__ == '__main__':
    # Five training seeds per label source (ground truth, pose estimation,
    # and both combined), all evaluated at checkpoint 200.
    model_names = [
        "ehpi_journal_2019_03_gt_seed_0_cp0200",
        "ehpi_journal_2019_03_gt_seed_104_cp0200",
        "ehpi_journal_2019_03_gt_seed_123_cp0200",
        "ehpi_journal_2019_03_gt_seed_142_cp0200",
        "ehpi_journal_2019_03_gt_seed_200_cp0200",
        #
        "ehpi_journal_2019_03_pose_seed_0_cp0200",
        "ehpi_journal_2019_03_pose_seed_104_cp0200",
        "ehpi_journal_2019_03_pose_seed_123_cp0200",
        "ehpi_journal_2019_03_pose_seed_142_cp0200",
        "ehpi_journal_2019_03_pose_seed_200_cp0200",
        #
        "ehpi_journal_2019_03_both_seed_0_cp0200",
        "ehpi_journal_2019_03_both_seed_104_cp0200",
        "ehpi_journal_2019_03_both_seed_123_cp0200",
        "ehpi_journal_2019_03_both_seed_142_cp0200",
        "ehpi_journal_2019_03_both_seed_200_cp0200",
    ]
    # Test set — switch the commented pair to evaluate on the office set.
    test_set = get_test_set_lab(ehpi_dataset_path, ImageSize(1280, 720))
    result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "lab"))
    # test_set = get_test_set_office(ImageSize(1280, 720))
    # result_path = get_create_path(os.path.join(data_dir, "results", "its_journal_experiment_results", "office"))
    test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
    for model_name in model_names:
        print("Model name: {}".format(model_name))
        weights_path = os.path.join(models_dir, "{}.pth".format(model_name))
        tester = TesterEhpi()
        ehpi_results, seq_results = tester.test(test_loader, weights_path, model=EhpiLSTM(15, 5))
        # Persist raw per-frame and per-sequence results as uint32 arrays.
        ehpi_results_np = np.array(ehpi_results, dtype=np.uint32)
        seq_results_np = np.array(seq_results, dtype=np.uint32)
        np.save(os.path.join(result_path, "{}_ehpis".format(model_name)), ehpi_results_np)
        np.save(os.path.join(result_path, "{}_seqs".format(model_name)), seq_results_np)
{"hexsha": "d33c4f0cc31943aaa298a35c28cecd64171894e4", "size": 4367, "ext": "py", "lang": "Python", "max_stars_repo_path": "ehpi_action_recognition/paper_reproduction_code/evaluations/lstm/test_its_journal_2019.py", "max_stars_repo_name": "steuwe/ehpi_action_recognition", "max_stars_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 100, "max_stars_repo_stars_event_min_datetime": "2019-04-16T17:18:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T08:59:51.000Z", "max_issues_repo_path": "ehpi_action_recognition/paper_reproduction_code/evaluations/lstm/test_its_journal_2019.py", "max_issues_repo_name": "steuwe/ehpi_action_recognition", "max_issues_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-06-14T13:30:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T12:16:07.000Z", "max_forks_repo_path": "ehpi_action_recognition/paper_reproduction_code/evaluations/lstm/test_its_journal_2019.py", "max_forks_repo_name": "steuwe/ehpi_action_recognition", "max_forks_repo_head_hexsha": "4318e82e541c9b42bf0af7976815229ed6261c39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2019-05-08T03:29:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T05:51:00.000Z", "avg_line_length": 49.0674157303, "max_line_length": 114, "alphanum_fraction": 0.6972750172, "include": true, "reason": "import numpy", "num_tokens": 1067}
|
# -*- coding: utf-8 -*-
"""
This module contains the Branch class (one branch of the tree) and the Nodes class
"""
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from scipy.spatial import cKDTree
# Module-level thread pool (16 workers), created at import time.
pool = ThreadPool(16)
class Branch:
    """Class that contains a branch of the fractal tree
    Args:
        mesh: an object of the mesh class, where the fractal tree will grow
        init_node (int): initial node to grow the branch. This is an index that refers to a node in the nodes.nodes array.
        init_dir (array): initial direction to grow the branch. In general, it refers to the direction of the last segment of the mother brach.
        init_tri (int): the index of triangle of the mesh where the init_node sits.
        l (float): total length of the branch
        angle (float): angle (rad) with respect to the init_dir in the plane of the init_tri triangle
        w (float): repulsitivity parameter. Controls how much the branches repel each other.
        nodes: the object of the class nodes that contains all the nodes of the existing branches.
        brother_nodes (list): the nodes of the brother and mother branches, to be excluded from the collision detection between branches.
        Nsegments (int): number of segments to divide the branch.
    Attributes:
        child (list): contains the indexes of the child branches. It is not assigned when created.
        dir (array): vector direction of the last segment of the branch.
        nodes (list): contains the node indices of the branch. The node coordinates can be retrieved using nodes.nodes[i]
        triangles (list): contains the indices of the triangles from the mesh where every node of the branch lies.
        tri (int): triangle index where last node sits.
        growing (bool): False if the branch collide or is out of the surface. True otherwise.
    """
    def __init__(self,mesh,init_node,init_dir,init_tri,l,angle,w,nodes,brother_nodes,Nsegments):
        self.child = [0,0]
        self.dir = np.array([0.0,0.0,0.0])
        self.nodes=[]
        self.triangles=[]
        self.queue=[]
        self.growing=True
        shared_node=-1
        init_normal=mesh.normals[init_tri]
        # Exclude the mother/brother branches from collision detection.
        nodes.update_collision_tree(brother_nodes)
        # Rotate the initial direction by `angle` within the plane of the
        # initial triangle (spanned by init_dir and the in-plane normal).
        inplane=-np.cross(init_dir,init_normal)
        dir=np.cos(angle)*init_dir+np.sin(angle)*inplane
        dir=dir/np.linalg.norm(dir)
        self.nodes.append(init_node)
        self.queue.append(nodes.nodes[init_node])
        self.triangles.append(init_tri)
        # Bend the direction towards the distance gradient (repulsion from
        # existing branches), weighted by w, and renormalise.
        grad=nodes.gradient(self.queue[0])
        dir=(dir+w*grad)/np.linalg.norm(dir+w*grad)
        # Grow the branch one segment (length l/Nsegments) at a time.
        for i in range(1,Nsegments):
            intriangle=self.add_node_to_queue(mesh,self.queue[i-1],dir*l/Nsegments)
            if not intriangle:
                # The projected point left the surface: stop growing.
                print('Point not in triangle',i)
                self.growing=False
                break
            collision=nodes.collision(self.queue[i])
            if collision[1]<l/5.:
                # Too close to another branch: drop the last point and stop.
                print("Collision",i, collision)
                self.growing=False
                self.queue.pop()
                self.triangles.pop()
                shared_node=collision[0]
                break
            grad=nodes.gradient(self.queue[i])
            normal=mesh.normals[self.triangles[i],:]
            #Project the gradient to the surface
            grad=grad-(np.dot(grad,normal))*normal
            dir=(dir+w*grad)/np.linalg.norm(dir+w*grad)
        # Register the new nodes globally and record their indices locally
        # (the first queue entry is the pre-existing init_node).
        nodes_id=nodes.add_nodes(self.queue[1:])
        [self.nodes.append(x) for x in nodes_id]
        if not self.growing:
            # A terminated branch contributes a terminal (end) node.
            nodes.end_nodes.append(self.nodes[-1])
        self.dir=dir
        self.tri=self.triangles[-1]
        #Uncomment the following lines for a closed network
        # if shared_node is not -1:
        #     self.nodes.append(shared_node)
    def add_node_to_queue(self,mesh,init_node,dir):
        """Functions that projects a node in the mesh surface and it to the queue is it lies in the surface.
        Args:
            mesh: an object of the mesh class, where the fractal tree will grow
            init_node (array): vector that contains the coordinates of the last node added in the branch.
            dir (array): vector that contains the direction from the init_node to the node to project.
        Return:
            success (bool): true if the new node is in the triangle.
        """
        # Project the candidate point onto the mesh; a negative triangle
        # index means the point fell outside the surface.
        point, triangle=mesh.project_new_point(init_node+dir)
        if triangle>=0:
            self.queue.append(point)
            self.triangles.append(triangle)
            success=True
        else:
            success=False
        return success
class Nodes:
    """A class containing the nodes of the branches plus some functions to compute distance related quantities.

    Args:
        init_node (array): an array with the coordinates of the initial node of the first branch.

    Attributes:
        nodes (list): list of arrays containing the coordinates of the nodes.
        last_node (int): index of the last added node.
        end_nodes (list): a list containing the indices of all end nodes (nodes that are not connected) of the tree.
        tree (scipy.spatial.cKDTree): a k-d tree to compute the distance from any point to the closest node in the tree. It is updated once a branch is finished.
        collision_tree (scipy.spatial.cKDTree): a k-d tree to compute the distance from any point to the closest node in the tree, except from the brother and mother branches. It is used to check collision between branches.
    """
    def __init__(self, init_node):
        self.nodes = [init_node]
        self.last_node = 0
        self.end_nodes = []
        self.tree = cKDTree(self.nodes)

    def add_nodes(self, queue):
        """Store the nodes of one branch and return their indices.

        Args:
            queue (list): a list of arrays containing the coordinates of the nodes of one branch.

        Returns:
            nodes_id (list): the indices of the added nodes.
        """
        nodes_id = []
        for point in queue:
            self.nodes.append(point)
            self.last_node += 1
            nodes_id.append(self.last_node)
        # cKDTree is immutable, so rebuild it once after all points are added.
        self.tree = cKDTree(self.nodes)
        return nodes_id

    def distance_from_point(self, point):
        """Return the distance from ``point`` to the closest node in the tree.

        Args:
            point (array): the coordinates of the point to calculate the distance from.

        Returns:
            d (float): the distance between point and the closest node in the tree.
        """
        d, _ = self.tree.query(point)
        return d

    def distance_from_node(self, node):
        """Return the distance from the node with index ``node`` to the closest node in the tree.

        NOTE(review): the queried node is itself part of the tree, so
        ``query`` finds it at distance 0 — this always returns 0.0.
        Behaviour preserved; confirm whether the second-nearest neighbour
        (``k=2``) was intended.

        Args:
            node (int): the index of the node to calculate the distance from.

        Returns:
            d (float): the distance between the specified node and the closest node in the tree.
        """
        d, _ = self.tree.query(self.nodes[node])
        return d

    def update_collision_tree(self, nodes_to_exclude):
        """Rebuild collision_tree from all nodes except ``nodes_to_exclude``.

        If every node is excluded, a single very distant dummy node with the
        sentinel key 100000000 is used so queries still succeed.

        Args:
            nodes_to_exclude (list): the nodes to exclude from the tree. Usually the mother and the brother branch nodes.

        Returns:
            None
        """
        remaining = set(range(len(self.nodes))).difference(nodes_to_exclude)
        nodes_to_consider = [self.nodes[x] for x in remaining]
        self.nodes_to_consider_keys = list(remaining)
        if not nodes_to_consider:
            nodes_to_consider = [np.array([-100000000000.0, -100000000000.0, -100000000000.0])]
            self.nodes_to_consider_keys = [100000000]
            print("no nodes to consider")
        self.collision_tree = cKDTree(nodes_to_consider)

    def collision(self, point):
        """Return the closest node to ``point`` according to the collision_tree.

        Args:
            point (array): the coordinates of the point to calculate the distance from.

        Returns:
            collision (tuple): (index of the closest node, distance to the closest node).
                Note the order: callers use ``collision[0]`` as the node index
                and ``collision[1]`` as the distance.
        """
        d, idx = self.collision_tree.query(point)
        return (self.nodes_to_consider_keys[idx], d)

    def gradient(self, point):
        """Gradient of the distance-to-tree field at ``point``.

        Uses a central finite difference approximation with step 0.01.

        Args:
            point (array): the coordinates of the point to calculate the gradient of the distance from.

        Returns:
            grad (array): (x, y, z) components of the gradient of the distance.
        """
        delta = 0.01
        dx = np.array([delta, 0.0, 0.0])
        dy = np.array([0.0, delta, 0.0])
        dz = np.array([0.0, 0.0, delta])
        grad = np.array([
            (self.distance_from_point(point + dx) - self.distance_from_point(point - dx)) / (2 * delta),
            (self.distance_from_point(point + dy) - self.distance_from_point(point - dy)) / (2 * delta),
            (self.distance_from_point(point + dz) - self.distance_from_point(point - dz)) / (2 * delta),
        ])
        return grad
|
{"hexsha": "c5d77e22cf5d73077f91c367ba1ecd410952cca7", "size": 10545, "ext": "py", "lang": "Python", "max_stars_repo_path": "Branch3D.py", "max_stars_repo_name": "adsche/fractal-tree", "max_stars_repo_head_hexsha": "9d33a91cbdcdbad40b082a1b936e5cb133a8892a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-10-14T03:38:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T06:10:41.000Z", "max_issues_repo_path": "Branch3D.py", "max_issues_repo_name": "adsche/fractal-tree", "max_issues_repo_head_hexsha": "9d33a91cbdcdbad40b082a1b936e5cb133a8892a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-05T21:39:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-24T19:08:13.000Z", "max_forks_repo_path": "Branch3D.py", "max_forks_repo_name": "adsche/fractal-tree", "max_forks_repo_head_hexsha": "9d33a91cbdcdbad40b082a1b936e5cb133a8892a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-11-08T20:01:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T11:05:21.000Z", "avg_line_length": 45.847826087, "max_line_length": 223, "alphanum_fraction": 0.6330962541, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2324}
|
[STATEMENT]
lemma finite_is_class: "finite {C. is_class P C}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {C. is_class P C}
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {C. is_class P C}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. finite {C. is_class P C}
[PROOF STEP]
have "{C. is_class P C} = dom (map_of P)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {C. is_class P C} = dom (map_of P)
[PROOF STEP]
by (simp add: is_class_def class_def dom_def)
[PROOF STATE]
proof (state)
this:
{C. is_class P C} = dom (map_of P)
goal (1 subgoal):
1. finite {C. is_class P C}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
{C. is_class P C} = dom (map_of P)
goal (1 subgoal):
1. finite {C. is_class P C}
[PROOF STEP]
by (simp add: finite_dom_map_of)
[PROOF STATE]
proof (state)
this:
finite {C. is_class P C}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 435, "file": "JinjaDCI_Common_Decl", "length": 7}
|
export ode_order_lowering
# Build the name of the order-lowered auxiliary variable: the independent
# variable's name is appended once per derivative order (e.g. x -> x_tt for
# order 2 with independent variable t).
function lower_varname(var::Variable, idv, order)
    # Order 0 is the variable itself; no renaming needed.
    order == 0 && return var
    name = Symbol(var.name, :_, string(idv.name)^order)
    return Variable(name; known = var.known)
end
# Entry point: lower a higher-order ODESystem to an equivalent first-order one.
function ode_order_lowering(sys::ODESystem)
    eqs_lowered, _ = ode_order_lowering(sys.eqs, sys.iv)
    # States are the left-hand-side variables of the lowered equations;
    # parameters are carried over unchanged.
    ODESystem(eqs_lowered, sys.iv, [eq.x for eq in eqs_lowered], sys.ps)
end
# Core pass: rewrite a vector of higher-order DiffEqs as first-order
# equations, introducing one auxiliary variable per intermediate derivative.
function ode_order_lowering(eqs, iv)
    var_order = Dict{Variable,Int}()   # highest derivative order seen per variable
    vars = Variable[]                  # variables that need auxiliary chain equations
    new_eqs = similar(eqs, DiffEq)
    new_vars = Variable[]              # auxiliary variables introduced by the lowering
    for (i, eq) ∈ enumerate(eqs)
        var, maxorder = eq.x, eq.n
        # Record the maximum derivative order encountered for each variable.
        if maxorder > get(var_order, var, 0)
            var_order[var] = maxorder
            any(isequal(var), vars) || push!(vars, var)
        end
        # Replace the order-n derivative of var with the first derivative of
        # the order-(n-1) auxiliary variable; rename derivatives on the rhs too.
        var′ = lower_varname(eq.x, iv, eq.n - 1)
        rhs′ = rename(eq.rhs)
        new_eqs[i] = DiffEq(var′, 1, rhs′)
    end
    # Add the chain equations d(aux at order o-1)/d(iv) = aux at order o,
    # working downward from order-1 below the maximum.
    for var ∈ vars
        order = var_order[var]
        for o in (order-1):-1:1
            lvar = lower_varname(var, iv, o-1)
            rvar = lower_varname(var, iv, o)
            push!(new_vars, rvar)
            rhs = rvar(iv())
            eq = DiffEq(lvar, 1, rhs)
            push!(new_eqs, eq)
        end
    end
    return (new_eqs, new_vars)
end
# Recursively replace derivative sub-expressions in an expression tree with
# calls to the corresponding lowered auxiliary variables.
function rename(O::Expression)
    # Non-Operation expressions (constants, plain variables) are unchanged.
    isa(O, Operation) || return O
    if is_derivative(O)
        (x, t, order) = flatten_differential(O)
        return lower_varname(x.op, t.op, order)(x.args...)
    end
    # Rebuild the operation with recursively renamed arguments.
    return Operation(O.op, rename.(O.args))
end
|
{"hexsha": "8dcbc12758f313def6f171760cf2afa10f9a3e1d", "size": 1515, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/systems/diffeqs/first_order_transform.jl", "max_stars_repo_name": "shashi/ModelingToolkit.jl", "max_stars_repo_head_hexsha": "600ea214f19ed5b9d8c5d355e15e7ff4d9e9115b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/systems/diffeqs/first_order_transform.jl", "max_issues_repo_name": "shashi/ModelingToolkit.jl", "max_issues_repo_head_hexsha": "600ea214f19ed5b9d8c5d355e15e7ff4d9e9115b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/systems/diffeqs/first_order_transform.jl", "max_forks_repo_name": "shashi/ModelingToolkit.jl", "max_forks_repo_head_hexsha": "600ea214f19ed5b9d8c5d355e15e7ff4d9e9115b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5454545455, "max_line_length": 72, "alphanum_fraction": 0.5900990099, "num_tokens": 450}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
different transformation functions
in a neural network
synthetic classifiaction problem
possibility to restrain information
python 3.7.7
numpy 1.19.2
scikit-learn 0.24.1
tensorflow 2.0.0
keras 2.3.1
matplitlib 3.3.2
author: adrienne bohlmann
"""
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import *
from sklearn import metrics, svm
from tensorflow import keras, random
from tensorflow.keras.utils import plot_model
from tensorflow.keras import layers
import matplotlib.pyplot as plt
##############################################################################
# reproducible or random?

# RANDOMNESS for checking robustness
# nR = None

# REPRODUCIBLE
# there are 3 random sources
# 1. in the creation of the data
# 2. in train-test-split
# 3. random weight initialization in model.fit
# for reproducible results fix everything
# for robustness-testing introduce randomness iteratively

# seed for synthetic data creation (None => fresh randomness each run)
nRdata = 22
# nRdata = None

# seed for train-test-split (None => fresh randomness each run)
nRtts = 22
# nRtts = None

# fix the model.fit (random weights) via the TensorFlow global seed
random.set_seed(22)

##############################################################################
# restrain the available information?
# number of features kept as model inputs:
# min: 2
# max: number of features (default = 5) = full information
n_X = 3
class synth_data:
    """Synthetic binary-classification data plus train/test splits scaled
    with several sklearn transformers (Standard, Normalizer, Quantile).

    NOTE(review): once ``tts()`` has run, the instance attributes assigned
    there (self.y_train, self.X_train, self.X_train_S_scaled, ...) shadow
    the zero-argument accessor methods of the same names defined below,
    making those methods unreachable on the instance — confirm whether the
    accessors can be removed.
    """
    def __init__(self
                 , samples = 300   # sample size, < 800 to observe behaviour before normal distribution from large numbers kicks in
                 , features = 5    # true number of explanatory variables for synthetic binary classification problem
                 , shift = 2.0     # shift away from E(X) = 0
                 , exp = False     # if True, X = exp(X) to simulate nonlinearity
                 ):
        # make synthetic data:
        # random n-class classification problem, seeded by module-level nRdata
        self.X, self.y = make_classification(n_samples = samples, n_features = features, shift = shift, random_state=(nRdata))
        # exponential transform for simulating a simple nonlinear relationship
        if exp == True:
            self.X = np.exp(self.X)

    # train test split
    def tts(self):
        # train test split keeping only the first n_X explanatory variables
        # (n_X is module-level); stratify preserves the class balance
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X[:,0:n_X], self.y, random_state=(nRtts), stratify=self.y)

        # standardize features (fit on the training split only)
        scaler_S = StandardScaler()
        self.scaler_S = scaler_S
        self.scaler_S.fit(self.X_train)
        self.X_train_S_scaled = scaler_S.transform(self.X_train)
        self.X_test_S_scaled = scaler_S.transform(self.X_test)

        # scale each sample to unit norm
        scaler_N = Normalizer()
        self.scaler_N = scaler_N
        self.scaler_N.fit(self.X_train)
        self.X_train_N_scaled = scaler_N.transform(self.X_train)
        self.X_test_N_scaled = scaler_N.transform(self.X_test)

        # map features onto a normal distribution via quantiles
        scaler_QN = QuantileTransformer(output_distribution='normal', n_quantiles = 100)
        self.scaler_QN = scaler_QN
        self.scaler_QN.fit(self.X_train)
        self.X_train_QN_scaled = scaler_QN.transform(self.X_train)
        self.X_test_QN_scaled = scaler_QN.transform(self.X_test)

    # NOTE(review): the accessor methods below are shadowed by the same-named
    # attributes after tts() has run (see class docstring).
    def y_train(self):
        return self.y_train
    def y_test(self):
        return self.y_test
    def get_unscaled_data(self):
        return self.y_train, self.y_test, self.X_train, self.X_test
    def X_train(self):
        return self.X_train
    def X_test(self):
        return self.X_test
    def X_train_S_scaled(self):
        return self.X_train_S_scaled
    def X_test_S_scaled(self):
        return self.X_test_S_scaled
    def X_train_N_scaled(self):
        return self.X_train_N_scaled
    def X_test_N_scaled(self):
        return self.X_test_N_scaled
    def X_train_QN_scaled(self):
        return self.X_train_QN_scaled
    def X_test_QN_scaled(self):
        return self.X_test_QN_scaled

    # plot histograms of the first two features, raw and for each scaler
    def plot_hist(self):
        # unscaled data
        plt.hist(self.X_train[:, 0], alpha=0.6)
        plt.hist(self.X_train[:, 1], alpha=0.6)
        plt.title('unscaled training data')
        plt.show()

        # scaled data
        plt.hist(self.X_train_S_scaled[:, 0], alpha=0.6)
        plt.hist(self.X_train_S_scaled[:, 1], alpha=0.6)
        hist_title = str(self.scaler_S) + ' training data'
        plt.title(hist_title)
        plt.show()

        plt.hist(self.X_train_N_scaled[:, 0], alpha=0.6)
        plt.hist(self.X_train_N_scaled[:, 1], alpha=0.6)
        hist_title = str(self.scaler_N) + ' training data'
        plt.title(hist_title)
        plt.show()

        plt.hist(self.X_train_QN_scaled[:, 0], alpha=0.6)
        plt.hist(self.X_train_QN_scaled[:, 1], alpha=0.6)
        hist_title = str(self.scaler_QN) + ' training data'
        plt.title(hist_title)
        plt.show()
class nn_model:
    """Small feed-forward Keras classifier: two ReLU hidden layers and a
    2-unit softmax output, compiled with Adam and sparse categorical
    cross-entropy. Input width follows the module-level n_X setting.
    """
    def __init__(self, h1 = 21, h2 = 5, lr = 1e-3):
        # build a keras model with the functional API
        # learning rate
        self.learning_rate = lr
        # optimizer
        opt = keras.optimizers.Adam(learning_rate = self.learning_rate)
        # model itself
        inputs = keras.Input(shape=(n_X,), name = 'input')
        hidden = layers.Dense(h1, activation='relu'
                              , kernel_initializer='he_normal'
                              , name = 'hidden1_relu')(inputs)
        hidden = layers.Dense(h2, activation='relu'
                              , kernel_initializer='he_normal'
                              , name = 'hidden2_relu')(hidden)
        #hidden = layers.Dense(7, activation='relu', name = 'hidden3_relu')(hidden)
        out = layers.Dense(2, activation='softmax', name = 'output_softmax')(hidden)
        # put it together
        self.model = keras.Model(inputs, outputs=[out], name='nn_model')
        plot_model(self.model, to_file='nn_model.png', show_shapes=True)
        # compile
        self.model.compile(optimizer=opt
                           , loss=['sparse_categorical_crossentropy']
                           , metrics=['accuracy']
                           )
        print('NN created and compiled, shape saved in wd as nn_model.png')

    def fit(self
            , yy_train, yy_test, XX_train, XX_test
            , n_epochs= 30
            ):
        # NOTE(review): weights are NOT reinitialized between calls, so
        # repeated fits keep training the same model — confirm intended.
        self.history = self.model.fit(XX_train, yy_train, epochs=n_epochs, validation_data=(XX_test, yy_test))

    # plot loss of the most recent fit
    def plot_loss(self):
        plt.plot(self.history.history['loss'], color='brown')
        plt.plot(self.history.history['val_loss'], color='orange')
        plt.title('model loss, train = blue, test = red')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'])
        self.plt_loss = plt

    # plot accuracy of the most recent fit
    def plot_acc(self):
        plt.plot(self.history.history['accuracy'], color='brown')
        plt.plot(self.history.history['val_accuracy'], color='orange')
        plt.title('model accuracy, train = blue, test = red')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'validation'])
        self.plt_acc = plt
##############################################################################
# implementation

# create synthetic data
data = synth_data()
# have a look
data.tts()
data.plot_hist()

# implement the keras model
model = nn_model(h1 = 33, h2 = 11)

# epochs per fit call
eps = 21

# cross validation loop
# NOTE(review): with nRtts fixed, tts() reproduces the same split on every
# iteration, and model weights persist across fit calls — confirm intended.
for i in range(7):
    # train test split and scale
    data.tts()

    # unscaled data
    model.fit(yy_train = data.y_train, yy_test = data.y_test
              , XX_train = data.X_train, XX_test = data.X_test
              , n_epochs = eps)
    plt.figure(num=1)
    model.plot_loss()
    plt.title('unscaled loss')
    plt.figure(num=2)
    model.plot_acc()
    plt.title('unscaled acc')

    # StandardScaler
    model.fit(yy_train = data.y_train, yy_test = data.y_test
              , XX_train = data.X_train_S_scaled, XX_test = data.X_test_S_scaled
              , n_epochs = eps)
    plt.figure(num=3)
    model.plot_loss()
    plt.title('Standard scaled loss')
    plt.figure(num=4)
    model.plot_acc()
    plt.title('Standard scaled acc')

    # Normalizer
    model.fit(yy_train = data.y_train, yy_test = data.y_test
              , XX_train = data.X_train_N_scaled, XX_test = data.X_test_N_scaled
              , n_epochs = eps)
    plt.figure(num=5)
    model.plot_loss()
    plt.title('Normalizer scaled loss')
    plt.figure(num=6)
    model.plot_acc()
    plt.title('Normalizer scaled acc')

    # Quantile Transformer (normalization)
    model.fit(yy_train = data.y_train, yy_test = data.y_test
              , XX_train = data.X_train_QN_scaled, XX_test = data.X_test_QN_scaled
              , n_epochs = eps)
    plt.figure(num=7)
    model.plot_loss()
    plt.title('QuantileTrans (norm) scaled loss')
    plt.figure(num=8)
    model.plot_acc()
    plt.title('QuantileTrans (norm) scaled acc')
|
{"hexsha": "c56006cb37616bee24e30821ee280943df5b49ed", "size": 9070, "ext": "py", "lang": "Python", "max_stars_repo_path": "synth_classifiaction_NN_transform.py", "max_stars_repo_name": "adriennebohlmann/synthPlay", "max_stars_repo_head_hexsha": "1496119348369fe5d1485f37f632ef2e3e7623e6", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synth_classifiaction_NN_transform.py", "max_issues_repo_name": "adriennebohlmann/synthPlay", "max_issues_repo_head_hexsha": "1496119348369fe5d1485f37f632ef2e3e7623e6", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synth_classifiaction_NN_transform.py", "max_forks_repo_name": "adriennebohlmann/synthPlay", "max_forks_repo_head_hexsha": "1496119348369fe5d1485f37f632ef2e3e7623e6", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9339933993, "max_line_length": 143, "alphanum_fraction": 0.6147739802, "include": true, "reason": "import numpy", "num_tokens": 2147}
|
% SPDX-License-Identifier: MIT
% Copyright (c) 2017-2020 Forschungszentrum Juelich GmbH
% This code is licensed under MIT license (see the LICENSE file for details)
%
\documentclass[
t, % align text inside frame to t=top, b=bottom, c=center
10pt, % 8pt, 9pt, 10pt, 11pt, 12pt, 14pt, 17pt, 20pt available as text font
aspectratio=1610, % select your aspect ratio 4:3=43, 16:9=169, 16:10=1610
ngerman,
english,
%handout,
]{beamer}
\usetheme{Juelich}
\usepackage{babel}
\usepackage[utf8]{inputenc}
\usepackage{verbatim}
\newcommand{\templateversion}{18.10}
\newcommand{\tutorialversion}{18.10}
\title{\LaTeX{} Beamer Template}
\subtitle{Howto for {\tt beamer}-Slides v\templateversion}
\author{Template Version \templateversion~~\vrule width0.3pt~~Tutorial Version \tutorialversion}
\institute[My Institute]{My Institute}
\date{\today}
\titlegraphic{\includegraphics[width=\paperwidth]{placeholder}}
\begin{document}
% only use \maketitle to set your titlepage
\fzjset{title page=image}
\maketitle
\fzjset{title page=text}
\maketitle
\part{Introduction}
\makepart
\begin{frame}[label=introduction,fragile]
\frametitle{{\LaTeX} {\tt .sty} Files -- Version 18.10}
\begin{itemize}
\item Deprecate enabled compat mode for old FZJ colors.
\item Using the old colors requires the switch \verb!\fzjset{compat mode=enabled}!
\item Failing to do so, will result in missing colors and a broken build.
\end{itemize}
\end{frame}
\begin{frame}[label=introduction]
\frametitle{{\LaTeX} {\tt .sty} Files -- Version 18.09}
\begin{itemize}
\item first version with new corporate design
\item tutorial is not complete yet, will be updated periodically
\end{itemize}
\end{frame}
\input{part_installation}
\input{part_examples}
\input{part_fzjcolors}
\input{part_localization}
\input{part_tweaks}
\input{part_handouts}
\part{Aspect Ratio}
\begin{frame}[fragile]
\frametitle{Aspect Ratio}
The documentclass allows several ratios for the slide. Just change the variable \verb+aspectratio+.
\begin{itemize}
\item \verb+aspectratio=43+ gives classical 4:3 ratio
\item \verb+aspectratio=169+ gives classical 16:9 ratio
\item \verb+aspectratio=1610+ gives classical 16:10 ratio
\end{itemize}
\end{frame}
\part{Style}
\begin{frame}[fragile]
\frametitle{Style}
The design allows two styles.
\begin{block}{Style with Image}
\begin{itemize}
\item for the title page: \verb+\fzjset{title page=image}+
\item for the part page: \verb+\fzjset{section page=image}+
\item for the section page: \verb+\fzjset{section page=image}+
\end{itemize}
\end{block}
\begin{block}{Style with Text}
\begin{itemize}
\item for the title page: \verb+\fzjset{title page=text}+
\item for the part page: \verb+\fzjset{section page=text}+
\item for the section page: \verb+\fzjset{section page=text}+
\end{itemize}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Allcaps or Regular Title Fonts}
It is possible to switch the style of the font via the options
\begin{itemize}
\item \verb+\fzjset{title=allcaps}+ to set the title in allcaps
\item \verb+\fzjset{title=regular}+ to set the title regular
\item \verb+\fzjset{subtitle=allcaps}+ to set the title in allcaps for short text
\item \verb+\fzjset{subtitle=regular}+ to set the title regular and in a smaller font for long text
\item \verb+\fzjset{part=allcaps}+ to set the part in allcaps for short text
\item \verb+\fzjset{part=regular}+ to set the part regular and in a smaller font for long text
\item \verb+\fzjset{frametitle=allcaps}+ to set the frametitle in allcaps for short text
\item \verb+\fzjset{frametitle=regular}+ to set the frametitle regular font for long text
\end{itemize}
\end{frame}
{\setbeamertemplate{background}{\begin{tikzpicture}[overlay, remember picture]
\draw ([yshift=-0.05\paperheight, xshift=0.05\paperheight]current page.north west) |- ([yshift=0.05\paperheight, xshift=-0.05\paperheight]current page.south east) |- cycle;
\end{tikzpicture}
}
\begin{frame}
\frametitle{Margins, Sizes}
This slide has an outline on the background canvas
\begin{itemize}
\item One can see the alignments in a normal list
\end{itemize}
\begin{block}{And also}
\begin{itemize}
\item for lists
\item in a block
\end{itemize}
\end{block}
\end{frame}
}
\fzjset{title=regular}
\fzjset{title page=text}
\subtitle{Now in text only mode and regular text}
\maketitle
\fzjset{title page=image}
\subtitle{Now back in image mode}
\maketitle
\part{This is a part page}
\fzjset{part page=text}
\makepart
\section{This is a section page}
\fzjset{section page=text}
\subtitle{This is a section page}
\makesection
\fzjset{frametitle=allcaps}
\begin{frame}
\frametitle{Title in allcaps}
\end{frame}
\fzjset{frametitle=regular}
\begin{frame}
\frametitle{Title in a regular style}
\end{frame}
\section{Bugs}
\begin{frame}[fragile,label=bugs]
\frametitle{Fixed Bugs}
\begin{block}{Periodically}
The \verb+.zip+ archive will be updated periodically for bug fixes. The name and URL of the archive will be the same.
\end{block}
\begin{block}{More Pitfalls/bugs?}
Please report them to {\tt i.kabadshow@fz-juelich.de}
\end{block}
\end{frame}
\part{Extensions}
\makepart
\begin{frame}[fragile]
\frametitle{Poster with \LaTeX -Beamer}
To create scientific posters with {\LaTeX} the \verb!beamerposter! extension
can be used. Template will be provided soon.
\begin{block}{More Information at}
\small
\url{http://www-i6.informatik.rwth-aachen.de/~dreuw/latexbeamerposter.php}
\end{block}
\end{frame}
\part{Contact Information}
\begin{frame}[c,label=contact]
\frametitle{Contact}
\begin{center}
\Large \emph{Thank you for using this template!}
\end{center}
\begin{block}{Enhance Missing Functionality Yourself!}
Please send your enhancements along with a short description to {\tt i.kabadshow@fz-juelich.de}
\end{block}
\begin{block}{Report Problems}
Please report problems with the template or uncommon behavior to {\tt i.kabadshow@fz-juelich.de}
\end{block}
\end{frame}
\end{document}
|
{"hexsha": "a4c298e52fec0255418591dd6c35ae38ff6d5801", "size": 6499, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tutorial/tutorial.tex", "max_stars_repo_name": "SFKV/Hackathon-Talks", "max_stars_repo_head_hexsha": "ca7040b8877e934ad030bd5d29fa7d449120706c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tutorial/tutorial.tex", "max_issues_repo_name": "SFKV/Hackathon-Talks", "max_issues_repo_head_hexsha": "ca7040b8877e934ad030bd5d29fa7d449120706c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorial/tutorial.tex", "max_forks_repo_name": "SFKV/Hackathon-Talks", "max_forks_repo_head_hexsha": "ca7040b8877e934ad030bd5d29fa7d449120706c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6582914573, "max_line_length": 174, "alphanum_fraction": 0.6925680874, "num_tokens": 1931}
|
/* bst/gsl_bst_avl.h
*
* Copyright (C) 2018 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_BST_AVL_H__
#define __GSL_BST_AVL_H__
#include <gsl/gsl_math.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
#ifndef GSL_BST_AVL_MAX_HEIGHT
#define GSL_BST_AVL_MAX_HEIGHT 32
#endif
/* AVL node */
struct gsl_bst_avl_node
{
  struct gsl_bst_avl_node *avl_link[2]; /* subtrees: avl_link[0] and avl_link[1] */
  void *avl_data;                       /* pointer to data */
  signed char avl_balance;              /* balance factor */
};

/* tree data structure */
typedef struct
{
  struct gsl_bst_avl_node *avl_root;    /* tree's root */
  gsl_bst_cmp_function *avl_compare;    /* comparison function */
  void *avl_param;                      /* extra argument to |avl_compare| */
  const gsl_bst_allocator *avl_alloc;   /* memory allocator */
  size_t avl_count;                     /* number of items in tree */
  unsigned long avl_generation;         /* generation number */
} gsl_bst_avl_table;

/* AVL traverser structure */
typedef struct
{
  const gsl_bst_avl_table *avl_table;   /* tree being traversed */
  struct gsl_bst_avl_node *avl_node;    /* current node in tree */
  struct gsl_bst_avl_node *avl_stack[GSL_BST_AVL_MAX_HEIGHT]; /* all the nodes above |avl_node| */
  size_t avl_height;                    /* number of nodes in |avl_stack| */
  unsigned long avl_generation;         /* generation number */
} gsl_bst_avl_traverser;
__END_DECLS
#endif /* __GSL_BST_AVL_H__ */
|
{"hexsha": "6560777ce27a479a16e81f1842ea1e9d67cf318d", "size": 2461, "ext": "h", "lang": "C", "max_stars_repo_path": "include/gsl/gsl_bst_avl.h", "max_stars_repo_name": "vinej/sml", "max_stars_repo_head_hexsha": "115c007926ca80d51a37cdf887b5252338d8bc8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/gsl/gsl_bst_avl.h", "max_issues_repo_name": "vinej/sml", "max_issues_repo_head_hexsha": "115c007926ca80d51a37cdf887b5252338d8bc8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/gsl/gsl_bst_avl.h", "max_forks_repo_name": "vinej/sml", "max_forks_repo_head_hexsha": "115c007926ca80d51a37cdf887b5252338d8bc8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7123287671, "max_line_length": 99, "alphanum_fraction": 0.6663957741, "num_tokens": 594}
|
\section{Use of Recursive Functions}
\begin{lstlisting}[language=Haskell]
module ProgExercises.FS_2019_ProgExer03Prob_V01 where
-- Develop some functions using recursion over lists.
-- Higher-order functions are not required yet.
toBeImplemented = undefined
--delDups deletes duplicates from a list
testDelDups =
delDups [1,2,3,4,5] == [1,2,3,4,5] &&
delDups [1,1,1,1,1] == [1] &&
(delDups [1,2,2,4,1] == [1,2,4] || delDups [1,2,2,4,1] == [2,4,1]) &&
delDups [] == ([] :: [Int])
delDups :: Eq a => [a] -> [a]
delDups [] = []
delDups (x : xs)
| x `elem` xs = delDups xs
| otherwise = x : delDups xs
-- removeEachSnd removes each second element from a list.
testRemoveEachSnd =
removeEachSnd [1,2,3,4,5,6,7,8] == [1,3,5,7] &&
removeEachSnd [1,2,3,4,5,6,7] == [1,3,5,7] &&
removeEachSnd [1] == [1] &&
removeEachSnd [] == ([] :: [Int])
removeEachSnd :: [a] -> [a]
removeEachSnd [] = []
removeEachSnd (a : []) = [a]
removeEachSnd (x : xs : xz) = x : removeEachSnd xz
-- makePairs pairs adjacent elements of a list
testMakePairs =
makePairs [1,2,3,4,5,6,7,8] == [(1,2),(3,4),(5,6),(7,8)] &&
makePairs [1,2,3,4,5,6,7] == [(1,2),(3,4),(5,6)] &&
makePairs [1,2] == [(1,2)] &&
makePairs [1] == [] &&
makePairs [] == ([] :: [(Int,Int)])
makePairs :: [a] -> [(a, a)]
makePairs [] = []
makePairs (a : []) = []
makePairs (x : xs : xz) = (x, xs) : makePairs xz
testMakePairsV2 =
makePairsV2 [1,2,3,4,5,6,7,8] == [(1,2),(3,4),(5,6),(7,8)] &&
makePairsV2 [1,2,3,4,5,6,7] == [(1,2),(3,4),(5,6)] &&
makePairsV2 [1,2] == [(1,2)] &&
makePairsV2 [1] == [] &&
makePairsV2 [] == ([] :: [(Int,Int)])
makePairsV2 :: [a] -> [(a, a)]
makePairsV2 [] = []
makePairsV2 xs@(_ : ys) = removeEachSnd (zip xs ys)
-- halve divides a list into two lists containing each second element,
-- the first list beginning with the first,
-- the second list beginning with the second
testHalve =
halve [1,2,3,4,5,6] == ([1,3,5], [2,4,6]) &&
halve [1,2,3,4,5] == ([1,3,5], [2,4]) &&
halve [1] == ([1], []) &&
halve [] == ([], [] :: [Int])
halve :: [a] -> ([a], [a])
halve xs = h xs [] []
where
h (x1 : x2 : xs) accu1 accu2 = h xs (x1 : accu1) (x2 : accu2)
h [x] accu1 accu2 = h [] (x : accu1) accu2
h [] accu1 accu2 = (reverse accu1, reverse accu2)
-- divideList divides a list into chunks of length n each, except
-- of the last chunk, which might be shorter
-- Precondition:
-- n > 0
-- Theorem:
-- For all n > 0 and all xs: concat (divideList n xs) == xs
testDivideList =
divideList 3 [1 .. 10] == [[1,2,3],[4,5,6],[7,8,9],[10]] &&
divideList 3 [1 .. 9] == [[1,2,3],[4,5,6],[7,8,9]] &&
divideList 3 [1] == [[1]] &&
divideList 3 [] == ([] :: [[Int]])
divideList :: Int -> [a] -> [[a]]
divideList _ [] = []
divideList n xs = take n xs : divideList n (drop n xs)
\end{lstlisting}
\clearpage
|
{"hexsha": "5c4e4ccdfe230ab8babd60dabaa91edfed8ca0de", "size": 3023, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "TSM_AdvPrPa/Excercises/Haskell/06_RecursiveFunctions.tex", "max_stars_repo_name": "nortismo/mse-documentations", "max_stars_repo_head_hexsha": "cc67637785237d630f077a863edcd5f49aa52b59", "max_stars_repo_licenses": ["Beerware"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TSM_AdvPrPa/Excercises/Haskell/06_RecursiveFunctions.tex", "max_issues_repo_name": "nortismo/mse-documentations", "max_issues_repo_head_hexsha": "cc67637785237d630f077a863edcd5f49aa52b59", "max_issues_repo_licenses": ["Beerware"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TSM_AdvPrPa/Excercises/Haskell/06_RecursiveFunctions.tex", "max_forks_repo_name": "nortismo/mse-documentations", "max_forks_repo_head_hexsha": "cc67637785237d630f077a863edcd5f49aa52b59", "max_forks_repo_licenses": ["Beerware"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-15T07:10:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-15T07:10:24.000Z", "avg_line_length": 31.4895833333, "max_line_length": 71, "alphanum_fraction": 0.5256367847, "num_tokens": 1182}
|
import pandas as pd
pd.options.mode.chained_assignment = None
from pkg_resources import parse_version
import warnings
from ete3 import NCBITaxa
import numpy as np
import argparse
import tarfile
import re
import math
# Helper function to import tables
def safely_read_csv(path, **kwargs):
    """Read a delimited table, returning None instead of raising on empty input.

    path   -- anything accepted by pandas.read_csv (path or file-like object)
    kwargs -- forwarded verbatim to pandas.read_csv (e.g. sep)
    """
    try:
        return pd.read_csv(path, **kwargs)
    except pd.errors.EmptyDataError:
        # Empty inputs are expected (e.g. a BLAST run with no hits); signal
        # this with an explicit None rather than an implicit fall-through.
        return None
class pandasVersionWarning(UserWarning):
    """Warning emitted when the installed pandas is older than 0.24.0."""
    pass
# pandas < 0.24.0 lacks the nullable integer ("Int64") dtype used when the
# consensus table is assembled, so taxon ids degrade to floats and column
# order is not preserved; warn the user instead of failing outright.
if parse_version(pd.__version__) < parse_version("0.24.0"):
    warnings.warn(
        "Pandas version is {} and taxon ids will be floating point numbers in results\nand column order is not preserved. To avoid this, please install pandas>=0.24.0.".format(
            pd.__version__
        ),
        pandasVersionWarning,
    )
# Create BLAST DB and query classes
class BlastDB:
    """Thin adapter over ete3's NCBITaxa local taxonomy database."""
    def __init__(self, dbfile=None):
        # dbfile: optional path to the NCBITaxa sqlite file; None lets ete3
        # use its default database location.
        self.ncbi = NCBITaxa(dbfile=dbfile)
    def get_lineage(self, taxid):
        """Delegate to NCBITaxa.get_lineage for a single taxid."""
        return self.ncbi.get_lineage(taxid)
    def get_rank(self, taxids):
        """Delegate to NCBITaxa.get_rank for a collection of taxids."""
        return self.ncbi.get_rank(taxids)
    def get_topology(self, lineage):
        """Delegate to NCBITaxa.get_topology for the given taxids."""
        return self.ncbi.get_topology(lineage)
    def translate_to_names(self, taxids):
        """Delegate to NCBITaxa.translate_to_names for the given taxids."""
        return self.ncbi.translate_to_names(taxids)
class BlastTaxonomy(BlastDB):
    """Derive a per-query consensus taxonomy from a table of BLAST hits.

    results           -- DataFrame of BLAST hits; must contain the columns
                         named by query_key and taxid_key plus 'pident'
    query_key         -- column identifying the query sequence
    taxid_key         -- column holding the subject NCBI taxid
    pp_sway           -- percent-identity tolerance below the best hit that
                         still counts towards the consensus
    ranks_of_interest -- taxonomic ranks reported in the output
    dbfile            -- optional NCBITaxa sqlite database path
    """
    def __init__(
        self,
        results,
        query_key="qseqid",
        taxid_key="staxid",
        pp_sway=1,
        ranks_of_interest=None,
        dbfile=None,
    ):
        BlastDB.__init__(self, dbfile)
        self.query_key = query_key
        self.taxid_key = taxid_key
        # Hits are processed one query at a time.
        self.by_query = results.groupby(self.query_key)
        self.pp_sway = pp_sway
        if ranks_of_interest:
            self.ranks_of_interest = ranks_of_interest
        else:
            self.ranks_of_interest = [
                "superkingdom",
                "order",
                "family",
                "genus",
                "species",
            ]
        # Fallback taxid used when no consensus can be derived; presumably
        # the NCBI taxid for 'unidentified' -- TODO confirm.
        self.unidentified = 32644
    def get_consensus_taxonomy(self):
        """Return a DataFrame with one consensus-taxonomy row per query."""
        consensus_taxonomy = []
        for query, hits in self.by_query:
            if hits.shape[0] > 1:
                # Keep only one top hit (by pident) from each taxon
                hits["pident_rank"] = hits.groupby([self.taxid_key])["pident"].rank(
                    method="first", ascending=False
                )
                hits = hits[hits["pident_rank"] == 1]
            # Try to remove unidentified taxa; NaN taxids (no subject taxid
            # reported) are labelled "unidentified" as well.
            hits["name"] = hits[self.taxid_key].apply(
                lambda x: "unidentified"
                if math.isnan(x)
                else self.translate_to_names([x])[0]
            )
            unidentified = hits["name"].apply(
                lambda x: bool(re.search("unident", x))
            )
            identified = np.invert(unidentified)
            if sum(identified) >= 1:
                # Keeping only identified taxids
                hits = hits[identified]
                # Filtering by percent identity: keep hits within pp_sway
                # of the best hit.
                pident_threshold = hits["pident"].aggregate("max") - self.pp_sway
                within = hits["pident"].apply(lambda x: x >= pident_threshold)
                hits_filtered = hits[within]
                # Getting consensus taxonomy
                taxlist = hits_filtered[self.taxid_key].tolist()
                if len(taxlist) > 1:
                    tree = self.get_topology(taxlist)
                    # NOTE(review): every non-root node resets `consensus`
                    # back to taxlist, so the root assignment only survives
                    # for a single-node topology; a `break` after the root
                    # (or checking only the root) may have been intended.
                    # With len(consensus) > 1 the get_lineage(*consensus)
                    # call below passes extra positional args -- confirm.
                    for node in tree.traverse():
                        if node.is_root():
                            consensus = [node.name]
                        else:
                            consensus = taxlist
                else:
                    # NOTE(review): a single surviving taxid is mapped to
                    # the 'unidentified' fallback rather than reported
                    # directly -- confirm this is intended.
                    consensus = [self.unidentified]
            else:
                # No identified hits at all: fall back to every taxid seen.
                consensus = hits[self.taxid_key].tolist()
            lineage = self.get_lineage(*consensus)
            ranks = self.get_rank(lineage)
            # Keep only the ranks of interest, keyed by rank name.
            con_lin = {
                rank: id for id, rank in ranks.items() if rank in self.ranks_of_interest
            }
            lin_names = {
                rank + "_name": self.translate_to_names([id])[0]
                for rank, id in con_lin.items()
            }
            consensus_taxonomy.append(
                dict(
                    {
                        "query": query,
                        "consensus": consensus[0],
                        "pident": hits["pident"].aggregate("max"),
                        "hits": hits.shape[0],
                    },
                    **con_lin,
                    **lin_names
                )
            )
        consensus_taxonomy = pd.DataFrame(consensus_taxonomy)
        # Convert tax_ids to integers (nullable Int64 keeps NaNs intact).
        ranks_avail = [
            i for i in consensus_taxonomy.columns if i in self.ranks_of_interest
        ]
        consensus_taxonomy[ranks_avail] = consensus_taxonomy[ranks_avail].apply(
            lambda x: pd.Series(x, dtype="Int64")
        )
        return consensus_taxonomy
def blast_taxonomy(input, output, sep="\t", **kwargs):
    """Compute consensus taxonomy from BLAST hit tables and write a CSV.

    input  -- iterable of file paths; each is either a delimited text file
              or a tar archive whose members are delimited text files
    output -- destination CSV path
    sep    -- field separator of the input tables (default tab)
    kwargs -- forwarded to BlastTaxonomy (e.g. taxid_key, pp_sway, dbfile)
    """
    run = []
    for file in input:
        if tarfile.is_tarfile(file):
            # Concatenate all readable members of the archive into one table.
            with tarfile.open(file, "r:*") as tar:
                splits = []
                for member in tar.getmembers():
                    m = tar.extractfile(member)
                    # extractfile() returns None for directories and other
                    # special members; skip those instead of passing None on.
                    if m is not None:
                        splits.append(safely_read_csv(m, sep=sep))
                frames = [s for s in splits if s is not None]
                # An archive containing only empty tables contributes no
                # data; record None so the all-empty check below still works.
                run.append(pd.concat(frames) if frames else None)
        else:
            run.append(safely_read_csv(file, sep=sep))
    if all(v is None for v in run):
        # No input contained any rows; still write an (empty) CSV so
        # downstream tooling finds the expected output file.
        consensus_taxonomy = pd.DataFrame()
    else:
        # pd.concat silently drops None entries from the list.
        results = pd.concat(run, sort=False)
        bt = BlastTaxonomy(results, **kwargs)
        consensus_taxonomy = bt.get_consensus_taxonomy()
    with open(output, "w") as outfile:
        consensus_taxonomy.to_csv(outfile, index=False)
if __name__ == "__main__":
    # Snakemake wrapper entry point: the `snakemake` object is injected into
    # the module namespace by Snakemake's script/wrapper machinery.
    blast_taxonomy(snakemake.input, snakemake.output[0], **snakemake.params)
|
{"hexsha": "6beb3bd75e5066b6b53e5f18534ba109b21c4b3c", "size": 5972, "ext": "py", "lang": "Python", "max_stars_repo_path": "blast/taxonomy/wrapper.py", "max_stars_repo_name": "avilab/vs-wrappers", "max_stars_repo_head_hexsha": "65524b3566969da7909e0d08c423b6eecadea039", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-25T08:20:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-25T08:20:14.000Z", "max_issues_repo_path": "blast/taxonomy/wrapper.py", "max_issues_repo_name": "avilab/vs-wrappers", "max_issues_repo_head_hexsha": "65524b3566969da7909e0d08c423b6eecadea039", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-28T08:40:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-22T13:28:34.000Z", "max_forks_repo_path": "blast/taxonomy/wrapper.py", "max_forks_repo_name": "avilab/vs-wrappers", "max_forks_repo_head_hexsha": "65524b3566969da7909e0d08c423b6eecadea039", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3631284916, "max_line_length": 176, "alphanum_fraction": 0.542196919, "include": true, "reason": "import numpy", "num_tokens": 1257}
|
#================================AxionFuncs.py=================================#
# Written by C. O'Hare
# Contains:
# Functions for calculating Solar Axion fluxes for photon & electron coupling
# Functions for calculating X-ray spectra in a haloscope
# Functions to smear X-ray spectra by an angular resolution
# Script to generate binned X-ray data for given flux+experiment
#==============================================================================#
from numpy import pi, sqrt, exp, zeros, size, shape
from numpy import sinc, linspace, trapz, loadtxt, interp
from scipy.integrate import cumtrapz, quad
#==============================================================================#
def AxionFlux_Primakoff(gag,E):
    """Parameterised differential solar axion flux in [m^-1 yr^-1 keV^-1].

    gag -- axion-photon coupling in GeV^-1
    E   -- axion/X-ray energy in keV (scalar or numpy array)
    """
    prefactor = 6.02e10*(gag/1e-10)**2.0
    spectrum = (E**2.481)/exp(E/1.205)
    return prefactor*spectrum
def AxionFlux_Axioelectron(gae,E):
    """Differential solar axion flux from the axion-electron coupling.

    Reads the tabulated flux from 'gaeflux.txt'
    (column 1: energy [keV]; column 2: flux in 1/[10^19 keV cm^2 day];
    the table combines axio-recombination, Compton and bremsstrahlung)
    and rescales it to the requested coupling.
    Output: flux in cm^-1 s^-1 keV^-1.

    gae -- axion-electron coupling in GeV^-1
    E   -- axion/X-ray energy in keV
    """
    table = loadtxt('gaeflux.txt')
    energy_grid = table[:,0]
    flux_grid = table[:,1]
    # Rescale from the tabulated reference coupling and convert /day -> /s.
    rescale = 1e19*(gae/(0.511e-10))**2.0/(3600*24)
    return interp(E,energy_grid,flux_grid)*rescale
def AxionFlux_Compton(gae,E):
    """Parameterised Compton solar axion flux (unused in the paper).

    gae -- axion-electron coupling in GeV^-1
    E   -- energy in keV
    """
    prefactor = 13.314e6*(gae/1e-13)**2.0
    spectrum = (E**2.987)/exp(E*0.776)
    return prefactor*spectrum
def AxionFlux_Brem(gae,E):
    """Parameterised bremsstrahlung solar axion flux (unused in the paper).

    gae -- axion-electron coupling in GeV^-1
    E   -- energy in keV
    """
    prefactor = 26.311e8*(gae/1e-13)**2.0
    spectrum = E*exp(-0.77*E)/(1+0.667*E**1.278)
    return prefactor*spectrum
#==============================================================================#
#==============================================================================#
def PhotonNumber_Primakoff(Flux_scale,E,m_a,
                    Bfield=2.5,Exposure=1.5,Length=20.0,
                    N_bores=8,BoreDiameter=60.0,eps_D=0.7,eps_T=0.8):
    """Differential X-ray count dN/dE (keV^-1) for the axion-photon flux.

    Flux_scale -- normalisation scaling (set to 1 for the units used in
                  the paper)
    E          -- X-ray energy (keV)
    m_a        -- axion mass (eV)
    The remaining keywords are the helioscope configuration passed
    through to NgammaNorm.
    """
    norm,normq = NgammaNorm(Bfield,Exposure,Length,N_bores,BoreDiameter,eps_D,eps_T)
    scaled = Flux_scale*norm
    primakoff_spectrum = (E**2.481)/exp(E/1.205)
    # sinc-squared factor encodes the axion-photon oscillation suppression
    # at finite axion mass.
    oscillation = (sinc(normq/pi*m_a**2.0/E))**2.0
    return scaled*primakoff_spectrum*oscillation  # keV^-1
def PhotonNumber_Electron(Flux,E,m_a,
                    Bfield=2.5,Exposure=1.5,Length=20.0,
                    N_bores=8,BoreDiameter=60.0,eps_D=0.7,eps_T=0.8):
    """Differential X-ray count dN/dE (keV^-1) for the axion-electron flux.

    Flux -- pre-computed differential axion-electron flux evaluated at E
    E    -- X-ray energy (keV)
    m_a  -- axion mass (eV)
    The remaining keywords are the helioscope configuration passed
    through to NgammaNorm.
    """
    norm,normq = NgammaNorm(Bfield,Exposure,Length,N_bores,BoreDiameter,eps_D,eps_T)
    # Divide out the Primakoff flux normalisation baked into NgammaNorm,
    # since the flux itself is supplied by the caller here.
    rescaled = norm/(6.02e10)
    oscillation = (sinc(normq/pi*m_a**2.0/E))**2.0
    return rescaled*Flux*oscillation # keV^-1
def NgammaNorm(Bfield,Exposure,Length,N_bores,BoreDiameter,eps_D,eps_T):
    """Return (norm, normq) for the photon-count formulas.

    norm  -- overall photon-number normalisation (gives dN/dE in keV^-1)
    normq -- constant entering the axion-photon oscillation (sinc) term
    """
    bore_area = N_bores*pi*(BoreDiameter/2.0)**2.0  # cm^2
    length_eV = Length/1.97e-7                      # m -> eV^-1
    exposure_s = Exposure*3600*24*365               # yr -> s
    field = Bfield*(1e-19*195)                      # T -> natural units
    norm = 6.02e10*exposure_s*bore_area*eps_D*eps_T*(field*length_eV/2.0)**2.0
    normq = length_eV/(4*1000)
    return norm,normq
#==============================================================================#
#==============================================================================#
def smear(dN,E,E_res):
    """Convolve the spectrum dN(E) with a Gaussian of width E_res.

    dN    -- spectrum values on the grid E (arbitrary units)
    E     -- energy grid defining dN
    E_res -- Gaussian energy resolution to smear by
    """
    npts = size(dN)
    gauss_norm = 1.0/sqrt(2*pi*E_res**2.0)
    smeared = zeros(shape=npts)
    for idx in range(0,npts):
        # Each output energy is the full spectrum weighted by a Gaussian
        # centred at E[idx], integrated with the trapezoidal rule.
        kernel = gauss_norm*exp(-(E-E[idx])**2.0/(2*E_res**2.0))
        smeared[idx] = trapz(kernel*dN,E)
    return smeared
def smearFast(dN,E,E_res):
    """Windowed version of `smear`: faster, less accurate for E_res>100 eV.

    Convolves only within roughly +-3*E_res of each grid point, assuming a
    uniformly spaced energy grid E.
    """
    npts = size(dN)
    step = E[1]-E[0]
    halfwin = int(3*E_res/step)
    gauss_norm = 1.0/sqrt(2*pi*E_res**2.0)
    smeared = zeros(shape=npts)
    for idx in range(0,npts):
        lo = max(0,idx-halfwin)
        hi = min(npts-1,idx+halfwin)
        window = E[lo:hi]
        kernel = gauss_norm*exp(-(window-E[idx])**2.0/(2*E_res**2.0))
        smeared[idx] = trapz(kernel*dN[lo:hi],window)
    return smeared
#==============================================================================#
#==============================================================================#
def EnergyBins(E_min,E_max,nfine,nE_bins):
    """Build the fine integration grid and the energy-bin centres.

    E_min   -- energy threshold
    E_max   -- maximum energy
    nfine   -- number of fine energies per bin (trapz integration points)
    nE_bins -- number of energy bins between E_min and E_max
    Returns (Ei, E_bins): the flattened fine grid of nE_bins*nfine points
    and the nE_bins bin centres.
    """
    edges = linspace(E_min,E_max,nE_bins+1)
    offset = (E_max-E_min)/(nE_bins+1.0)
    centres = (edges[1:]+edges[:-1])/2
    fine = zeros(shape=(nE_bins*nfine))
    for b in range(0,nE_bins):
        # Fine grid inside bin b, stopping just short of the upper edge so
        # consecutive bins do not share a point.
        fine[b*nfine:(b+1)*nfine] = linspace(edges[b],edges[b+1]-offset/nfine,nfine)
    return fine,centres
def BinnedPhotonNumberTable(m_vals,E_min,E_max,nE_bins,coupling='Photon',\
                    nfine=100,res_on=False,\
                    Bfield=2.5,Exposure=1.5,Length=20.0,\
                    N_bores=8,BoreDiameter=60.0,eps_D=0.7,eps_T=0.8):
    # Generate tabulated values of binned X-ray data for a range of axion masses
    # OUTPUT: R1_tab = Tabulated values of the binned Xray counts (columns) vs axion mass (rows)
    #         R0 = massless data
    #         E_bins = centers of energy bins
    # INPUT: m_vals = masses to add to the tabulation
    #       E_min = threshold energy (also resolution if res_on=True)
    #       E_max = maximum energy
    #       nE_bins = number of energy bins
    #       coupling = 'Photon' or 'Electron' for g_ag or g_ae
    #       nfine = number of points to integrate over within one bin (controls accuracy)
    #       res_on = True/False, whether to do energy resolution integral or not
    #       remaining keywords = helioscope configuration (see NgammaNorm)
    nm = size(m_vals)
    R1_tab = zeros(shape=(nE_bins,nm))
    Ei,E_bins = EnergyBins(E_min,E_max,nfine,nE_bins)
    if coupling=='Electron':
        # Fixed reference coupling g_ae = 1e-10; the mass-dependent factor
        # is applied inside PhotonNumber_Electron.
        Flux = AxionFlux_Axioelectron(1e-10,Ei)
        dN_func = PhotonNumber_Electron
    else:
        Flux = 1.0
        dN_func = PhotonNumber_Primakoff
    # Tabulate m != 0 rates
    for j in range(0,nm):
        dN = dN_func(Flux,Ei,m_vals[j],\
                    Bfield,Exposure,Length,\
                    N_bores,BoreDiameter,eps_D,eps_T)
        if res_on:
            # E_min doubles as the Gaussian energy resolution here.
            dN = smear(dN,Ei,E_min)
        for i in range(0,nE_bins):
            Ebin = Ei[i*nfine:(i+1)*nfine]
            dNbin = dN[i*nfine:(i+1)*nfine]
            # Hand-rolled trapezoidal integral of dN/dE over bin i.
            R1_tab[i,j] = sum(0.5*(Ebin[1:]-Ebin[0:-1])*(dNbin[1:]+dNbin[0:-1]))
    # Get m = 0 rate
    R0 = zeros(shape=(nE_bins))
    # NOTE(review): this massless evaluation uses dN_func's *default*
    # helioscope parameters, not the Bfield/Exposure/... passed in above --
    # confirm whether that is intended.
    dN = dN_func(Flux,Ei,0.0)
    if res_on:
        dN = smear(dN,Ei,E_min)
    for i in range(0,nE_bins):
        Ebin = Ei[i*nfine:(i+1)*nfine]
        dNbin = dN[i*nfine:(i+1)*nfine]
        R0[i] = sum(0.5*(Ebin[1:]-Ebin[0:-1])*(dNbin[1:]+dNbin[0:-1]))
    return E_bins,R1_tab,R0
#==============================================================================#
|
{"hexsha": "4bc77d7318342c7d906e522b5cd4ed33bf434843", "size": 7611, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/AxionFuncs.py", "max_stars_repo_name": "cajohare/IAXOmass", "max_stars_repo_head_hexsha": "978306f2a504f5f05562a4f5fcecf55b789e8289", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-19T11:28:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-28T16:22:12.000Z", "max_issues_repo_path": "code/AxionFuncs.py", "max_issues_repo_name": "cajohare/IAXOmass", "max_issues_repo_head_hexsha": "978306f2a504f5f05562a4f5fcecf55b789e8289", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/AxionFuncs.py", "max_forks_repo_name": "cajohare/IAXOmass", "max_forks_repo_head_hexsha": "978306f2a504f5f05562a4f5fcecf55b789e8289", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-19T11:28:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-19T11:28:17.000Z", "avg_line_length": 40.2698412698, "max_line_length": 104, "alphanum_fraction": 0.5595848115, "include": true, "reason": "from numpy,from scipy", "num_tokens": 2391}
|
\begin{publications}
\section*{已发表论文}
\begin{enumerate}
\item
\textbf{Xuda~Zhou}, Zidong~Du, Shijin~Zhang, Lei~Zhang, Huiying~Lan, Shaoli~Liu, Ling~Li, Qi~Guo, Tianshi~Chen, Yunji~Chen: Addressing Sparsity in Deep Neural Networks. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2018.
\item
\textbf{Xuda~Zhou}, Zidong~Du, Qi Guo, Chengsi Liu, Chao Wang, Xuehai Zhou, Ling Li, Tianshi Chen, Yunji Chen: Cambricon-S: Addressing Irregularity in Sparse Neural Networks Through A Cooperative Software/Hardware Approach. MICRO 2018.
\end{enumerate}
\end{publications}
|
{"hexsha": "67f650a18cb36e036fa8ae0791b0b6f9f0d8bbaf", "size": 598, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/publications.tex", "max_stars_repo_name": "ustcanycall/graduate", "max_stars_repo_head_hexsha": "4c92658dfd4069b3697b1590a0b2b9b61ef35019", "max_stars_repo_licenses": ["LPPL-1.3c"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/publications.tex", "max_issues_repo_name": "ustcanycall/graduate", "max_issues_repo_head_hexsha": "4c92658dfd4069b3697b1590a0b2b9b61ef35019", "max_issues_repo_licenses": ["LPPL-1.3c"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/publications.tex", "max_forks_repo_name": "ustcanycall/graduate", "max_forks_repo_head_hexsha": "4c92658dfd4069b3697b1590a0b2b9b61ef35019", "max_forks_repo_licenses": ["LPPL-1.3c"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.0, "max_line_length": 253, "alphanum_fraction": 0.7775919732, "num_tokens": 201}
|
# Draw two concentric circles with a legend and equal axis scaling.
import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 150)
unit_x, unit_y = np.cos(theta), np.sin(theta)
double_x, double_y = 2 * unit_x, 2 * unit_y

palette = ['darkred', 'darkgreen']
fig, ax = plt.subplots()
ax.plot(unit_x, unit_y, color=palette[0], label='Inner', linewidth=3)
ax.plot(double_x, double_y, color=palette[1], label='Outer', linewidth=3)
ax.legend()
ax.axis('equal')
ax.margins(0.05)
plt.show()
|
{"hexsha": "2dda7635941960c988c96a63a49aba3f21b24b63", "size": 383, "ext": "py", "lang": "Python", "max_stars_repo_path": "anatomy-of-matplotlib-scipyconf-2018/solutions/4.1-legends_and_scaling.py", "max_stars_repo_name": "dlimpid/np-mpl-pd-nims-moderator-2019-04", "max_stars_repo_head_hexsha": "7b48116d9e4449e05b46b5f7abb651eb67a9215d", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-07T02:21:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-07T02:21:40.000Z", "max_issues_repo_path": "anatomy-of-matplotlib-scipyconf-2018/solutions/4.1-legends_and_scaling.py", "max_issues_repo_name": "dlimpid/np-mpl-pd-nims-moderator-2019-04", "max_issues_repo_head_hexsha": "7b48116d9e4449e05b46b5f7abb651eb67a9215d", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "anatomy-of-matplotlib-scipyconf-2018/solutions/4.1-legends_and_scaling.py", "max_forks_repo_name": "dlimpid/np-mpl-pd-nims-moderator-2019-04", "max_forks_repo_head_hexsha": "7b48116d9e4449e05b46b5f7abb651eb67a9215d", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-07T02:21:43.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-07T02:21:43.000Z", "avg_line_length": 20.1578947368, "max_line_length": 60, "alphanum_fraction": 0.6475195822, "include": true, "reason": "import numpy", "num_tokens": 139}
|
# -*- coding: utf-8 -*-
"""
Objective: create an airfoil with a leading edge restriction, same upper length
restriction, othogonal upper spars and constant thicknesses in four places
Created on Mon Oct 17 10:36:34 2016
@author: Pedro
"""
from __future__ import print_function
import os
import math
import numpy as np
from numpy.linalg import inv
from aeropy.airfoil_module import CST
from aeropy.CST.module_2D import *
# Quick trick: to morph the upper surface instead, the geometry is mirrored
# about the x axis when this flag is set (parent coefficients are swapped
# and negated before use).
inverted = False
# Selects 'backwards' or 'forwards' morphing formulation.
morphing_direction = 'forwards'
def calculate_c_baseline(c_L, Au_C, Au_L, deltaz,l_LE=0, eps_LE = 0, psi_P_u1 = 0):
    """Calculate the cruise chord such that the cruise and landing airfoils
    have the same upper-surface length (equations in New_CST.pdf).

    c_L      -- landing (parent) chord
    Au_C     -- upper shape coefficients of the cruise (child) airfoil
    Au_L     -- upper shape coefficients of the landing (parent) airfoil
    deltaz   -- trailing-edge thickness parameter
    l_LE     -- extra leading-edge length (non-dimensional)
    eps_LE   -- leading-edge strain
    psi_P_u1 -- non-dimensional end of the leading-edge region on the parent
    """
    def integrand(psi, Au, delta_xi ):
        # Arc-length element of the upper surface: sqrt(1 + (dxi/dpsi)^2).
        return np.sqrt(1 + dxi_u(psi, Au, delta_xi)**2)
    def f(c_C):
        """Fixed-point map: returns an improved estimate of c_C."""
        y_C, err = quad(integrand, 0, 1, args=(Au_C, deltaz/c_C))
        y_L, err = quad(integrand, psi_P_u1, 1, args=(Au_L, deltaz/c_L))
        y_LE, err = quad(integrand, 0, psi_P_u1, args=(Au_L, deltaz/c_L))
        # Total parent length = stretched LE region + remaining upper surface.
        return c_L*((1-eps_LE)*(l_LE+y_LE)+y_L)/y_C
    c_C = optimize.fixed_point(f, [c_L])
    #In case the calculated chord is really close to the original, but the
    #algorithm was not able to make them equal
    if abs(c_L - c_C) < 1e-7:
        return c_L
    #The output is an array so it needs the extra [0]
    return c_C[0]
def calculate_psi_goal(psi_baseline, Au_baseline, Au_goal, deltaz,
                       c_baseline, c_goal, l_LE, eps_LE, psi_1):
    """Find the value for psi that has the same location w on the upper
    surface of the goal as psi_baseline on the upper surface of the
    baseline.

    The baseline arc length is split into a leading-edge part ([0, psi_1],
    scaled by l_LE and eps_LE) and the remainder ([psi_1, psi_baseline]);
    the goal psi is solved so the total length is conserved.
    """
    def integrand(psi_baseline, Au, deltaz, c ):
        # Dimensional arc-length element of the upper surface.
        return c*np.sqrt(1 + dxi_u(psi_baseline, Au, deltaz/c)**2)
    def equation(psi_goal, Au_goal, deltaz, c):
        # NOTE(review): the guard compares the solver variable psi_goal with
        # psi_1; comparing psi_baseline with psi_1 may have been intended --
        # confirm.
        if psi_goal != psi_1:
            L_baseline, err = quad(integrand, psi_1, psi_baseline, args=(Au_baseline, deltaz,
                                                               c_baseline))
        else:
            L_baseline = 0
        L_LE, err = quad(integrand, 0, psi_1, args=(Au_baseline, deltaz,
                                                    c_baseline))
        y, err = quad(integrand, 0, psi_goal, args=(Au_goal, deltaz, c))
        return y - (1-eps_LE)*(L_LE+c_baseline*l_LE) - L_baseline
    with warnings.catch_warnings():
        # Suppress fsolve convergence warnings, as elsewhere in the module.
        warnings.simplefilter("ignore")
        y = fsolve(equation, psi_baseline, args=(Au_goal, deltaz,
                                                 c_goal))
    return y[0]
def calculate_A0_moving_LE(psi_baseline, psi_goal_0, Au_baseline, Au_goal, deltaz,
                           c_baseline, l_LE, eps_LE):
    """Find the value of A_P0^c (first upper shape coefficient of the child)
    that gives the first bay the same arc length as on the parent.

    psi_baseline -- list of non-dimensional spar locations on the parent
    psi_goal_0   -- non-dimensional location of the first spar on the child
    Au_baseline  -- upper shape coefficients of the parent
    Au_goal      -- upper shape coefficients of the child (element 0 is the
                    unknown; mutated in place while solving)
    deltaz       -- trailing-edge thickness parameter
    c_baseline   -- parent chord
    l_LE, eps_LE -- leading-edge extension length and strain
    """
    def integrand(psi_baseline, Al, deltaz, c ):
        # Dimensional arc-length element of the upper surface.
        return c*np.sqrt(1 + dxi_u(psi_baseline, Al, deltaz/c)**2)
    def equation(A0, L_baseline, Au_goal, deltaz):
        Au_goal[0] = A0
        # BUG FIX: the original referenced the module-level globals c_P and
        # psi_spars here (NameError when used as a library); use the
        # corresponding arguments c_baseline and psi_baseline instead.
        c = calculate_c_baseline(c_baseline, Au_goal, Au_baseline,
                                 deltaz/c_baseline, l_LE, eps_LE,
                                 psi_baseline[0])
        y, err = quad(integrand, 0, psi_goal_0, args=(Au_goal, deltaz, c))
        return y - (1-eps_LE)*(L_baseline - c*l_LE)
    # Arc length of the parent's first bay (0 to the first spar).
    L_baseline, err = quad(integrand, 0, psi_baseline[0], args=(Au_baseline, deltaz,
                                                                c_baseline))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        y = fsolve(equation, Au_goal[0], args=(L_baseline, Au_goal, deltaz))
    return y[0]
def calculate_spar_direction(psi_baseline, Au_baseline, Au_goal, deltaz, c_goal, l_LE, eps_LE, psi_spars):
    """Calculate the direction of the spar component based on a location
    at the upper surface for the cruise airfoil.

    Returns the 2-vector s giving the spar direction at the matching
    location on the goal configuration's upper surface.
    """
    # Calculate cruise chord
    c_baseline = calculate_c_baseline(c_goal, Au_baseline, Au_goal, deltaz, l_LE, eps_LE, psi_spars[0])
    # Calculate psi at goal airfoil
    psi_goal = calculate_psi_goal(psi_baseline, Au_baseline, Au_goal, deltaz,
                                  c_baseline, c_goal, l_LE, eps_LE, psi_spars[0])
    # non-normalized direction
    s = np.zeros(2)
    t = np.zeros(2)
    # t_norm = np.sqrt(1 + (dxi_u(psi_goal, Au_goal[0], Au_goal[1], deltaz))**2)
    # cos(beta) of the spar angle on the baseline configuration.
    cbeta = calculate_cbeta(psi_baseline, Au_baseline,
                            deltaz/c_baseline)
    sbeta = np.sqrt(1-cbeta**2)
    # Unit tangent of the goal upper surface at psi_goal.
    t[0] = 1
    t[1] = dxi_u(psi_goal, Au_goal, deltaz/c_goal)
    t_norm = np.sqrt(t[0]**2 + t[1]**2)
    t = (1./t_norm)*t
    # s[0] = t_norm*cbeta - dxi_u(psi_goal, Au_goal[0], Au_goal[1], deltaz)
    # s[1] = 1
    # Rotate the tangent by beta to obtain the spar direction.
    s[1] = t[1]*cbeta + t[0]*sbeta
    s[0] = (cbeta - s[1]*t[1])/t[0]
    return s
#==============================================================================
# Calculate dependent shape function parameters
#==============================================================================
def calculate_dependent_shape_coefficients(Au_C_1_to_n,
                                           psi_spars, Au_P, Al_P, deltaz, c_P,
                                           morphing = 'backwards', l_LE=0, eps_LE=0):
    """Calculate dependent shape coefficients for the children configuration
    for an order-n Bernstein polynomial and return the children upper and
    lower shape coefficients, children chord and spar thicknesses.
    _P denotes parent parameters; _C denotes child (cruise) quantities.

    NOTE(review): only the morphing == 'forwards' branch builds the linear
    system; other values leave several locals undefined -- confirm.
    """
    def calculate_AC_u0(AC_u0, constant_LE = True):
        # First upper coefficient of the child: either scaled to keep the LE
        # radius (constant_LE) or solved to conserve the first-bay length.
        Au_C = [AC_u0] + Au_C_1_to_n
        if constant_LE:
            return np.sqrt(c_P/c_C)*Au_P[0]
        else:
            return calculate_A0_moving_LE(psi_spars, psi_lower_children[0], Au_P, Au_C, deltaz,
                             c_P, l_LE, eps_LE)
    # Bernstein binomial coefficient (n choose r)
    def K(r,n):
        K=math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
        return K
    # Bernstein Polynomial order
    # In case of leading edge radius constraint
    n = len(Au_C_1_to_n)
    # Find upper shape coefficient through an iterative method since Au_0 is
    # unknown (fixed-point style iteration on Au_C[0] and c_C below)
    #AC_u0 = optimize.fixed_point(calculate_AC_u0, Au_P[0])
    #print AC_u0
    error = 9999
    psi_lower_children = psi_spars
    Au_C = [Au_P[0]] + Au_C_1_to_n # [Au_P[0]] +
    former_chord = c_P
    while error > 1e-5:
        # Snapshot of the coefficients to measure convergence against.
        former_Au_C = []
        for i in range(len(Au_C)):
            former_Au_C.append(Au_C[i])
        # Because the output is an array, need the extra [0]
        error_A0 = 999
        # Now that AC_u0 is known we can calculate the actual chord and AC_l0
        # c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
        Au_C[0] = calculate_AC_u0(Au_C[0], constant_LE = False)
        Al_C0 = Au_C[0]
        c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
        #Al_C0 = Au_C[0]
        # print '0 lower shape coefficient: ',AC_l0
        # Calculate thicknesses and tensor B for the constraint linear system problem
        spar_thicknesses = []
        if morphing == 'forwards':
            f = np.zeros((n,1))
            # psi/xi coordinates for lower surface of the children configuration
            psi_lower_children = []
            xi_lower_children = []
            xi_upper_children = []
            # psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
            psi_upper_children = []
            for j in range(len(psi_spars)):
                print(j)
                psi_upper_children.append(calculate_psi_goal(psi_spars[j], Au_P, Au_C, deltaz,
                                   c_P, c_C,l_LE, eps_LE, psi_spars[0]))
            # Calculate xi for upper children. Do not care about lower so just gave it random shape coefficients
            xi_upper_children = CST(psi_upper_children, 1., deltasz= [deltaz/2./c_C, deltaz/2./c_C], Al= Au_C, Au =Au_C)
            xi_upper_children = xi_upper_children['u']
            # print xi_upper_children
            #Debugging section
            # x = np.linspace(0,1)
            # y = CST(x, 1., deltasz= [deltaz/2./c_C, deltaz/2./c_C], Al= Au_C, Au =Au_C)
            # plt.plot(x,y['u'])
            # plt.scatter(psi_upper_children, xi_upper_children)
            # plt.grid()
            # plt.show()
            # BREAK
            print(Au_P, Au_C, len(psi_spars), n)
            for j in range(len(psi_spars)):
                # Parent spar thickness at spar j (upper minus lower surface).
                xi_parent = CST(psi_spars, 1., deltasz= [deltaz/2./c_P, deltaz/2./c_P], Al= Al_P, Au =Au_P)
                delta_j_P = xi_parent['u'][j]-xi_parent['l'][j]
                t_j = c_P*(delta_j_P)
                # Calculate orientation for children
                s_j = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C, l_LE, eps_LE, psi_spars)
                # Lower-surface attachment point: offset the upper point by
                # the spar thickness along the spar direction.
                psi_l_j = psi_upper_children[j]-delta_j_P/c_C*s_j[0]
                xi_l_j = xi_upper_children[j]-delta_j_P/c_C*s_j[1]
                spar_thicknesses.append(t_j)
                psi_lower_children.append(psi_l_j)
                xi_lower_children.append(xi_l_j)
                # Right-hand side of the constraint system for coefficient j.
                f[j] = (2*xi_l_j + psi_l_j*deltaz/c_C)/(2*(psi_l_j**0.5)*(psi_l_j-1)) - Al_C0*(1-psi_l_j)**n
            F = np.zeros((n,n))
            #j is the row dimension and i the column dimension in this case
            for j in range(n):
                for i in range(n):
                    #Because in Python counting starts at 0, need to add 1 to be
                    #coherent for equations
                    r = i + 1
                    F[j][i] = K(r,n)*(psi_lower_children[j]**r)*(1-psi_lower_children[j])**(n-r)
            print(F)
            print(f)
            # Solve F * A_lower = f for the dependent lower coefficients.
            A_lower = np.dot(inv(F), f)
            print('result', A_lower)
            Al_C = [Al_C0]
            for i in range(len(A_lower)):
                Al_C.append(A_lower[i][0]) #extra [0] is necessary because of array
        # Relative change of the upper coefficients between iterations.
        error_denominator = 0
        print('before', former_Au_C, Au_C)
        for i in range(len(Au_C)):
            error_denominator += Au_C[i]**2
        error = 0
        for i in range(len(Al_C)):
            error += (former_Au_C[i] - Au_C[i])**2/error_denominator
        error = math.sqrt(error)
        # error = abs(c_C-former_chord)/c_C
        # AC_u0 = calculate_AC_u0(AC_u0, constant_LE=False)
        print(error, Al_C, Au_C)
        # former_chord = c_C
    return Au_C, Al_C, c_C, spar_thicknesses
def calculate_shape_coefficients_tracing(A0, x, y, N1, N2, chord = 1., EndThickness = 0):
    """Solve for the CST shape coefficients that make the class/shape curve
    pass through the traced points (x, y).

    inputs:
       - A0: float value for the first shape coefficient, fixed by an
         external constraint.
       - x, y: coordinates of the traced points (equal-length sequences).
       - N1, N2: class-function exponents.
       - chord: length used to non-dimensionalise x and y.
       - EndThickness: trailing-edge thickness (same units as x and y).
    Returns the full coefficient list [A0, A1, ..., An].
    """
    # Bernstein binomial coefficient (n choose r)
    def K(r,n):
        return math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
    n = len(x)
    Psi = np.array(x)/chord
    Xi = np.array(y)/chord
    EndThickness = EndThickness/chord
    T = np.zeros((n,n))
    t = np.zeros((n,1))
    for j in range(1,n+1):
        jj = j - 1
        for i in range(1,n+1):
            ii = i - 1
            # Bernstein basis term r=i evaluated at point jj.
            T[jj][ii] = K(i,n)* Psi[jj]**i * (1-Psi[jj])**(n-i)
        # Right-hand side: shape-function value at point jj minus the fixed
        # A0 contribution. (Moved out of the inner loop -- the original
        # recomputed this same value n times per point and printed debug
        # output on every pass.)
        t[jj] = (Xi[jj] - Psi[jj]*EndThickness)/(Psi[jj]**N1*(1-Psi[jj])**N2) - A0*(1-Psi[jj])**n
    # Solve the linear system T A = t for the remaining coefficients.
    A = np.dot(inv(T), t)
    return [A0] + list(A.transpose()[0])
def calculate_strains( Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars, spar_thicknesses):
    """Compute per-bay lower-surface strains between parent and child.

    Returns (strains, av_strain): the strain of each lower-surface segment
    between consecutive spar feet (plus the LE and TE ends) and the overall
    average strain.

    NOTE(review): this function reads l_LE and eps_LE from module scope --
    they are not parameters, so it raises NameError unless they are defined
    globally before the call. Confirm / refactor.
    """
    # Calculate psi_flats (non-dimensional location of the intersection of
    # the spars with the lower surface)
    psi_flats = []
    for j in range(len(psi_spars)):
        psi_parent_j = psi_spars[j]
        # Calculate psi at landing
        # psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
        psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C, l_LE, eps_LE, psi_spars[0])
        x_children_j = psi_children_j*c_C
        s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C, l_LE, eps_LE, psi_spars)
        # Spar foot: offset the upper point by the spar thickness along s.
        psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
    # Calculate initial lengths
    initial_lengths = []
    psi_list = [0.] + psi_spars + [c_P]
    for i in range(len(psi_list)-1):
        initial_lengths.append(calculate_arc_length(psi_list[i], psi_list[i+1], Al_P, deltaz, c_P))
    # Calculate final lengths
    final_lengths = []
    psi_list = [0.] + psi_flats + [c_C] # In P configuration
    for i in range(len(psi_list)-1):
        final_lengths.append(calculate_arc_length(psi_list[i]*c_P/c_C, psi_list[i+1]*c_P/c_C, Al_C, deltaz, c_C))
    # Calculate strains
    strains = []
    for i in range(len(final_lengths)):
        strains.append((final_lengths[i]-initial_lengths[i])/initial_lengths[i])
    av_strain = (sum(final_lengths)-sum(initial_lengths))/sum(initial_lengths)
    # for i in range(len(strains)):
    #    print 'Initial length: ' + str(initial_lengths[i]) + ', final length: ' + str(final_lengths[i]) + ', strains: ' + str(strains[i])
    return strains, av_strain
def plot_airfoil(AC, psi_spars, c_L, deltaz, Au_L, Al_L, image = 'plot',
                 iteration=0, return_coordinates=True, dir = 'current',
                 l_LE=0, eps_LE=0):
    """Plot the parent and morphed child airfoils together with their spars.

    AC           -- child upper shape coefficients A1..An (A0 is derived)
    psi_spars    -- non-dimensional spar locations on the parent
    c_L          -- parent (landing) chord
    deltaz       -- trailing-edge thickness parameter
    Au_L, Al_L   -- parent upper/lower shape coefficients
    image        -- 'plot' to show interactively, 'save' to write a PNG
    iteration    -- frame number used in the saved filename
    return_coordinates -- if True, return {'x': ..., 'y': ...} of the child
    dir          -- 'current' or a sub-directory name for saved images
    l_LE, eps_LE -- leading-edge extension length and strain (default 0).
                    BUG FIX: the original called calculate_psi_goal and
                    calculate_spar_direction without these (now required)
                    arguments, which raised TypeError.
    """
    import matplotlib.pyplot as plt
    plt.figure()
    Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
        AC,
        psi_spars, Au_L, Al_L,
        deltaz, c_L, morphing=morphing_direction,
        l_LE=l_LE, eps_LE=eps_LE)
    #==============================================================================
    # Plot results
    #==============================================================================
    np.set_printoptions(precision=20)
    # Child airfoil (upper and lower surfaces).
    x = np.linspace(0, c_C, 1000)
    y = CST(x, c_C, deltasz= [deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
    plt.plot(x, y['u'], 'b', label = 'Children')
    plt.plot(x, y['l'], '-b', label = None)
    # store variables in case return_coordinates is True
    x = list(x[::-1]) + list(x[1:])
    y = list(y['u'][::-1]) + list(y['l'][1:])
    children_coordinates = {'x':x, 'y':y}
    # Parent airfoil.
    x = np.linspace(0, c_L, 1000)
    y = CST(x, c_L, deltasz= [deltaz/2., deltaz/2.], Al= Al_L, Au =Au_L)
    plt.plot(x, y['u'], 'r--', label='Parent')
    plt.plot(x, y['l'], 'r--', label=None)
    y_limits = y  # kept for the commented-out axis-limit line below
    for i in range(len(psi_spars)):
        psi_i = psi_spars[i]
        # Calculate psi at landing
        psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_L, deltaz, c_C, c_L,
                                        l_LE, eps_LE, psi_spars[0])
        x_goal_i = psi_goal_i*c_L
        # Calculate xi at landing
        temp = CST(x_goal_i, c_L, [deltaz/2., deltaz/2.], Al= Al_L, Au =Au_L)
        y_goal_i = temp['u']
        #calculate spar direction
        s = calculate_spar_direction(psi_i, Au_C, Au_L, deltaz, c_L,
                                     l_LE, eps_LE, psi_spars)
        plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'r--')
        y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
        plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u']-spar_thicknesses[i]], 'b', label = None)
    plt.xlabel('$\psi$', fontsize = 16)
    plt.ylabel(r'$\xi$', fontsize = 16)
    plt.grid()
    plt.legend(loc="upper right")
    plt.gca().set_aspect('equal', adjustable='box')
    x1,x2,y1,y2 = plt.axis()
    plt.axis((x1,x2,y1,2*y2))
    # plt.axis([-0.005, c_L+0.005, min(y_limits['l'])-0.005, max(y_limits['l'])+0.01])
    if image == 'plot':
        plt.show()
    elif image == 'save':
        if dir == 'current':
            plt.savefig('%03i.png' % (iteration), bbox_inches='tight')
        else:
            cwd = os.getcwd()
            directory = os.path.join(cwd, dir)
            if not os.path.exists(directory):
                os.makedirs(directory)
            filename = os.path.join(directory, '%05i.png' % (iteration))
            plt.savefig(filename, bbox_inches='tight')
    if return_coordinates:
        return children_coordinates
if __name__ == '__main__':
    # Manual demo driver.  Two modes:
    #   'tracing'                 - fit CST shape coefficients to traced points
    #   'structurally_consistent' - compute a child airfoil consistent with the
    #                               parent's spars, then plot both shapes
    # NOTE(review): this block relies on names defined earlier in the module
    # and not visible here: np, CST, calculate_shape_coefficients_tracing,
    # calculate_psi_goal, calculate_spar_direction,
    # calculate_dependent_shape_coefficients, calculate_strains, plus the
    # flags `inverted` and `morphing_direction` -- confirm they are
    # module-level globals before running this file standalone.
    import matplotlib.pyplot as plt
    testing = 'structurally_consistent'
    # testing = 'tracing'
    if testing == 'tracing':
        # Class-function exponents N1=N2=1 give a straight-edged CST shape.
        N1 = 1.
        N2 = 1.
        tip_displacement = {'x': 1., 'y':.5}
        other_points = {'x': [0.7], 'y':[0.25]}
        # First coefficient fixed by the tip slope.
        A0 = -tip_displacement['x']/tip_displacement['y']
        # Check if y values are smaller than tip y (warn only; does not abort)
        for y_i in other_points['y']:
            if y_i>=tip_displacement['y']:
                print('Y value out of bounds!')
        A = calculate_shape_coefficients_tracing(A0, other_points['y'], other_points['x'], N1, N2, chord = tip_displacement['y'], EndThickness = tip_displacement['x'])
        # plotting: evaluate the fitted CST curve and overlay traced points
        y = np.linspace(0, tip_displacement['y'], 100000)
        x = CST(y, tip_displacement['y'], deltasz= tip_displacement['x'], Au = A, N1=N1, N2=N2)
        plt.plot(x,y)
        plt.scatter(other_points['x'] + [tip_displacement['x']],
                    other_points['y'] + [tip_displacement['y']])
        plt.gca().set_aspect('equal', adjustable='box')
        plt.show()
    elif testing == 'structurally_consistent':
        #==============================================================================
        # Inputs
        #==============================================================================
        # Parameter: parent chord [m] and trailing-edge thickness [m]
        c_P = 1. #m
        deltaz = 0.*c_P #m
        # Avian wing, order 5 (alternative parent shape, kept for reference)
        # Au_P = [0.23993240191629417, 0.34468227138908186, 0.18125405377549103,
        #         0.35371349126072665, 0.2440815012119143, 0.25724974995738387]
        # Al_P = [0.18889012559339036, -0.24686758992053115, 0.077569769493868401,
        #         -0.547827192265256, -0.0047342206759065641, -0.23994805474814629]
        # NACA0012: CST coefficients for upper (Au_P) and lower (Al_P) surfaces
        Au_P = [0.10887, 0.1187, 0.07843, 0.12084, 0.07919, 0.09840]
        Al_P = [0.11117, 0.1000, 0.1239, 0.06334, 0.11539, 0.10400]
        # Polynomial order of the CST parameterisation
        n = len(Au_P) - 1
        # NOTE(review): `inverted` is not defined in this __main__ block;
        # presumably a module-level flag.  When set, the airfoil is flipped
        # by swapping and negating the surface coefficient sets.
        if inverted:
            temp = Au_P
            Au_P = list(-np.array(Al_P))
            Al_P = list(-np.array(temp))
        # Shape coefficients for upper surface of cruise airfoil
        # AC_u1 = 0.25 #Adimensional
        # AC_u2 = 0.25 #Adimensional
        # AC_u3 = 0.25 #Adimensional
        # AC_u4 = 0.25 #Adimensional
        # AC_u5 = 0.25
        # Medium
        # AC_u1 = 0.2187 #Adimensional
        # AC_u2 = 0.17843 #Adimensional
        # AC_u3 = 0.22084 #Adimensional
        # AC_u4 = 0.17919 #Adimensional
        # AC_u5 = 0.19840 #Adimensional
        # Small
        # AC_u1 = 0.1487 #Adimensional
        # AC_u2 = 0.10843 #Adimensional
        # AC_u3 = 0.15084 #Adimensional
        # AC_u4 = 0.10919 #Adimensional
        # AC_u5 = 0.12840 #Adimensional
        # AC_u1 = 0.34468227138908186 #Adimensional
        # AC_u2 = 0.18125405377549103 #Adimensional
        # AC_u3 = 0.35371349126072665 #Adimensional
        # AC_u4 = 0.2440815012119143 #Adimensional
        # AC_u5 = 0.25724974995738387 #Adimensional
        # Spar positions for cruise (adimensional; the chord is calculated later)
        # psi_spar0 = 0.1
        psi_spar1 = 0.2 #Adimensional
        psi_spar2 = 0.3 #Adimensional
        psi_spar3 = 0.5 #Adimensional
        psi_spar4 = 0.7 #Adimensional
        psi_spar5 = 0.9 #Adimensional
        psi_spars = [psi_spar1, psi_spar2, psi_spar3, psi_spar4, psi_spar5]
        # Leading-edge morphing parameters
        l_LE = 0.
        eps_LE = 0.05
        #==============================================================================
        # Calculate dependent coefficients
        #==============================================================================
        Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
            Au_P[1:],
            psi_spars, Au_P, Al_P,
            deltaz, c_P, morphing=morphing_direction, l_LE=l_LE, eps_LE=eps_LE)
        #==============================================================================
        #  Plot results
        #==============================================================================
        np.set_printoptions(precision=20)
        # Print shape for children
        x = np.linspace(0, c_C, 100000)
        y = CST(x, c_C, deltasz= [deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
        plt.plot(x, y['u'], 'b', label = 'Children', lw=2)
        plt.plot(x, y['l'], 'b', label = None, lw=2)
        # Print shape for parent
        x = np.linspace(0, c_P, 100000)
        y = CST(x, c_P, deltasz= [deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
        plt.plot(x, y['u'], 'r--', label='Parent', lw=2)
        plt.plot(x, y['l'], 'r--', label = None, lw=2)
        if morphing_direction == 'forwards':
            psi_flats = []
            intersections_x_children = [0]
            intersections_y_children = [0]
            intersections_x_parent = [0]
            intersections_y_parent = [0]
            for j in range(len(psi_spars)):
                psi_parent_j = psi_spars[j]
                # Calculate psi at landing
                # psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
                psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C,l_LE, eps_LE, psi_spars[0])
                x_children_j = psi_children_j*c_C
                # Calculate xi at landing
                temp = CST(x_children_j, c_C, [deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
                y_children_j = temp['u']
                s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C, l_LE, eps_LE, psi_spars)
                # Print spars for children (mirrored vertically when inverted)
                if not inverted:
                    plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],[y_children_j, y_children_j - spar_thicknesses[j]*s[1]], c = 'b', lw=2, label=None)
                else:
                    plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],[-y_children_j, -y_children_j + spar_thicknesses[j]*s[1]], c = 'b', lw=2, label=None)
                psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
                y = CST(np.array([psi_parent_j*c_P]), c_P, deltasz=[deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
                intersections_x_children.append(x_children_j - spar_thicknesses[j]*s[0])
                intersections_y_children.append(y_children_j - spar_thicknesses[j]*s[1])
                # Print spars for parents
                if not inverted:
                    plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [y['u'], y['u']-spar_thicknesses[j]], 'r--', lw=2, label = None)
                else:
                    plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [-y['u'], -y['u']+spar_thicknesses[j]], 'r--', lw=2, label = None)
                intersections_x_parent.append(psi_parent_j*c_P)
                intersections_y_parent.append(y['u']-spar_thicknesses[j])
        elif morphing_direction == 'backwards':
            # For backwards, goal is the parent and deformed is children
            for i in range(len(psi_spars)):
                psi_i = psi_spars[i]
                # Calculate psi at landing
                # NOTE(review): this call passes fewer arguments than the
                # 'forwards' branch (no l_LE/eps_LE) -- confirm
                # calculate_psi_goal has matching defaults.
                psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_P, deltaz, c_C, c_P)
                x_goal_i = psi_goal_i*c_P
                # Calculate xi at landing
                temp = CST(x_goal_i, c_P, [deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
                y_goal_i = temp['u']
                # calculate spar direction
                # NOTE(review): the sixth positional argument here is
                # spar_thicknesses, while the 'forwards' branch passes l_LE
                # at that position -- looks inconsistent; verify against
                # calculate_spar_direction's signature.
                s = calculate_spar_direction(psi_i, Au_C, Au_P, deltaz, c_P, spar_thicknesses)
                plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'r--')
                y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
                plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u']-spar_thicknesses[i]], 'b', lw=2, label = None)
        # NOTE(review): '$\psi^p$' contains the unrecognised escape '\p'
        # (Python keeps it verbatim but warns); a raw string like the ylabel
        # below would be cleaner -- left unchanged here.
        plt.xlabel('$\psi^p$', fontsize = 14)
        plt.ylabel(r'$\xi^p$', fontsize = 14)
        plt.ylim([-0.06,0.17])
        plt.grid()
        plt.gca().set_aspect('equal', adjustable='box')
        plt.legend(loc=1)
        plt.show()
        print(calculate_strains( Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars, spar_thicknesses))
|
{"hexsha": "7cec37a6ddd26682a094b85c93c0b04c7e4024ca", "size": 26018, "ext": "py", "lang": "Python", "max_stars_repo_path": "aeropy/morphing/twist_3D.py", "max_stars_repo_name": "belac626/AeroPy", "max_stars_repo_head_hexsha": "4f045306427e08b742237b7393ce9602f1072d60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aeropy/morphing/twist_3D.py", "max_issues_repo_name": "belac626/AeroPy", "max_issues_repo_head_hexsha": "4f045306427e08b742237b7393ce9602f1072d60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aeropy/morphing/twist_3D.py", "max_forks_repo_name": "belac626/AeroPy", "max_forks_repo_head_hexsha": "4f045306427e08b742237b7393ce9602f1072d60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.1312056738, "max_line_length": 171, "alphanum_fraction": 0.534399262, "include": true, "reason": "import numpy,from numpy", "num_tokens": 7099}
|
function scan_data = rdprfile(ufilename,plotmode,verbose)
%RDPRFILE Reads data from a profilometer data file
%
%  DATA = RDPRFILE(UFILENAME,VERBOSE)
%
%  RDPRFILE examines the header in a profilometer data file and attempts to
%   determine the type of data file. If a supported type is found, it then
%   uses the appropriate function to open the file and return the data.
%   Returns SCAN_DATA=[] if unable to determine the file type.
%
%  Supported profilometers:
%   Dektak IIA
%   AlphaStep 200
%   AlphaStep 500
%   Dektak 3ST
%   Veeco Dektak 8
%   Bruker Dektak XT
%   Tencor P-10
%
%  Options:
%   If VERBOSE == 0, no messages will be displayed.
%   If VERBOSE == 1, the data summary will be displayed. (default)
%   If VERBOSE == 2, the file header and data summary will be displayed.
%   If PLOTMODE == 0, the data will not be displayed.
%   if PLOTMODE == 1, the data will be displayed in a new figure. (default)
%
% See also dk2rd.m, as2rd.m, as5rd.m, dk3rd.m, dk8rd.m, dkXTrd.m, p10rd.m
%
% M.A. Hopcroft
%      mhopeng at gmail dot com
%
% The license governing use of this code is in the accompanying file
%  "license.txt".
%
versionstr='rdprfile 1.1';
%
% MH Jul2013
% v1.1  add Dektak XT
% MH Sep2012
% v1.0  works

% handle input arguments - set defaults
% default to verbose mode 1 (summary only)
if nargin < 3
    verbose = 1;
end
% default to plot mode 1
if nargin < 2
    plotmode = 1;
end
% get interactive if not being called by another program
if (nargout == 0)
    if (verbose >= 1 && nargin == 0)
        clc;
        fprintf(1, '\n\n %s\n %s\n\n', versionstr, 'Load profilometer data files');
    end
end

%%%%
%% open user's file
if nargin >= 1 % if the user already specified a file, use it
    userfilename=ufilename;
    % check to see if file exists (probe open, then close the probe handle)
    cfid=fopen(userfilename);
    if cfid== -1
        disp(pwd);
        fprintf(1,'ERROR: data file ["%s"] not found in current directory',userfilename);
        scan_data=[];
        return
    end
    % BUGFIX: the original nested a duplicate "if cfid== -1" test here,
    % which made this fclose unreachable and leaked the probe file handle
    % on every successful call.
    fclose(cfid);
else % if user did not specify the file name, ask for one
    [ufilename, ufilepath]=uigetfile({'.txt','.csv'},'Select the Profilometer data file');
    if isequal(ufilename,0)
        scan_data=[];
        return
    end
    userfilename=fullfile(ufilepath,ufilename);
    %userfilename=input(' Enter the name of the profilometer data file: ','s');
end

% open the file
userfile=fopen(userfilename);

% % %
%% read lines from the header until we have an answer
if (nargout == 0), fprintf(1,'\n '); end % screen formatting for interactive mode

%% read the first line in the file
linein1=fgetl(userfile);
while isempty(linein1), linein1=fgetl(userfile); end % handle spurious carriage returns
% NOTE(review): if the file contains no non-empty lines, fgetl returns -1
% at EOF and the header tests below simply fail to match.
if verbose >= 2, fprintf(1,'%s\n',linein1); end

% is this a Dektak II file?
if ~isempty(strfind(linein1,'SLOAN DEKTAK II'))
    fprintf(1,'rdprfile: This appears to be a data file from a Dektak IIA\n');
    fclose(userfile);
    scan_data=dk2rd(userfilename,plotmode,verbose);
    return
end
% is this a Dektak XT file?
if ~isempty(strfind(linein1,'Meta Data'))
    fprintf(1,'rdprfile: This appears to be a data file from a Dektak XT\n');
    fclose(userfile);
    scan_data=dkXTrd(userfilename,plotmode,verbose);
    return
end

%% read another line
linein2=fgetl(userfile);
while isempty(linein2), linein2=fgetl(userfile); end % handle spurious carriage returns
if verbose >= 2, fprintf(1,'%s\n',linein2); end

% is this a AlphaStep 200 file?
if length(linein2)>=4 && strcmp(linein2(1:4),'VERT')
    fprintf(1,'rdprfile: This appears to be a data file from an AlphaStep 200\n');
    fclose(userfile);
    scan_data=as2rd(userfilename,plotmode,verbose);
    return
end
% is this a AlphaStep 500 file?
dbl1=strtrim(linein1);
dbl2=strtrim(linein2);
% guard against whitespace-only lines, which strtrim reduces to ''
if ~isempty(dbl1) && ~isempty(dbl2) && strcmp(dbl1(1),'A') && strcmp(dbl2(1),'B')
    fprintf(1,'rdprfile: This appears to be a data file from an AlphaStep 500\n');
    fclose(userfile);
    scan_data=as5rd(userfilename,plotmode,verbose);
    return
end
% is this a Tencor P-10 file?
% BUGFIX: strfind returns [] (or an index array), which is not a valid
% right-hand operand for && and caused a runtime error whenever linein1
% started with '*'; wrap in ~isempty for a logical scalar.
if strcmp(linein1(1),'*') && ~isempty(strfind(linein2,'data points'))
    fprintf(1,'rdprfile: This appears to be a data file from a Tencor P-10\n');
    fclose(userfile);
    scan_data=p10rd(userfilename,plotmode,verbose);
    return
end
% is this a Dektak 3ST file?
if length(linein1)>=9 && strcmp(linein1(1:9),'DATA FILE') && length(linein2)>=9 && strcmp(linein2(1:9),'AUTO PROG')
    fprintf(1,'rdprfile: This appears to be a data file from a Dektak 3ST\n');
    fclose(userfile);
    scan_data=dk3rd(userfilename,plotmode,verbose);
    return
end

%% read more lines
linein3=fgetl(userfile);
while isempty(linein3), linein3=fgetl(userfile); end
if verbose >= 2, fprintf(1,'%s\n',linein3); end
linein4=fgetl(userfile);
while isempty(linein4), linein4=fgetl(userfile); end
if verbose >= 2, fprintf(1,'%s\n',linein4); end
linein5=fgetl(userfile);
while isempty(linein5), linein5=fgetl(userfile); end
if verbose >= 2, fprintf(1,'%s\n',linein5); end

% is this a Dektak 8 file?
if length(linein4)>=9 && strcmp(linein4(1:9),'DATA FILE') && length(linein5)>=4 && strcmpi(linein5(1:4),'Date')
    fprintf(1,'rdprfile: This appears to be a data file from an Dektak 8\n');
    fclose(userfile);
    scan_data=dk8rd(userfilename,plotmode,verbose);
    return
end

% fail
% BUGFIX: close the file before giving up (the original leaked the handle
% on the unrecognised-file path).
fclose(userfile);
fprintf(1,'rdprfile: Unable to determine file type!\n');
scan_data=[];
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/15096-rd-scripts/rd_scripts_v13/rdprfile.m"}
|
[STATEMENT]
lemma Nil_rsp2 [quot_respect]:
shows "(list_all2 (\<approx>) OOO (\<approx>)) Nil Nil"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_all2 (\<approx>) OOO (\<approx>)) [] []
[PROOF STEP]
by (rule compose_list_refl, rule list_eq_equivp)
|
{"llama_tokens": 107, "file": null, "length": 1}
|
from __future__ import division
import numpy as np
from numpy.core.numeric import NaN
from scipy.signal import spectrogram
from . import timbral_util
import tensorflow as tf
import tensorflow.keras.backend as K
def timbral_depth(fname, fs=0, dev_output=False, phase_correction=False, clip_output=False, threshold_db=-60,
                  low_frequency_limit=20, centroid_crossover_frequency=2000, ratio_crossover_frequency=500,
                  db_decay_threshold=-40, take_first=None):
    """
    This function calculates the apparent Depth of an audio file.
    This version of timbral_depth contains self loudness normalising methods and can accept arrays as an input
    instead of a string filename.
    Version 0.4
    Required parameter
    :param fname: string or numpy array
                  string, audio filename to be analysed, including full file path and extension.
                  numpy array, array of audio samples, requires fs to be set to the sample rate.
    Optional parameters
    :param fs: int/float, when fname is a numpy array, this is a required to be the sample rate.
               Defaults to 0.
    :param phase_correction: bool, perform phase checking before summing to mono. Defaults to False.
    :param dev_output: bool, when False return the depth, when True return all extracted
                       features. Default to False.
    :param clip_output: bool, when True the final depth value is limited with
                        timbral_util.output_clip. Defaults to False.
    :param threshold_db: float/int (negative), threshold, in dB, for calculating centroids.
                         Should be negative. Defaults to -60.
    :param low_frequency_limit: float/int, low frequency limit at which to highpass filter the audio, in Hz.
                                Defaults to 20.
    :param centroid_crossover_frequency: float/int, crossover frequency for calculating the spectral centroid, in Hz.
                                         Defaults to 2000
    :param ratio_crossover_frequency: float/int, crossover frequency for calculating the ratio, in Hz.
                                      Defaults to 500.
    :param db_decay_threshold: float/int (negative), threshold, in dB, for estimating duration. Should be
                               negative. Defaults to -40.
    :param take_first: int or None, when set, only the first take_first audio
                       samples are analysed. Defaults to None (analyse all).
    :return: float, aparent depth of audio file, float.
    Copyright 2018 Andy Pearce, Institute of Sound Recording, University of Surrey, UK.
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
        http://www.apache.org/licenses/LICENSE-2.0
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
    """
    '''
      Read input
    '''
    audio_samples, fs = timbral_util.file_read(
        fname, fs, phase_correction=phase_correction)
    # audio_samples = audio_samples[:128*128]
    fs = float(fs)
    if take_first:
        audio_samples = audio_samples[:take_first]
    '''
      Filter audio
    '''
    # highpass audio - run 3 times to get -18dB per octave - unstable filters produced when using a 6th order
    audio_samples = timbral_util.filter_audio_highpass(
        audio_samples, crossover=low_frequency_limit, fs=fs)
    audio_samples = timbral_util.filter_audio_highpass(
        audio_samples, crossover=low_frequency_limit, fs=fs)
    audio_samples = timbral_util.filter_audio_highpass(
        audio_samples, crossover=low_frequency_limit, fs=fs)
    # running 3 times to get -18dB per octave rolloff, greater than second order filters are unstable in python
    lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(
        audio_samples, crossover=centroid_crossover_frequency, fs=fs)
    lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(
        lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
    lowpass_centroid_audio_samples = timbral_util.filter_audio_lowpass(
        lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
    lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(
        audio_samples, crossover=ratio_crossover_frequency, fs=fs)
    lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(
        lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
    lowpass_ratio_audio_samples = timbral_util.filter_audio_lowpass(
        lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
    '''
      Get spectrograms and normalise
    '''
    # normalise audio (all three signals are scaled by the broadband peak so
    # their relative levels are preserved)
    lowpass_ratio_audio_samples *= (1.0 / max(abs(audio_samples)))
    lowpass_centroid_audio_samples *= (1.0 / max(abs(audio_samples)))
    audio_samples *= (1.0 / max(abs(audio_samples)))
    # set FFT parameters
    nfft = 4096
    hop_size = int(3 * nfft / 4)
    # get spectrogram
    if len(audio_samples) > nfft:
        freq, time, spec = spectrogram(audio_samples, fs, 'hamming', nfft, hop_size,
                                       nfft, False, True, 'spectrum')
        lp_centroid_freq, _, lp_centroid_spec = spectrogram(lowpass_centroid_audio_samples, fs,
                                                            'hamming', nfft, hop_size, nfft,
                                                            False, True, 'spectrum')
        _, _, lp_ratio_spec = spectrogram(lowpass_ratio_audio_samples, fs, 'hamming', nfft,
                                          hop_size, nfft, False, True, 'spectrum')
    else:
        # file is shorter than 4096, just take the fft
        freq, time, spec = spectrogram(audio_samples, fs, 'hamming', len(audio_samples), len(audio_samples)-1,
                                       nfft, False, True, 'spectrum')
        lp_centroid_freq, _, lp_centroid_spec = spectrogram(lowpass_centroid_audio_samples, fs,
                                                            'hamming',
                                                            len(
                                                                lowpass_centroid_audio_samples),
                                                            len(
                                                                lowpass_centroid_audio_samples)-1,
                                                            nfft, False, True, 'spectrum')
        _, _, lp_ratio_spec = spectrogram(lowpass_ratio_audio_samples, fs, 'hamming',
                                          len(lowpass_ratio_audio_samples),
                                          len(lowpass_ratio_audio_samples)-1,
                                          nfft, False, True, 'spectrum')
    # noise-gate threshold as a linear magnitude
    threshold = timbral_util.db2mag(threshold_db)
    '''
      METRIC 1 - limited weighted mean normalised lower centroid
    '''
    # define arrays for storing metrics
    all_normalised_lower_centroid = []
    all_normalised_centroid_tpower = []
    # get metrics for each time segment of the spectrogram
    for idx in range(len(time)):
        # get overall spectrum of time frame
        current_spectrum = spec[:, idx]
        # calculate time window power
        tpower = np.sum(current_spectrum)
        all_normalised_centroid_tpower.append(tpower)
        # estimate if time segment contains audio energy or just noise
        if tpower > threshold:
            # get the spectrum
            lower_spectrum = lp_centroid_spec[:, idx]
            lower_power = np.sum(lower_spectrum)
            # get lower centroid
            lower_centroid = np.sum(
                lower_spectrum * lp_centroid_freq) / float(lower_power)
            # append to list
            all_normalised_lower_centroid.append(lower_centroid)
        else:
            all_normalised_lower_centroid.append(0)
    # calculate the weighted mean of lower centroids
    weighted_mean_normalised_lower_centroid = np.average(all_normalised_lower_centroid,
                                                         weights=all_normalised_centroid_tpower)
    # limit to the centroid crossover frequency
    if weighted_mean_normalised_lower_centroid > centroid_crossover_frequency:
        limited_weighted_mean_normalised_lower_centroid = np.float64(
            centroid_crossover_frequency)
    else:
        limited_weighted_mean_normalised_lower_centroid = weighted_mean_normalised_lower_centroid
    '''
      METRIC 2 - weighted mean normalised lower ratio
    '''
    # define arrays for storing metrics
    all_normalised_lower_ratio = []
    all_normalised_ratio_tpower = []
    # get metrics for each time segment of the spectrogram
    for idx in range(len(time)):
        # get time frame of broadband spectrum
        current_spectrum = spec[:, idx]
        tpower = np.sum(current_spectrum)
        all_normalised_ratio_tpower.append(tpower)
        # estimate if time segment contains audio energy or just noise
        if tpower > threshold:
            # get the lowpass spectrum
            lower_spectrum = lp_ratio_spec[:, idx]
            # get the power of this
            lower_power = np.sum(lower_spectrum)
            # get the ratio of LF to all energy
            lower_ratio = lower_power / float(tpower)
            # append to array
            all_normalised_lower_ratio.append(lower_ratio)
        else:
            all_normalised_lower_ratio.append(0)
    # calculate the power-weighted mean ratio
    weighted_mean_normalised_lower_ratio = np.average(
        all_normalised_lower_ratio, weights=all_normalised_ratio_tpower)
    '''
      METRIC 3 - Approximate duration/decay-time of sample
    '''
    all_my_duration = []
    # get envelope of signal
    envelope = timbral_util.sample_and_hold_envelope_calculation(
        audio_samples, fs)
    # estimate onsets
    onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs)
    # get RMS envelope - better follows decays than the sample-and-hold
    rms_step_size = 256
    rms_envelope = timbral_util.calculate_rms_enveope(
        audio_samples, step_size=rms_step_size)
    # convert decay threshold to magnitude
    decay_threshold = timbral_util.db2mag(db_decay_threshold)
    # rescale onsets to rms stepsize - casting to int
    time_convert = fs / float(rms_step_size)
    onsets = (np.array(onsets) / float(rms_step_size)).astype('int')
    # NOTE(review): the detected onsets computed above are discarded here and
    # replaced with a single onset at 0, so the duration metric always spans
    # from the global RMS maximum to the end of the file.  This matches the
    # "simplification" noted in tf_timbral_depth below -- confirm intentional.
    onsets = [0]
    for idx, onset in enumerate(onsets):
        if onset == onsets[-1]:
            segment = rms_envelope[onset:]
        else:
            segment = rms_envelope[onset:onsets[idx + 1]]
        # get location of max RMS frame
        max_idx = np.argmax(segment)
        # get the segment from this max until the next onset
        post_max_segment = segment[max_idx:]
        # estimate duration based on decay or until next onset
        if min(post_max_segment) >= decay_threshold:
            my_duration = len(post_max_segment) / time_convert
        else:
            my_duration = np.where(post_max_segment < decay_threshold)[
                0][0] / time_convert
        # append to array
        all_my_duration.append(my_duration)
    # calculate the log10 of the mean duration
    mean_my_duration = np.log10(np.mean(all_my_duration))
    '''
      METRIC 4 - f0 estimation with peak picking
    '''
    # get the overall spectrum (summed over time frames)
    all_spectrum = np.sum(spec, axis=1)
    # normalise this to the [0, 1] range
    norm_spec = (all_spectrum - np.min(all_spectrum)) / \
        (np.max(all_spectrum) - np.min(all_spectrum))
    # set limit for peak picking
    cthr = 0.01
    # detect peaks
    peak_idx, peak_value, peak_freq = timbral_util.detect_peaks(norm_spec, cthr=cthr, unprocessed_array=norm_spec,
                                                                freq=freq)
    # estimate peak
    # NOTE(review): the guard tests peak_freq[0] > 0 but then takes
    # log10(min(peak_freq)); if a non-first peak frequency were 0 this would
    # give -inf, and an empty peak_freq would raise.  Presumably detect_peaks
    # returns a non-empty, ascending frequency list -- verify.
    pitch_estimate = np.log10(min(peak_freq)) if peak_freq[0] > 0 else 0
    # get outputs
    if dev_output:
        # return the raw feature set instead of the regressed depth value
        return limited_weighted_mean_normalised_lower_centroid, weighted_mean_normalised_lower_ratio, mean_my_duration, \
            pitch_estimate, weighted_mean_normalised_lower_ratio * mean_my_duration, \
            timbral_util.sigmoid(
                weighted_mean_normalised_lower_ratio) * mean_my_duration
    else:
        '''
          Perform linear regression to obtain depth
        '''
        # coefficients from linear regression (trained offline; order must
        # match the all_metrics vector below, last entry is the intercept)
        coefficients = np.array([-0.0043703565847874465, 32.83743202462131, 4.750862716905235, -14.217438690256062,
                                 3.8782339862813924, -0.8544826091735516, 66.69534393444391])
        # what are the best metrics
        metric1 = limited_weighted_mean_normalised_lower_centroid
        metric2 = weighted_mean_normalised_lower_ratio
        metric3 = mean_my_duration
        metric4 = pitch_estimate
        metric5 = metric2 * metric3
        metric6 = timbral_util.sigmoid(metric2) * metric3
        # pack metrics into a matrix
        all_metrics = np.zeros(7)
        all_metrics[0] = metric1
        all_metrics[1] = metric2
        all_metrics[2] = metric3
        all_metrics[3] = metric4
        all_metrics[4] = metric5
        all_metrics[5] = metric6
        all_metrics[6] = 1.0
        #print(metric1, metric2, metric3, metric4, metric5, metric6)
        # perform linear regression (dot product of features and coefficients)
        depth = np.sum(all_metrics * coefficients)
        if clip_output:
            depth = timbral_util.output_clip(depth)
        return depth
@tf.function
def tf_timbral_depth(audio_tensor, fs, dev_output=False, phase_correction=False, clip_output=False, threshold_db=-60,
low_frequency_limit=20, centroid_crossover_frequency=2000, ratio_crossover_frequency=500,
db_decay_threshold=-40):
"""
This function calculates the apparent Depth of an audio file.
This version of timbral_depth contains self loudness normalising methods and can accept arrays as an input
instead of a string filename.
Version 0.4
Required parameter
:param fname: string or numpy array
string, audio filename to be analysed, including full file path and extension.
numpy array, array of audio samples, requires fs to be set to the sample rate.
Optional parameters
:param fs: int/float, when fname is a numpy array, this is a required to be the sample rate.
Defaults to 0.
:param phase_correction: bool, perform phase checking before summing to mono. Defaults to False.
:param dev_output: bool, when False return the depth, when True return all extracted
features. Default to False.
:param threshold_db: float/int (negative), threshold, in dB, for calculating centroids.
Should be negative. Defaults to -60.
:param low_frequency_limit: float/int, low frequency limit at which to highpass filter the audio, in Hz.
Defaults to 20.
:param centroid_crossover_frequency: float/int, crossover frequency for calculating the spectral centroid, in Hz.
Defaults to 2000
:param ratio_crossover_frequency: float/int, crossover frequency for calculating the ratio, in Hz.
Defaults to 500.
:param db_decay_threshold: float/int (negative), threshold, in dB, for estimating duration. Should be
negative. Defaults to -40.
:return: float, aparent depth of audio file, float.
Copyright 2018 Andy Pearce, Institute of Sound Recording, University of Surrey, UK.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
'''
Read input
'''
assert len(audio_tensor.get_shape().as_list(
)) == 3, "tf_timbral_depth :: audio_tensor should be of rank 2 or 3, got {}".format(audio_tensor)
audio_samples, fs = audio_tensor[:, :, 0], fs
b, n = audio_samples.get_shape().as_list()
# audio_samples is now of format BN
fs = float(fs)
'''
Filter audio
'''
max_val = 1.0 / K.max(K.abs(audio_samples), axis=-1, keepdims=True)
# highpass audio - run 3 times to get -18dB per octave - unstable filters produced when using a 6th order
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
audio_samples = timbral_util.tf_filter_audio_highpass(
audio_samples, crossover=low_frequency_limit, fs=fs)
# running 3 times to get -18dB per octave rolloff, greater than second order filters are unstable in python
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_centroid_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_centroid_audio_samples, crossover=centroid_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
audio_samples, crossover=ratio_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
lowpass_ratio_audio_samples = timbral_util.tf_filter_audio_lowpass(
lowpass_ratio_audio_samples, crossover=ratio_crossover_frequency, fs=fs)
'''
Get spectrograms and normalise
'''
# normalise audio
max_val = 1.0 / K.max(K.abs(audio_samples), axis=-1, keepdims=True)
lowpass_ratio_audio_samples = max_val * lowpass_ratio_audio_samples
lowpass_centroid_audio_samples = max_val*lowpass_centroid_audio_samples
audio_samples = max_val * audio_samples
# set FFT parameters
nfft = 4096
hop_size = int(3*nfft / 4)
# get spectrogram
nn = len(audio_samples[0])
nn_lp = len(lowpass_centroid_audio_samples[0])
nn_lpr = len(lowpass_ratio_audio_samples[0])
if nn > nfft:
freq, time, spec = timbral_util.compat_spectrogram(
audio_samples, fs,
'hamming', nfft, hop_size, nfft,
False, True, 'spectrum')
lp_centroid_freq, lp_centroid_time, lp_centroid_spec = timbral_util.compat_spectrogram(lowpass_centroid_audio_samples, fs,
'hamming', nfft, hop_size, nfft,
False, True, 'spectrum')
_, _, lp_ratio_spec = timbral_util.compat_spectrogram(lowpass_ratio_audio_samples, fs, 'hamming', nfft,
hop_size, nfft, False, True, 'spectrum')
else:
# file is shorter than 4096, just take the fft
print("Hello problem :!")
freq, _, spec = timbral_util.compat_spectrogram(audio_samples, fs, 'hamming', nn, nn-1,
nfft, False, True, 'spectrum')
lp_centroid_freq, _, lp_centroid_spec = timbral_util.compat_spectrogram(lowpass_centroid_audio_samples, fs,
'hamming',
nn_lp,
nn_lp-1,
nfft, False, True, 'spectrum')
_, _, lp_ratio_spec = timbral_util.compat_spectrogram(lowpass_ratio_audio_samples, fs, 'hamming',
nn_lpr,
nn_lpr-1,
nfft, False, True, 'spectrum')
threshold = timbral_util.db2mag(threshold_db)
# NOTE :: comapt_spectrogram may need to be transposed compared to scipy spectrogram;
'''
METRIC 1 - limited weighted mean normalised lower centroid
'''
all_normalised_centroid_tpower = []
all_normalised_lower_centroid = []
# get metrics for each time segment of the spectrogram
# TODO :: reduce this to this. Should be tested.
all_normalised_lower_centroid = K.sum(
lp_centroid_freq * lp_centroid_spec, axis=[2]) / K.sum(lp_centroid_spec, axis=2)
all_normalised_centroid_tpower = K.sum(spec, axis=-1)
all_normalised_lower_centroid = tf.where(tf.math.greater(
all_normalised_centroid_tpower, threshold), all_normalised_lower_centroid, 0.)
# calculate the weighted mean of lower centroids
"""
weighted_mean_normalised_lower_centroid = np.average(all_normalised_lower_centroid,
weights=all_normalised_centroid_tpower)
all_normalised_lower_centroid = tf.stack(
all_normalised_lower_centroid_array)
"""
weighted_mean_normalised_lower_centroid = timbral_util.tf_average(
all_normalised_lower_centroid, all_normalised_centroid_tpower, epsilon=None)
# limit to the centroid crossover frequency
"""
if weighted_mean_normalised_lower_centroid > centroid_crossover_frequency:
limited_weighted_mean_normalised_lower_centroid = np.float64(
centroid_crossover_frequency)
else:
limited_weighted_mean_normalised_lower_centroid = weighted_mean_normalised_lower_centroid
"""
limited_weighted_mean_normalised_lower_centroid = K.clip(
weighted_mean_normalised_lower_centroid, 0., centroid_crossover_frequency)
# TODO :: convert below.
'''
METRIC 2 - weighted mean normalised lower ratio
'''
# define arrays for storing metrics
all_normalised_ratio_tpower = K.sum(spec, axis=2)
lower_power = K.sum(lp_ratio_spec, axis=2)
all_normalised_lower_ratio = tf.where(tf.math.greater(
all_normalised_ratio_tpower, threshold), lower_power/all_normalised_ratio_tpower, 0.)
# calculate
weighted_mean_normalised_lower_ratio = timbral_util.tf_average(
all_normalised_lower_ratio, all_normalised_ratio_tpower, epsilon=None)
'''
METRIC 3 - Approximate duration/decay-time of sample
'''
"""
TODO :: discrepency fromo original implementation to investigate !!
Original ::
all_my_duration = []
# get envelpe of signal
envelope = timbral_util.sample_and_hold_envelope_calculation(
audio_samples, fs)
# estimate onsets
onsets = timbral_util.calculate_onsets(audio_samples, envelope, fs)
# get RMS envelope - better follows decays than the sample-and-hold
rms_step_size = 256
rms_envelope = timbral_util.calculate_rms_enveope(
audio_samples, step_size=rms_step_size)
# convert decay threshold to magnitude
decay_threshold = timbral_util.db2mag(db_decay_threshold)
# rescale onsets to rms stepsize - casting to int
time_convert = fs / float(rms_step_size)
onsets = (np.array(onsets) / float(rms_step_size)).astype('int')
onsets = [0]
for idx, onset in enumerate(onsets):
# NOTE :: simplification
segment = rms_envelope
# get location of max RMS frame
max_idx = np.argmax(segment)
# get the segment from this max until the next onset
post_max_segment = segment[max_idx:]
# estimate duration based on decay or until next onset
if min(post_max_segment) >= decay_threshold:
my_duration = len(post_max_segment) / time_convert
else:
my_duration = np.where(post_max_segment < decay_threshold)[
0][0] / time_convert
# append to array
all_my_duration.append(my_duration)
# calculate the lof of mean duration
mean_my_duration = np.log10(np.mean(all_my_duration))
"""
onsets = b * [0]
all_my_duration_array = []
decay_threshold = timbral_util.db2mag(db_decay_threshold)
for i in range(b):
all_my_duration = []
# get RMS envelope - better follows decays than the sample-and-hold
rms_step_size = 256
segment = tf.numpy_function(
timbral_util.calculate_rms_enveope, [audio_samples[i], rms_step_size, 256, True], [audio_samples.dtype], name='tf_rms_envelope')
# rms_envelope is float64
# convert decay threshold to magnitude
# rescale onsets to rms stepsize - casting to int
time_convert = fs / float(rms_step_size)
# onsets = (np.array(onsets) / float(rms_step_size)).astype('int')
# assumes there is only one onset
# onset = 0, idx = 0
# segment = np.array(rms_envelope)
# get location of max RMS frame
max_idx = np.argmax(segment)
# get the segment from this max until the next onset
post_max_segment = segment[max_idx:]
# estimate duration based on decay or until next onset
# my_duration = len(post_max_segment) / time_convert
# my_duration = len(post_max_segment) / time_convert
shape = tf.cast(K.sum(tf.shape(post_max_segment)), audio_samples.dtype)
# TODO :: find efficient way to make this condition work
my_duration = shape / time_convert
"""
if min(post_max_segment) >= decay_threshold:
my_duration = len(post_max_segment) / time_convert
else:
my_duration = np.where(post_max_segment < decay_threshold)[
0][0] / time_convert
"""
# append to array
all_my_duration.append(my_duration)
all_my_duration_array.append(all_my_duration)
all_my_duration = tf.cast(
tf.stack(all_my_duration_array), audio_samples.dtype)
# calculate the lof of mean duration
mean_my_duration = timbral_util.tf_log10(
K.mean(all_my_duration, axis=-1))
'''
METRIC 4 - f0 estimation with peak pickingZ
# Original
all_spectrum = np.sum(spec, axis=1)
# normalise this
norm_spec = (all_spectrum - np.min(all_spectrum)) / \
(np.max(all_spectrum) - np.min(all_spectrum))
# set limit for peak picking
cthr = 0.01
# detect peaks
peak_idx, peak_value, peak_freq = timbral_util.detect_peaks(norm_spec, cthr=cthr, unprocessed_array=norm_spec,
freq=freq)
# estimate peak
pitch_estimate = np.log10(min(peak_freq)) if peak_freq[0] > 0 else 0
'''
# get the overall spectrum
all_spectrum = K.sum(spec, axis=1) # norm_spec ::(1,2049)
# normalise this
"""
norm_spec:: (2049)
norm_spec = (all_spectrum - np.min(all_spectrum)) / \
(np.max(all_spectrum) - np.min(all_spectrum))
"""
b_norm = K.max(all_spectrum, axis=-1, keepdims=True) - \
K.min(all_spectrum, axis=-1, keepdims=True)
norm_spec = (all_spectrum - K.min(all_spectrum,
axis=-1, keepdims=True)) / b_norm
# set limit for peak picking
cthr = 0.01
"""
peak_idx, _, peak_x = tf.numpy_function(timbral_util.detect_peaks, [
spec, freq, 0.2, spec, fs], [tf.int64, tf.float64, tf.float64])
(array, freq=0, cthr=0.2, unprocessed_array=False, fs=44100):
"""
# detect peaks
pitch_estimate_array = []
for i in range(b):
_, _, peak_freq = tf.numpy_function(
timbral_util.detect_peaks, [norm_spec[i], freq, cthr, norm_spec[i], fs], [tf.int64, tf.float64, tf.float64], name='detect_peaks')
# estimate peak
if peak_freq[0] > 0:
pitch_estimate = timbral_util.tf_log10(
K.min(peak_freq), peak_freq.dtype)
else:
pitch_estimate = tf.cast(0, peak_freq.dtype)
pitch_estimate_array.append(
tf.cast(pitch_estimate, audio_samples.dtype))
pitch_estimate = tf.stack(pitch_estimate_array)
# get outputs
if dev_output:
return limited_weighted_mean_normalised_lower_centroid, weighted_mean_normalised_lower_ratio, mean_my_duration, \
pitch_estimate, weighted_mean_normalised_lower_ratio * mean_my_duration, \
timbral_util.sigmoid(
weighted_mean_normalised_lower_ratio) * mean_my_duration
else:
'''
Perform linear regression to obtain depth
'''
# coefficients from linear regression
# print("at output")
# metric3 is the main contributor to discreppancy between original and modded
# what are the best metrics
metric1 = limited_weighted_mean_normalised_lower_centroid
metric2 = weighted_mean_normalised_lower_ratio
metric3 = mean_my_duration
metric4 = pitch_estimate
metric5 = metric2 * metric3
metric6 = timbral_util.sigmoid(metric2) * metric3
tf.debugging.assert_all_finite(metric1, "metric 1 is nan")
tf.debugging.assert_all_finite(metric2, "metric 2 is nan")
tf.debugging.assert_all_finite(metric3, "metric 3 is nan")
tf.debugging.assert_all_finite(metric4, "metric 4 is nan")
tf.debugging.assert_all_finite(metric5, "metric 5 is nan")
tf.debugging.assert_all_finite(metric6, "metric 6 is nan")
"""
print(metric1.numpy(), metric2.numpy(), metric3.numpy(),
metric4.numpy(), metric5.numpy(), metric6.numpy())
print("dev output", np.array([limited_weighted_mean_normalised_lower_centroid.numpy(), weighted_mean_normalised_lower_ratio.numpy(),
mean_my_duration,
pitch_estimate.numpy()]).flatten())
"""
# perform linear regression
depth = -0.0043703565847874465 * metric1 + 32.83743202462131*metric2 + 4.750862716905235*metric3 - \
14.217438690256062*metric4 + 3.8782339862813924*metric5 - \
0.8544826091735516*metric6 + 66.69534393444391
if clip_output:
depth = timbral_util.output_clip(depth)
return depth
|
{"hexsha": "e14f3935b4de13c3295b4569aa280e76768370a9", "size": 31467, "ext": "py", "lang": "Python", "max_stars_repo_path": "timbral_models/Timbral_Depth.py", "max_stars_repo_name": "ALavault/tf_timbral_models", "max_stars_repo_head_hexsha": "302923f48e4bb09d7aaa3197758cd7764ac71d47", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "timbral_models/Timbral_Depth.py", "max_issues_repo_name": "ALavault/tf_timbral_models", "max_issues_repo_head_hexsha": "302923f48e4bb09d7aaa3197758cd7764ac71d47", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "timbral_models/Timbral_Depth.py", "max_forks_repo_name": "ALavault/tf_timbral_models", "max_forks_repo_head_hexsha": "302923f48e4bb09d7aaa3197758cd7764ac71d47", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.8258928571, "max_line_length": 142, "alphanum_fraction": 0.6349509009, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 6816}
|
[STATEMENT]
lemma hd_reach_all_if_nfwd_app_fwd:
"\<lbrakk>\<not>forward_arcs (y#xs); forward_arcs (y#ys@xs); x \<in> set (y#ys@xs)\<rbrakk>
\<Longrightarrow> hd (rev (y#ys@xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<not> forward_arcs (y # xs); forward_arcs (y # ys @ xs); x \<in> set (y # ys @ xs)\<rbrakk> \<Longrightarrow> hd (rev (y # ys @ xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> x
[PROOF STEP]
using hd_reach_all_forward'[of "rev (y#ys@xs)"] len_gt1_if_not_fwd_conc forward_arcs_alt
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>1 < length (rev (y # ys @ xs)); forward (rev (y # ys @ xs)); ?x \<in> set (rev (y # ys @ xs))\<rbrakk> \<Longrightarrow> hd (rev (y # ys @ xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> ?x
\<not> forward_arcs (?y # ?xs) \<Longrightarrow> 1 < length (?y # ?xs)
forward ?xs = forward_arcs (rev ?xs)
goal (1 subgoal):
1. \<lbrakk>\<not> forward_arcs (y # xs); forward_arcs (y # ys @ xs); x \<in> set (y # ys @ xs)\<rbrakk> \<Longrightarrow> hd (rev (y # ys @ xs)) \<rightarrow>\<^sup>*\<^bsub>T\<^esub> x
[PROOF STEP]
by auto
|
{"llama_tokens": 498, "file": "Query_Optimization_IKKBZ_Examples", "length": 2}
|
import torch
import torch.nn.functional as F
from torch import nn
import numpy as np
from torch.optim.lr_scheduler import CosineAnnealingLR
class BiC(nn.Module):
    """Bias Correction (BiC) for class-incremental learning.

    Learns a scalar scale ``beta`` and shift ``gamma`` applied to the logits
    of the newest task's classes, compensating for the bias toward new
    classes introduced by imbalanced incremental training.
    """
    def __init__(self, lr, scheduling, lr_decay_factor, weight_decay, batch_size, epochs):
        """Store optimisation hyper-parameters; beta/gamma start as identity.

        Args:
            lr: SGD learning rate used when fitting beta/gamma.
            scheduling: milestone epochs for the MultiStepLR scheduler.
            lr_decay_factor: LR multiplier applied at each milestone.
            weight_decay: L2 penalty on beta/gamma.
            batch_size: mini-batch size for the correction-fitting loop.
            epochs: number of passes over the correction loader.
        """
        super(BiC, self).__init__()
        # beta=1, gamma=0 is the identity correction (no bias removal yet).
        self.beta = torch.nn.Parameter(torch.ones(1)) #.cuda()
        self.gamma = torch.nn.Parameter(torch.zeros(1)) #.cuda()
        self.lr = lr
        self.scheduling = scheduling
        self.lr_decay_factor = lr_decay_factor
        self.weight_decay = weight_decay
        # If True, learn one beta/gamma per class instead of shared scalars.
        self.class_specific = False
        self.batch_size = batch_size
        self.epochs = epochs
        # Becomes True once ``update`` has fitted the correction at least once.
        self.bic_flag = False
    def reset(self, lr=None, scheduling=None, lr_decay_factor=None, weight_decay=None, n_classes=-1):
        """Re-create beta/gamma on the GPU and build a fresh SGD + scheduler.

        Arguments left as ``None`` fall back to the values given at
        construction time.  ``n_classes`` must be supplied (!= -1) when
        ``class_specific`` is enabled.
        """
        with torch.no_grad():
            if lr is None:
                lr = self.lr
            if scheduling is None:
                scheduling = self.scheduling
            if lr_decay_factor is None:
                lr_decay_factor = self.lr_decay_factor
            if weight_decay is None:
                weight_decay = self.weight_decay
            if self.class_specific:
                assert n_classes != -1
                self.beta = torch.nn.Parameter(torch.ones(n_classes).cuda())
                self.gamma = torch.nn.Parameter(torch.zeros(n_classes).cuda())
            else:
                self.beta = torch.nn.Parameter(torch.ones(1).cuda())
                self.gamma = torch.nn.Parameter(torch.zeros(1).cuda())
            # Only beta and gamma are optimised; the network itself is frozen.
            self.optimizer = torch.optim.SGD([self.beta, self.gamma], lr=lr, momentum=0.9, weight_decay=weight_decay)
            # self.scheduler = CosineAnnealingLR(self.optimizer, 10)
            self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, scheduling, gamma=lr_decay_factor)
    def extract_preds_and_targets(self, model, loader):
        """Run ``model`` over ``loader``, collecting logits and labels.

        Returns a (preds, targets) pair of tensors concatenated over the
        whole loader.  Gradients are not tracked; tensors live on the GPU.
        """
        preds, targets = [], []
        with torch.no_grad():
            for (x, y) in loader:
                # The model returns a dict; 'logit' holds the raw class scores.
                preds.append(model(x.cuda())['logit'])
                targets.append(y.cuda())
        return torch.cat((preds)), torch.cat((targets))
    def update(self, logger, task_size, model, loader, loss_criterion=None):
        """Fit beta/gamma on ``loader`` while ``model`` stays frozen.

        Args:
            logger: logger used for progress reporting.
            task_size: number of classes in the newest task (the last
                ``task_size`` logit columns).  0 means nothing to correct.
            model: frozen network, put into eval mode for deterministic logits.
            loader: held-out loader used to fit the correction parameters.
            loss_criterion: defaults to ``F.cross_entropy`` when ``None``.
        """
        if task_size == 0:
            logger.info("no new task for BiC!")
            return
        if loss_criterion is None:
            loss_criterion = F.cross_entropy
        self.bic_flag = True
        logger.info("Begin BiC ...")
        model.eval()
        for epoch in range(self.epochs):
            # Recompute the logits and reshuffle them for this epoch.
            preds_, targets_ = self.extract_preds_and_targets(model, loader)
            order = np.arange(preds_.shape[0])
            np.random.shuffle(order)
            preds, targets = preds_.clone(), targets_.clone()
            preds, targets = preds[order], targets[order]
            _loss = 0.0
            _correct = 0
            _count = 0
            for start in range(0, preds.shape[0], self.batch_size):
                # Slice out one mini-batch; the final batch may be smaller.
                if start + self.batch_size < preds.shape[0]:
                    out = preds[start:start + self.batch_size, :].clone()
                    lbls = targets[start:start + self.batch_size]
                else:
                    out = preds[start:, :].clone()
                    lbls = targets[start:]
                if self.class_specific is False:
                    # Correct only the new-task logits (last ``task_size``
                    # columns); old-class logits pass through unchanged.
                    out1 = out[:, :-task_size].clone()
                    out2 = out[:, -task_size:].clone()
                    outputs = torch.cat((out1, out2 * self.beta + self.gamma), 1)
                else:
                    outputs = out * self.beta + self.gamma
                loss = loss_criterion(outputs, lbls)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                _, pred = outputs.max(1)
                _correct += (pred == lbls).sum()
                _count += lbls.size(0)
                # Accumulate the sum of per-sample losses for epoch averaging.
                _loss += loss.item() * outputs.shape[0]
            logger.info("epoch {} loss {:4f} acc {:4f}".format(epoch, _loss / preds.shape[0], _correct / _count))
            self.scheduler.step()
        # NOTE(review): .item() assumes scalar beta/gamma; would fail if
        # class_specific were enabled — confirm that path is never logged here.
        logger.info("beta {:.4f} gamma {:.4f}".format(self.beta.cpu().item(), self.gamma.cpu().item()))
    @torch.no_grad()
    def post_process(self, preds, task_size):
        """Apply the learned correction to ``preds``.

        In the shared-scalar case the new-task columns of ``preds`` are
        modified in place; a no-op when ``task_size`` is 0.
        """
        if self.class_specific is False:
            if task_size != 0:
                preds[:, -task_size:] = preds[:, -task_size:] * self.beta + self.gamma
        else:
            preds = preds * self.beta + self.gamma
        return preds
class WA(object):
    """Weight Aligning (WA): rescales new-class logits by the ratio of the
    mean old-class weight norm to the mean new-class weight norm."""
    def __init__(self):
        # Scaling factor; populated by ``update`` before first use.
        self.gamma = None
    @torch.no_grad()
    def update(self, classifier, task_size):
        """Derive gamma from the classifier's per-row L2 weight norms."""
        weights = classifier.weight
        norms_old = torch.norm(weights[:-task_size], p=2, dim=1)
        norms_new = torch.norm(weights[-task_size:], p=2, dim=1)
        self.gamma = norms_old.mean() / norms_new.mean()
        print(self.gamma.cpu().item())
    @torch.no_grad()
    def post_process(self, logits, task_size):
        """Scale the last ``task_size`` logit columns by gamma, in place."""
        corrected = logits[:, -task_size:] * self.gamma
        logits[:, -task_size:] = corrected
        return logits
|
{"hexsha": "1e1bc81b0252fee832ffe5c3966d78728bd575af", "size": 5074, "ext": "py", "lang": "Python", "max_stars_repo_path": "inclearn/convnet/imbalance.py", "max_stars_repo_name": "Danden1/DER-ClassIL.pytorch", "max_stars_repo_head_hexsha": "66ccdb45890d3da335f4dcb841160cbea8719c15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2021-03-29T07:50:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T04:13:27.000Z", "max_issues_repo_path": "inclearn/convnet/imbalance.py", "max_issues_repo_name": "Danden1/DER-ClassIL.pytorch", "max_issues_repo_head_hexsha": "66ccdb45890d3da335f4dcb841160cbea8719c15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2021-04-07T01:42:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T08:59:30.000Z", "max_forks_repo_path": "inclearn/convnet/imbalance.py", "max_forks_repo_name": "Danden1/DER-ClassIL.pytorch", "max_forks_repo_head_hexsha": "66ccdb45890d3da335f4dcb841160cbea8719c15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-07-02T02:33:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T11:23:20.000Z", "avg_line_length": 41.2520325203, "max_line_length": 117, "alphanum_fraction": 0.5691761924, "include": true, "reason": "import numpy", "num_tokens": 1122}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
...
"""
import urllib2
import time
import csv, cStringIO
import numpy as np
import matplotlib.pyplot as plt
# Output file for the discharge log, plus the row formats used for console
# printing (with units) and for the CSV log file (bare numbers).
log_file = "Yun_Log_BatteryDisCharging.log"
fmt_print = "%s, %14.3f, %9.3f s, %9.3f s, %9.3f V, %9.3f A, %9.3f Ohm, %9.3f W, %9.3f mAh, %9.3f J"
fmt_write = "%s, %14.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f"
# Former raw-ADC calibration (superseded by the /mon/ endpoints below).
#calibration = {
#    'V': lambda V: 9.3 * V * 1.E-3,             # ~0.1V/V  [returns V]
#    'I': lambda I: (I - 2502.) / 225. * 1.E+3,  # ~200mV/A, offset ~2500mV [returns mA]
#    }
def read_adc_value(adc):
    """Query ADC channel ``adc`` on the Yun; return (raw count, value)."""
    reply = urllib2.urlopen("http://arduino.local/arduino/adc/%s" % adc).read()
    fields = reply.split()
    # Second-to-last field is the raw integer; last field is the value with
    # surrounding unit characters stripped.
    return (int(fields[-2]), float(fields[-1][1:-3]))
def read_mon_values():
    """Fetch the monitor readings U, I, C, E, t, W from the Yun as floats."""
    # (endpoint URL, number of trailing unit characters to strip)
    sources = (
        ("http://arduino.local/arduino/mon/U", 1),
        ("http://arduino.local/arduino/mon/I", 1),
        ("http://arduino.local/arduino/mon/C", 3),
        ("http://arduino.local/arduino/mon/E", 1),
        ("http://arduino.local/arduino/mon/t", 1),
        ("http://arduino.local/arduino/mon/W", 1),
    )
    readings = []
    for url, trim in sources:
        reply = urllib2.urlopen(url).read()
        readings.append(reply.split()[-1][:-trim])
    return tuple(map(float, readings))
def read_mon_log():
    """Download the Yun's CSV log; parse each non-empty row into floats."""
    payload = urllib2.urlopen("http://arduino.local/arduino/mon/log").read()
    rows = csv.reader(cStringIO.StringIO(payload), quotechar='|')
    parsed = []
    for row in rows:
        # Skip rows whose first field is empty (blank/partial log lines).
        if row[0]:
            parsed.append(map(float, row))
    return parsed
# --- Startup: work out how stale the local log copy is ----------------------
print "Please make sure to connect to 'ArduinoYun-XXXXXXXXXXXX' wifi/wlan first!"
print "Checking log file for timestamp of last entry ..."
with open(log_file, 'rb') as csvfile:
    spamreader = csv.reader(csvfile, quotechar='|')
    # Skim through the whole file; after the loop 'row' holds the last entry.
    for row in spamreader:
        continue
    delay = time.time() - time.mktime(time.strptime(row[0]))
    # Fetch the on-device log only if our copy is more than 5 minutes old.
    getlog = (delay > 300.)
    print "Last timestamp was:", row[0]
    print "Time delay since then:", delay, "( >5mins?:", getlog, ")"
#plt.axis([0, 10, 0, 1])
# Live plot: blue = voltage, red = current, x-axis = watch value W.
plt.ylim([-1., 14.])
plt.grid(True)
plt.ion()
# --- Optionally back-fill entries from the device's own log -----------------
if getlog:
    print "Retrieving log data from Yun ..."
    W = read_mon_values()[-1]
    # If the gap is short and the watch counter has not wrapped, part of the
    # device log may already be in our file; remember the last logged value.
    if (delay < 2*3600) and (W >= float(row[2])): # there could still be data in the log we have already
        lW = float(row[2])
        print "Last entry watch value was:", lW
    else:
        lW = -1
    ts = (time.asctime(), time.time())
    i = 0
    for item in read_mon_log():
        # Skip device-log entries we already hold (watch value not newer).
        if (lW >= float(item[0])):
            continue
        lW = float(item[0])
        output = fmt_print % (ts + tuple(item))
        print output
        output = fmt_write % (ts + tuple(item))
        with open(log_file, "a") as log:
            log.write(output + "\n")
        plt.scatter([item[0]]*2, item[2:4], color=['b', 'r'])
        i += 1
    print "Number of entries retrieved:", i
# --- Main loop: poll live values once a minute and append to the log --------
print "Retrieving live data from Yun, starting ..."
while True:
    ts = (time.asctime(), time.time())
#    adc0 = read_adc_value(0)
#    voltage = calibration['V'](adc0[1])
#
#    adc1 = read_adc_value(1)
#    current = calibration['I'](adc1[1])
    voltage, current, C, E, t, W = read_mon_values()
    resistance = np.float64(voltage) / current # division with nan/inf (http://stackoverflow.com/questions/10011707/how-to-get-nan-when-i-divide-by-zero)
    power = voltage * current
    #output = "%s, %014.3f, %09.3f V, %09.3f mA, %9.3f Ohm, %9.3f W" % (ts + (voltage, current, resistance, power))
    output = fmt_print % (ts + (W, t, voltage, current, resistance, power, C, E))
    print output
    output = fmt_write % (ts + (W, t, voltage, current, resistance, power, C, E))
    with open(log_file, "a") as log:
        log.write(output + "\n")
    plt.scatter([W]*2, [voltage, current], color=['b', 'r'])
    #time.sleep(10.)
    # plt.pause doubles as the sampling interval and keeps the figure live.
    plt.pause(60.)
|
{"hexsha": "4a8ffa657c652841fe438e788c361f49c6e34f6c", "size": 3989, "ext": "py", "lang": "Python", "max_stars_repo_path": "Yun_Log_BatteryDisCharging/Yun_Log_BatteryDisCharging.py", "max_stars_repo_name": "drtrigon/sketchbook", "max_stars_repo_head_hexsha": "fa58bf767cce2537c86e5ce1638771a164a5b118", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-06-29T22:53:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T23:26:06.000Z", "max_issues_repo_path": "Yun_Log_BatteryDisCharging/Yun_Log_BatteryDisCharging.py", "max_issues_repo_name": "drtrigon/sketchbook", "max_issues_repo_head_hexsha": "fa58bf767cce2537c86e5ce1638771a164a5b118", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Yun_Log_BatteryDisCharging/Yun_Log_BatteryDisCharging.py", "max_forks_repo_name": "drtrigon/sketchbook", "max_forks_repo_head_hexsha": "fa58bf767cce2537c86e5ce1638771a164a5b118", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-11-30T00:28:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-24T12:32:36.000Z", "avg_line_length": 31.1640625, "max_line_length": 154, "alphanum_fraction": 0.5968914515, "include": true, "reason": "import numpy", "num_tokens": 1283}
|
\documentclass[letterpaper,10pt]{article}
\usepackage[margin=2cm]{geometry}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{multicol}
\usepackage{listings}
\usepackage{color}
\definecolor{mygreen}{rgb}{0,0.6,0}
\definecolor{mygray}{rgb}{0.5,0.5,0.5}
\definecolor{mymauve}{rgb}{0.58,0,0.82}
\lstset{ %
backgroundcolor=\color{white}, % choose the background color; you must add \usepackage{color} or \usepackage{xcolor}
basicstyle=\footnotesize, % the size of the fonts that are used for the code
breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace
breaklines=true, % sets automatic line breaking
captionpos=b, % sets the caption-position to bottom
commentstyle=\color{mygreen}, % comment style
deletekeywords={...}, % if you want to delete keywords from the given language
escapeinside={\%*}{*)}, % if you want to add LaTeX within your code
extendedchars=true, % lets you use non-ASCII characters; for 8-bits encodings only, does not work with UTF-8
frame=single, % adds a frame around the code
keepspaces=true, % keeps spaces in text, useful for keeping indentation of code (possibly needs columns=flexible)
keywordstyle=\color{blue}, % keyword style
language=Matlab, % the language of the code
otherkeywords={}, % if you want to add more keywords to the set
numbers=left, % where to put the line-numbers; possible values are (none, left, right)
numbersep=5pt, % how far the line-numbers are from the code
numberstyle=\tiny\color{mygray}, % the style that is used for the line-numbers
rulecolor=\color{black}, % if not set, the frame-color may be changed on line-breaks within not-black text (e.g. comments (green here))
showspaces=false, % show spaces everywhere adding particular underscores; it overrides 'showstringspaces'
showstringspaces=false, % underline spaces within strings only
showtabs=false, % show tabs within strings adding particular underscores
stepnumber=1, % the step between two line-numbers. If it's 1, each line will be numbered
stringstyle=\color{mymauve}, % string literal style
tabsize=4, % sets default tabsize to 2 spaces
title=\lstname % show the filename of files included with \lstinputlisting; also try caption instead of title
}
\DeclareMathOperator*{\argmin}{arg\,min}
\DeclareMathOperator*{\argmax}{arg\,max}
\title{\textbf{Pattern Recognition Theory 18794 Homework 2}}
\author{Mengwen He (Alex)}
\begin{document}
\maketitle
\section*{Problem 1}
PCA vs LDA. Given the following dataset for a 2-class problem with 2-D features:
\begin{equation}
\nonumber
c_1=\left\{\left[\begin{array}{c}
2 \\ 1
\end{array}\right],\left[\begin{array}{c}
2 \\ 2
\end{array}\right],\left[\begin{array}{c}
2 \\ 3
\end{array}\right]\right\},~
c_2=\left\{\left[\begin{array}{c}
4 \\ 3
\end{array}\right],\left[\begin{array}{c}
5 \\ 3
\end{array}\right],\left[\begin{array}{c}
6 \\ 4
\end{array}\right]\right\}
\end{equation}
\begin{enumerate}
\item Using global PCA, find the best direction onto which the data will be projected on.\\
\lstinputlisting[language=Matlab]{./matlab/P_1_1.m}
\begin{enumerate}
\item Express the equation of the line along this direction passing through the samples mean in the following form: $\vec{w}^T\vec{x} + w_0 = 0$, where
$$\vec{x}=\left[\begin{array}{c}
x_1 \\ x_2
\end{array}\right]\text{~and~}\vec{w}=\left[\begin{array}{c}
w_1 \\ w_2
\end{array}\right]$$
and $w_0$ is a scalar known as the bias. Plot the $\vec{w}^T\vec{x} + w_0 = 0$ line
along with all the sample points as well the line along $\vec{w}$ which recall starts from the mean.
$$\vec{w}=\left[\begin{array}{c}
0.4541 \\
-0.8910 \\
\end{array}\right],~w_0=0.7866$$
\item Project and reconstruct all the sample points. Plot the reconstructed points.
\begin{center}
\includegraphics[width=0.65\textwidth]{./matlab/PCA.eps}
\end{center}
\item Find the total mean square error MSE for all sample points (between the original points and the reconstructed points).
$$MSE=0.2943$$
\item Find the Fisher Ratio for this projection defined by
$$FR=\frac{(m_1-m_2)^2}{\sigma_1^2+\sigma_2^2}$$
where $m_i$ is the mean of the projected samples of class $i$, and $\sigma_i^2$ is the equivalent variance. You can compute the $FR$ on the projected 1-D points (rather than the reconstructed points which are 2-D vectors).
$$FR=10.9421$$
\end{enumerate}
\item Using Fisher Linear Discriminant Analysis (LDA), determine the best one-dimensional space onto which the above data should be projected.\\
\lstinputlisting[language=Matlab]{./matlab/P_1_2.m}
\begin{enumerate}
\item Express the equation of the line along this direction passing
through the samples mean in the following form: $\vec{w}^T\vec{x}+w_0 = 0$. Plot
the $\vec{w}^T\vec{x} + w_0 = 0$ line along with all the sample points.
$$\vec{w}=\left[\begin{array}{c}
-0.0499 \\
-0.9988 \\
\end{array}\right],~w_0=2.8381$$
\item Project and reconstruct all the sample points. Plot the reconstructed points. Make sure they lie on a line.\\
\begin{center}
\includegraphics[width=0.65\textwidth]{./matlab/LDA.eps}
\end{center}
\item Find the total MSE for all sample points (between the original points and the reconstructed points). Compare that to the total MSE found in the first question.
$$MSE=1.0095$$
\item Find the Fisher Ratio for this projection. You can compute the FR on the projected points (rather than the reconstructed points which are 2-D vectors). Compare that to the $FR$ found in part 1. Interpret your result.
$$FR=13.5385$$
\end{enumerate}
\end{enumerate}
\section*{Problem 2}
Another goal of PCA is to obtain the linear subspace which minimizes the projection error caused by dimension reduction. The two goals, maximum variance and minimum error, have the same formulation as PCA.
\begin{enumerate}
\item Assume all data samples $\{\vec{x}_1,\dots,\vec{x}_n\}$ are centered. Formulate the optimization problem to solve for a vector $\vec{w}$ spanning the linear subspace which minimizes the sum of the squared reconstruction errors:
$$\sum_{i=1}^{n}{d_i^2}$$
where $d_i$, the reconstruction error of $\vec{x}_i$, is defined as the distance between $\vec{x}_i$ and its projection onto the linear subspace. Assume that $\vec{w}$ has a unit norm since we are interested only in the direction of the vector $\vec{w}$. The answer must be written in terms of $\vec{w}$ and $\vec{x}_i$ without $d_i$.\\
\textbf{Answer:}\\
$\because$
\begin{equation}
\nonumber
\begin{array}{rcl}
d_i^2(\vec{w}) & = & ||\vec{x}_i-\vec{w}\vec{x}_i^T\vec{w}||_2^2 \\
& = & (\vec{x}_i-\vec{w}\vec{x}_i^T\vec{w})^T(\vec{x}_i-\vec{w}\vec{x}_i^T\vec{w}) \\
& = & \vec{x}_i^T\vec{x}_i-2\vec{x}_i^T\vec{w}\vec{x}_i^T\vec{w}+\vec{w}^T\vec{x}_i\vec{x}_i^T\vec{w}\\
& = & \vec{x}_i^T\vec{x}_i-2\vec{w}^T(\vec{x}_i\vec{x}_i^T)\vec{w}+\vec{w}^T(\vec{x}_i\vec{x}_i^T)\vec{w}\\
& = & \vec{x}_i^T\vec{x}_i-\vec{w}^T(\vec{x}_i\vec{x}_i^T)\vec{w}\\
\sum_{i=1}^{n}{d_i^2(\vec{w})} & = & \sum_{i=1}^{n}{\vec{x}_i^T\vec{x}_i}-\vec{w}^T\sum_{i=1}^{n}{(\vec{x}_i\vec{x}_i^T)}\vec{w} \\
\text{where}~~||\vec{w}|| & = & 1 \\
& = & \vec{w}^T\vec{w}=1
\end{array}
\end{equation}
$\therefore$ use Lagrange Multipliers to get the optimized $\vec{w}$:
\begin{equation}
\nonumber
\begin{array}{rcl}
L(\vec{w},\lambda) & = & \sum_{i=1}^{n}{d_i^2(\vec{w})} - \lambda(1-\vec{w}^T\vec{w}) \\
& = & \sum_{i=1}^{n}{\vec{x}_i^T\vec{x}_i}-\vec{w}^T\sum_{i=1}^{n}{(\vec{x}_i\vec{x}_i^T)}\vec{w} - \lambda(1-\vec{w}^T\vec{w}) \\
\frac{\partial L}{\partial \lambda} & = & \vec{w}^T\vec{w}-1 = 0 \\
\frac{\partial L}{\partial \vec{w}} & = & -2(\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T})\vec{w}+2\lambda\vec{w} = 0\\
& \Rightarrow & \left(\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T}\right)\vec{w}=\lambda\vec{w} \\
\end{array}
\end{equation}
$\therefore$ $\lambda$s are the eigenvalues, and $\vec{w}$s are the corresponding eigenvectors of $\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T}$.\\
$\because$
$$\left(\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T}\right)\vec{w}=\lambda\vec{w}\Rightarrow\vec{w}^T\left(\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T}\right)\vec{w}=\lambda$$
and
$$\sum_{i=1}^{n}{\vec{x}_i^T\vec{x}_i}\text{ is constant}$$
$\therefore$ to minimize $\sum_{i=1}^{n}{d_i^2(\vec{w})}$, we need to choose the $\vec{w}^*$ to maximize $\vec{w}^T\left(\sum_{i=1}^{n}{\vec{x}_i\vec{x}_i^T}\right)\vec{w}=\lambda=\lambda_{max}$.
\item From your answer in (a), show that the optimization in part (a) is equivalent to the PCA optimization using the following equivalence:
$$\Sigma = \sum_{i}^{n}{\vec{x}_i\vec{x}_i^T}$$
\textbf{Answer:}\\
$\because$ PCA is to choose the $\vec{w}^*$ to maximize the variant after projection $\vec{w}^T\Sigma\vec{w}=\lambda=\lambda_{max}$.\\
$\therefore$ they are equivalent.
\end{enumerate}
\section*{Problem 3}
You are to implement PCA from scratch and not use the MATLAB built in
functions. You are allowed to use SVD and/or eigenvalue decomposition. Please
submit a printout of your code as well.
\begin{enumerate}
\item Plot the mean image of digit 1 and plot the first 5 global PCA vectors
corresponding to the dataset corresponding to two cases: using Gram
Matrix Trick and without Gram Matrix trick. Do not forget to remove
the mean before you run PCA. Measure the time taken for the PCA vector
computation in each case.\\
\lstinputlisting[language=Matlab]{./matlab/P_3_1.m}
\begin{center}
\includegraphics[width=0.9\textwidth]{./matlab/digit_1.eps}\\
The first row is the digit 1's mean image; the second row is the first 5 eigenvector images without Gram trick; the third row is the first 5 eigenvector images with Gram trick.
\end{center}
\item Comment on the time taken in each case. Does it make sense to use the
Gram trick for this particular dataset?\\
\textbf{Answer:}
\begin{itemize}
\item When samplenum=1000 (total number is 1000*10=10000): w/o Gram costs 0.100550s, and w/ Gram costs 4.579334s.
\item When samplenum=100 (total number is 100*10=1000): w/o Gram costs 0.037760s, and w/ Gram costs 0.038084s.
\item When samplenum=10 (total number is 10*10=100): w/o Gram costs 0.013963s, and w/ Gram costs 0.042149s.
\item When samplenum=1 (total number is 1*10=10): w/o Gram costs 0.008933s, and w/ Gram costs 0.003651s.
\end{itemize}
Therefore, if $N<<d$, Gram trick is helpful for the PCA algorithm speed.
\item Pick any random image from the dataset which can be any image from any
class and project onto the eigen space (the global PCA eigen space that
you obtained in part 1 of this problem) and reconstruct it using the first n
eigen vectors (not just the nth vector but all of them up until n) (for the
two cases, Gram trick and without it) where $n = \{1, 2, 5, 10, 20\}$. Do not
forget to remove the mean before projecting the image, and also to add it
back once it has been reconstructed using only the first n eigen vectors.
For each of the 5 reconstructions, compute the mean square error (MSE)
of the reconstructed image. Note that the MSE between two vectors $\vec{a}$
and $\vec{b}$ is given by
$$MSE(\vec{a},\vec{b})=||\vec{a}-\vec{b}||_2^2$$
Display all reconstructed images and the original in one figure, with the
corresponding MSE value as the title of each sub-figure\\
\lstinputlisting[language=Matlab]{./matlab/P_3_3.m}
\newpage
\begin{center}
\includegraphics[width=0.95\textwidth]{./matlab/WoG_N01.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WG_N01.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WoG_N02.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WG_N02.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WoG_N05.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WG_N05.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WoG_N10.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WG_N10.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WoG_N20.eps}
\includegraphics[width=0.95\textwidth]{./matlab/WG_N20.eps}
\end{center}
\end{enumerate}
\end{document}
|
{"hexsha": "43c29213f84471efdd405ffdd14fbf9119f6f008", "size": 12331, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "18794_Pattern_Recognition_Theory/Homeworks/HW2/document.tex", "max_stars_repo_name": "MengwenHe-CMU/Courses", "max_stars_repo_head_hexsha": "6cd9a9469b573ff76f70ceff6a0aa6103f7cdf3e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-11-23T21:15:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T02:50:37.000Z", "max_issues_repo_path": "18794_Pattern_Recognition_Theory/Homeworks/HW2/document.tex", "max_issues_repo_name": "MengwenHe-CMU/Courses", "max_issues_repo_head_hexsha": "6cd9a9469b573ff76f70ceff6a0aa6103f7cdf3e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "18794_Pattern_Recognition_Theory/Homeworks/HW2/document.tex", "max_forks_repo_name": "MengwenHe-CMU/Courses", "max_forks_repo_head_hexsha": "6cd9a9469b573ff76f70ceff6a0aa6103f7cdf3e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-02-12T02:48:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T02:50:40.000Z", "avg_line_length": 50.5368852459, "max_line_length": 336, "alphanum_fraction": 0.691995783, "num_tokens": 4074}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.