text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <boost/utility.hpp>
#include <gtest/gtest.h>
#include "kudu/util/bit-util.h"
namespace kudu {

// Unit tests for BitUtil::TrailingBits(v, n), which returns the value formed
// by the lowest 'n' bits of 'v'.
TEST(BitUtil, TrailingBits) {
  // n == 0 keeps nothing; n == 1 keeps only the lowest bit.
  EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 0), 0);
  EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 1), 1);
  // Requesting the full word width (or more) returns the value unchanged.
  EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 64),
            BOOST_BINARY(1 1 1 1 1 1 1 1));
  EXPECT_EQ(BitUtil::TrailingBits(BOOST_BINARY(1 1 1 1 1 1 1 1), 100),
            BOOST_BINARY(1 1 1 1 1 1 1 1));
  // Zero input stays zero regardless of 'n'.
  EXPECT_EQ(BitUtil::TrailingBits(0, 1), 0);
  EXPECT_EQ(BitUtil::TrailingBits(0, 64), 0);
  // Only the top bit set: it is kept only when all 64 bits are requested.
  EXPECT_EQ(BitUtil::TrailingBits(1LL << 63, 0), 0);
  EXPECT_EQ(BitUtil::TrailingBits(1LL << 63, 63), 0);
  EXPECT_EQ(BitUtil::TrailingBits(1LL << 63, 64), 1LL << 63);
}

} // namespace kudu
|
{"hexsha": "02f5b7c53b059ebe94dfa8c0da40c30be0251689", "size": 1594, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/kudu/util/bit-util-test.cc", "max_stars_repo_name": "AnupamaGupta01/kudu-1", "max_stars_repo_head_hexsha": "79ee29db5ac1b458468b11f16f57f124601788e6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2016-09-12T06:53:49.000Z", "max_stars_repo_stars_event_max_datetime": "2016-09-12T15:47:46.000Z", "max_issues_repo_path": "src/kudu/util/bit-util-test.cc", "max_issues_repo_name": "AnupamaGupta01/kudu-1", "max_issues_repo_head_hexsha": "79ee29db5ac1b458468b11f16f57f124601788e6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kudu/util/bit-util-test.cc", "max_forks_repo_name": "AnupamaGupta01/kudu-1", "max_forks_repo_head_hexsha": "79ee29db5ac1b458468b11f16f57f124601788e6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-09-04T01:45:03.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-04T01:45:03.000Z", "avg_line_length": 41.9473684211, "max_line_length": 72, "alphanum_fraction": 0.7114178168, "num_tokens": 495}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
import os
import glob
import fnmatch
from warnings import warn
import re
import zipfile
from six.moves import StringIO
import numpy as np
from pims.base_frames import FramesSequence
from pims.frame import Frame
from pims.utils.sort import natural_keys
from PIL import Image
# skimage.io.plugin_order() gives a nice hierarchy of implementations of imread.
# If skimage is not available, go down our own hard-coded hierarchy.
try:
from skimage.io import imread
except ImportError:
try:
from matplotlib.pyplot import imread
except ImportError:
from scipy.ndimage import imread
class ImageSequence(FramesSequence):
    """Read a directory of sequentially numbered image files into an
    iterable that returns images as numpy arrays.

    Parameters
    ----------
    path_spec : string or iterable of strings
        a directory or, safer, a pattern like path/to/images/*.png
        which will ignore extraneous files or a list of files to open
        in the order they should be loaded. When a path to a zipfile is
        specified, all files in the zipfile will be loaded.
    process_func : function, optional
        callable with signature `proc_img = process_func(img)`,
        which will be applied to the data from each frame
    dtype : numpy datatype, optional
        Image arrays will be converted to this datatype.
    as_grey : boolean, optional
        Convert color images to greyscale. False by default.
        May not be used in conjunction with process_func.
    plugin : string
        Passed on to skimage.io.imread if scikit-image is available.
        If scikit-image is not available, this will be ignored and a warning
        will be issued. Not available in combination with zipfiles.

    Examples
    --------
    >>> video = ImageSequence('path/to/images/*.png')  # or *.tif, or *.jpg
    >>> imshow(video[0]) # Show the first frame.
    >>> imshow(video[-1]) # Show the last frame.
    >>> imshow(video[1][0:10, 0:10]) # Show one corner of the second frame.

    >>> for frame in video[:]:
    ...    # Do something with every frame.

    >>> for frame in video[10:20]:
    ...    # Do something with frames 10-20.

    >>> for frame in video[[5, 7, 13]]:
    ...    # Do something with frames 5, 7, and 13.

    >>> frame_count = len(video) # Number of frames in video
    >>> frame_shape = video.frame_shape # Pixel dimensions of video
    """
    def __init__(self, path_spec, process_func=None, dtype=None,
                 as_grey=False, plugin=None):
        # The plugin kwarg is only meaningful for skimage.io.imread.
        try:
            import skimage
        except ImportError:
            if plugin is not None:
                warn("A plugin was specified but ignored. Plugins can only "
                     "be specified if scikit-image is available. Instead, "
                     "ImageSequence will try using matplotlib and scipy "
                     "in that order.")
            self.kwargs = dict()
        else:
            self.kwargs = dict(plugin=plugin)

        self._is_zipfile = False
        self._zipfile = None
        self._get_files(path_spec)

        # Read the first frame eagerly to learn the frame shape and the
        # native dtype of the data.
        tmp = self.imread(self._filepaths[0], **self.kwargs)
        self._first_frame_shape = tmp.shape

        self._validate_process_func(process_func)
        self._as_grey(as_grey, process_func)

        if dtype is None:
            self._dtype = tmp.dtype
        else:
            self._dtype = dtype

    def close(self):
        # Release the zip archive handle, if any, before the base cleanup.
        if self._is_zipfile:
            self._zipfile.close()
        super(ImageSequence, self).close()

    def __del__(self):
        self.close()

    def imread(self, filename, **kwargs):
        """Read a single image, either from the open zip archive or from
        disk via the module-level imread."""
        if self._is_zipfile:
            # BUGFIX: ZipFile.read() returns bytes, so the buffer must be
            # binary; a text StringIO raises TypeError on Python 3.
            from io import BytesIO
            img = BytesIO(self._zipfile.read(filename))
            return np.array(Image.open(img))
        else:
            return imread(filename, **kwargs)

    def _get_files(self, path_spec):
        """Resolve path_spec (iterable of paths, zipfile, directory, or glob
        pattern) into self._filepaths and self._count."""
        # deal with if input is _not_ a string
        if not isinstance(path_spec, six.string_types):
            # assume it is iterable and off we go!
            self._filepaths = sorted(list(path_spec), key=natural_keys)
            # BUGFIX: count the materialized list; len(path_spec) raises
            # TypeError for generators and other one-shot iterables.
            self._count = len(self._filepaths)
            return

        if zipfile.is_zipfile(path_spec):
            self._is_zipfile = True
            self.pathname = os.path.abspath(path_spec)
            self._zipfile = zipfile.ZipFile(path_spec, 'r')
            filepaths = [fn for fn in self._zipfile.namelist()
                         if fnmatch.fnmatch(fn, '*.*')]
            self._filepaths = sorted(filepaths, key=natural_keys)
            self._count = len(self._filepaths)
            if 'plugin' in self.kwargs and self.kwargs['plugin'] is not None:
                warn("A plugin cannot be combined with reading from an "
                     "archive. Extract it if you want to use the plugin.")
            return

        self.pathname = os.path.abspath(path_spec)  # used by __repr__
        if os.path.isdir(path_spec):
            warn("Loading ALL files in this directory. To ignore extraneous "
                 "files, use a pattern like 'path/to/images/*.png'",
                 UserWarning)
            directory = path_spec
            filenames = os.listdir(directory)
            make_full_path = lambda filename: (
                os.path.abspath(os.path.join(directory, filename)))
            filepaths = list(map(make_full_path, filenames))
        else:
            filepaths = glob.glob(path_spec)
        self._filepaths = sorted(filepaths, key=natural_keys)
        self._count = len(self._filepaths)

        # If there were no matches, this was probably a user typo.
        if self._count == 0:
            raise IOError("No files were found matching that path.")

    def get_frame(self, j):
        """Read frame j, convert to self._dtype and apply process_func."""
        # BUGFIX: valid indices are 0 .. count-1, so j == count must raise
        # here too (previously it fell through to a raw IndexError).
        if j >= self._count:
            raise ValueError("File does not contain this many frames")
        res = self.imread(self._filepaths[j], **self.kwargs)
        if res.dtype != self._dtype:
            res = res.astype(self._dtype)
        res = Frame(self.process_func(res), frame_no=j)
        return res

    def __len__(self):
        return self._count

    @property
    def frame_shape(self):
        return self._first_frame_shape

    @property
    def pixel_type(self):
        return self._dtype

    def __repr__(self):
        # May be overwritten by subclasses
        try:
            source = self.pathname
        except AttributeError:
            source = '(list of images)'
        return """<Frames>
Source: {pathname}
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
                                  h=self.frame_shape[1],
                                  count=len(self),
                                  pathname=source,
                                  dtype=self.pixel_type)
def filename_to_tzc(filename, identifiers=None):
    """Find occurrences of z/t/c + number (e.g. t001, z06, c2) in a
    filename and return a list of [t, z, c] coordinates.

    Parameters
    ----------
    filename : string
        filename to be searched for t, z, c indices
    identifiers : list of string, optional
        3 strings preceding t, z, c indices, in that order

    Returns
    -------
    list of int
        t, z, c indices. Elements default to 0 when an index was not found.
    """
    if identifiers is None:
        identifiers = patterns = ['t', 'z', 'c']
    else:
        # Escape user-supplied identifiers so they are safe inside a regex.
        patterns = [re.escape(a) for a in identifiers]

    matches = re.findall(r'({0}|{1}|{2})(\d+)'.format(*patterns), filename)
    # Only the last three identifier+number groups are meaningful.
    if len(matches) > 3:
        matches = matches[-3:]
    found = [m[0] for m in matches]

    result = [0, 0, 0]
    for i, ident in enumerate(identifiers):
        try:
            result[i] = int(matches[found.index(ident)][1])
        except ValueError:
            # Identifier absent from the filename -> keep the default 0.
            result[i] = 0
    return result
class ImageSequence3D(ImageSequence):
    """Read a directory of (t, z, c) numbered image files into an
    iterable that returns images as numpy arrays, indexed by t.

    Parameters
    ----------
    path_spec : string or iterable of strings
        a directory or, safer, a pattern like path/to/images/*.png
        which will ignore extraneous files or a list of files to open
        in the order they should be loaded. When a path to a zipfile is
        specified, all files in the zipfile will be loaded. The filenames
        should contain the indices of T, Z and C, preceded by a dimension
        identifier such as: 'file_t001c05z32'.
    process_func : function, optional
        callable with signature `proc_img = process_func(img)`,
        which will be applied to the data from each frame
    dtype : numpy datatype, optional
        Image arrays will be converted to this datatype.
    as_grey : boolean, optional
        Not implemented for 3D images.
    plugin : string
        Passed on to skimage.io.imread if scikit-image is available.
        If scikit-image is not available, this will be ignored and a warning
        will be issued. Not available in combination with zipfiles.
    tzc_identifiers : list of string, optional
        3 strings preceding t, z, c indices. Default ['t', 'z', 'c'].
    """
    def __init__(self, path_spec, process_func=None, dtype=None,
                 as_grey=False, plugin=None, tzc_identifiers=None):
        self.tzc_identifiers = tzc_identifiers
        super(ImageSequence3D, self).__init__(path_spec, process_func,
                                              dtype, as_grey, plugin)

    def _get_files(self, path_spec):
        """Resolve file paths, then index every file by its (t, z, c)
        coordinates parsed from the filename."""
        super(ImageSequence3D, self)._get_files(path_spec)
        self._toc = np.array([filename_to_tzc(f, self.tzc_identifiers)
                              for f in self._filepaths])
        # Normalize each coordinate so indices start at 0.
        for n in range(3):
            self._toc[:, n] = self._toc[:, n] - min(self._toc[:, n])
        self._filepaths = np.array(self._filepaths)
        self._count = max(self._toc[:, 0]) + 1   # number of time points
        self._sizeZ = max(self._toc[:, 1]) + 1   # number of z slices
        self._sizeC = max(self._toc[:, 2]) + 1   # number of channels
        self._channel = list(range(self._sizeC))

    def get_frame(self, j):
        """Assemble time point j into a (C, Z, Y, X) array (squeezed)."""
        # BUGFIX: valid time indices are 0 .. count-1, so j == count must
        # raise as well (previously it silently returned an all-zero frame).
        if j >= self._count:
            raise ValueError("File does not contain this many frames")
        res = np.zeros((len(self._channel), self._sizeZ,
                        self._first_frame_shape[0],
                        self._first_frame_shape[1]),
                       dtype=self._dtype)
        for (Nc, c) in enumerate(self._channel):
            # Select all files belonging to time point j and channel c.
            selector = np.logical_and(self._toc[:, 0] == j,
                                      self._toc[:, 2] == c)
            filelist = self._filepaths[selector]
            for (z, loc) in enumerate(filelist):
                res[Nc, z] = self.imread(loc, **self.kwargs)
        return Frame(self.process_func(res.squeeze()), frame_no=j)

    @property
    def sizes(self):
        # Dimension sizes keyed by the conventional axis letters.
        return {'X': self._first_frame_shape[1],
                'Y': self._first_frame_shape[0],
                'Z': self._sizeZ,
                'T': self._count,
                'C': self._sizeC}

    @property
    def channel(self):
        # Channels currently selected for reading.
        return self._channel

    @channel.setter
    def channel(self, value):
        # Accept a single channel index or any iterable of indices.
        try:
            channel = tuple(value)
        except TypeError:
            channel = tuple((value,))
        if np.any(np.greater_equal(channel, self._sizeC)) or \
           np.any(np.less(channel, 0)):
            raise IndexError('Channel index out of bounds.')
        self._channel = channel

    def __repr__(self):
        # May be overwritten by subclasses
        try:
            source = self.pathname
        except AttributeError:
            source = '(list of images)'
        return """<Frames>
Source: {pathname}
SizeT: {count} frames
SizeZ: {Z} frames
SizeC: {C} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
                                  h=self.frame_shape[1],
                                  count=len(self),
                                  pathname=source,
                                  dtype=self.pixel_type,
                                  C=self._sizeC,
                                  Z=self._sizeZ)
|
{"hexsha": "0956f081f3a7cd47bb7756d6f2e400f9135daf44", "size": 12268, "ext": "py", "lang": "Python", "max_stars_repo_path": "pims/image_sequence.py", "max_stars_repo_name": "sciunto/pims", "max_stars_repo_head_hexsha": "c98edfc78b229fa55d506f5e4474b4fa8019743c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pims/image_sequence.py", "max_issues_repo_name": "sciunto/pims", "max_issues_repo_head_hexsha": "c98edfc78b229fa55d506f5e4474b4fa8019743c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pims/image_sequence.py", "max_forks_repo_name": "sciunto/pims", "max_forks_repo_head_hexsha": "c98edfc78b229fa55d506f5e4474b4fa8019743c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6208955224, "max_line_length": 80, "alphanum_fraction": 0.5919465276, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2765}
|
"""
Created: May 2018
@author: JerryX
Find more : https://www.zhihu.com/people/xu-jerry-82
"""
import numpy as np
# Plain stochastic gradient descent (no momentum, no adaptation).
class SGDOptimizer(object):
    def __init__(self, optmParams, dataType):
        # optmParams is accepted for interface symmetry with the other
        # optimizers; SGD itself has no hyper-parameters besides lr.
        self.dataType = dataType

    def getUpdWeights(self, w, dw, lr):
        # w and dw are tuples of tensors; hand the results back as a tuple.
        return tuple(self.OptimzSGD(x, dx, lr)
                     for x, dx in zip(w, dw))

    def OptimzSGD(self, x, dx, lr):
        x -= lr * dx  # in-place step down the gradient
        return x
# Momentum optimizer: accumulates an exponentially decaying velocity.
class MomentumOptimizer(object):
    """SGD with classical momentum.

    optmParams is (gamma, eps): gamma is the velocity decay factor; eps is
    stored for interface symmetry but unused by this rule.
    """

    def __init__(self, optmParams, dataType):
        self.gamma, self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.v = []

    def initV(self, w):
        # Lazily allocate one zeroed velocity buffer per weight tensor.
        if not self.isInited:
            self.v = [np.zeros(t.shape, dtype=self.dataType) for t in w]
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        self.initV(w)
        updated = []
        for i, (x, dx) in enumerate(zip(w, dw)):
            xi, self.v[i] = self.OptimzMomentum(x, dx, self.v[i], lr)
            updated.append(xi)
        # return the updated tensors as a tuple
        return tuple(updated)

    def OptimzMomentum(self, x, dx, v, lr):
        v = self.gamma * v + lr * dx
        x -= v  # in-place so callers holding references see the update
        return x, v
# Nesterov's Accelerated Gradient
class NAGOptimizer(object):
    """Momentum with a look-ahead correction term."""

    def __init__(self, optmParams, dataType):
        self.gamma, self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.v = []

    def initV(self, w):
        # lazy init of per-tensor velocity buffers
        if not self.isInited:
            self.v = [np.zeros(t.shape, dtype=self.dataType) for t in w]
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        # w and dw are tuples of tensors; returns a tuple as well
        self.initV(w)
        updated = []
        for i, (x, dx) in enumerate(zip(w, dw)):
            xi, self.v[i] = self.OptimzNAG(x, dx, self.v[i], lr)
            updated.append(xi)
        return tuple(updated)

    def OptimzNAG(self, x, dx, v, lr):
        vt = self.gamma * v + lr * dx
        # look-ahead: combine the previous and the new velocity
        x += self.gamma * v - (1. + self.gamma) * vt
        return x, vt
# Adagrad: adapts the per-parameter learning rate via accumulated
# squared gradients.
class AdagradOptimizer(object):
    def __init__(self, optmParams, dataType):
        # optmParams is a single scalar here: the numerical-stability eps.
        self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.g = []

    def initG(self, w):
        # lazy init of the squared-gradient accumulators
        if not self.isInited:
            self.g = [np.zeros(t.shape, dtype=self.dataType) for t in w]
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        # w and dw are tuples of tensors; returns a tuple as well
        self.initG(w)
        updated = []
        for i, (x, dx) in enumerate(zip(w, dw)):
            xi, self.g[i] = self.OptimzAdagrad(x, dx, self.g[i], lr)
            updated.append(xi)
        return tuple(updated)

    def OptimzAdagrad(self, x, dx, g, lr):
        g += dx ** 2                              # accumulate in place
        x += -lr * dx / (np.sqrt(g) + self.eps)   # in-place update
        return x, g
# RMSprop: Adagrad with an exponentially decaying squared-gradient average.
class RMSpropOptimizer(object):
    def __init__(self, optmParams, dataType):
        self.gamma, self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.eg = []  # running average of squared gradients

    def initEG(self, w):
        # lazy init: one zeroed accumulator per weight tensor
        if not self.isInited:
            self.eg = [np.zeros(t.shape, dtype=self.dataType) for t in w]
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        # w and dw are tuples of tensors; returns a tuple as well
        self.initEG(w)
        updated = []
        for i, (x, dx) in enumerate(zip(w, dw)):
            xi, self.eg[i] = self.OptimzRMSprop(x, dx, self.eg[i], lr)
            updated.append(xi)
        return tuple(updated)

    def OptimzRMSprop(self, x, dx, eg, lr):
        eg = self.gamma * eg + (1 - self.gamma) * (dx ** 2)
        x += -lr * dx / (np.sqrt(eg) + self.eps)  # in-place update
        return x, eg
# AdaDelta: RMSprop-style scaling with a unit-correction term, so no
# explicit learning rate is required.
class AdaDeltaOptimizer(object):
    def __init__(self, optmParams, dataType):
        self.gamma, self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.eg = []    # running average of squared gradients
        self.etsq = []  # running average of squared updates
        self.dt = []    # last applied update

    def initE(self, w):
        # lazy init: three zeroed accumulators per weight tensor
        if not self.isInited:
            for t in w:
                self.eg.append(np.zeros(t.shape, dtype=self.dataType))
                self.etsq.append(np.zeros(t.shape, dtype=self.dataType))
                self.dt.append(np.zeros(t.shape, dtype=self.dataType))
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        # lr is accepted for interface compatibility; AdaDelta ignores it.
        self.initE(w)
        updated = []
        for i, (x, dx) in enumerate(zip(w, dw)):
            xi, self.eg[i], self.etsq[i], self.dt[i] = \
                self.OptimzAdaDelta(x, dx, self.eg[i], self.etsq[i], self.dt[i])
            updated.append(xi)
        return tuple(updated)

    def OptimzAdaDelta(self, x, dx, eg, etsq, dt):
        eg = self.gamma * eg + (1 - self.gamma) * (dx ** 2)
        etsq = self.gamma * etsq + (1 - self.gamma) * (dt ** 2)
        dt = np.sqrt((etsq + self.eps) / (eg + self.eps)) * dx
        x -= dt  # in-place update
        return x, eg, etsq, dt
# Adaptive moment estimation (Adam) optimizer.
class AdamOptimizer(object):
    """Adam: per-parameter adaptive steps from bias-corrected first (m)
    and second (v) moment estimates of the gradients.

    optmParams is (beta1, beta2, eps).
    """

    def __init__(self, optmParams, dataType):
        self.beta1, self.beta2, self.eps = optmParams
        self.dataType = dataType
        self.isInited = False
        self.m = []
        self.v = []
        self.Iter = 0  # number of completed update steps

    # lazy init: allocate moment buffers matching each weight tensor
    def initMV(self, w):
        if (False == self.isInited):
            for i in range(len(w)):
                self.m.append(np.zeros(w[i].shape, dtype=self.dataType))
                self.v.append(np.zeros(w[i].shape, dtype=self.dataType))
            self.isInited = True

    def getUpdWeights(self, w, dw, lr):
        """Update every tensor in tuple w with gradients dw; returns a tuple."""
        self.initMV(w)
        t = self.Iter + 1
        wNew = []
        for i in range(len(w)):
            wi, self.m[i], self.v[i] = self.OptimzAdam(w[i], dw[i],
                                                       self.m[i], self.v[i],
                                                       lr, t)
            wNew.append(wi)
        # BUGFIX: advance the step counter. It was never incremented, so the
        # bias correction stayed frozen at t == 1 forever (compare
        # AdamOptimizer_succ, which does increment it).
        self.Iter += 1
        # return the updated tensors as a tuple
        return tuple(wNew)

    def OptimzAdam(self, x, dx, m, v, lr, t):
        m = self.beta1 * m + (1 - self.beta1) * dx
        mt = m / (1 - self.beta1 ** t)                  # bias-corrected 1st moment
        v = self.beta2 * v + (1 - self.beta2) * (dx ** 2)
        vt = v / (1 - self.beta2 ** t)                  # bias-corrected 2nd moment
        x += - lr * mt / (np.sqrt(vt) + self.eps)       # in-place update
        return x, m, v
# Adaptive moment estimation optimizer (legacy fixed-signature variant).
# TODO: the fixed (w, b) interface should be generalized to accept a tuple
# of tensors, as AdagradOptimizer and friends do.
class AdamOptimizer_succ(object):
    """Adam optimizer operating on a fixed (weights, bias) pair."""

    def __init__(self, beta1, beta2, eps, dataType):
        self.beta1 = beta1
        self.beta2 = beta2
        self.eps = eps
        self.dataType = dataType
        self.isInited = False
        self.m_w = []
        self.v_w = []
        self.m_b = []
        self.v_b = []
        self.Iter = 0

    def initMV(self, shapeW, shapeB):
        # Lazily allocate zeroed moment buffers on the first update.
        if self.isInited:
            return
        self.m_w = np.zeros(shapeW, dtype=self.dataType)
        self.v_w = np.zeros(shapeW, dtype=self.dataType)
        self.m_b = np.zeros(shapeB, dtype=self.dataType)
        self.v_b = np.zeros(shapeB, dtype=self.dataType)
        self.isInited = True

    def getUpdWeights(self, w, dw, b, db, lr):
        self.initMV(w.shape, b.shape)
        step = self.Iter + 1
        wNew, self.m_w, self.v_w = self.OptimzAdam(w, dw, self.m_w, self.v_w, lr, step)
        bNew, self.m_b, self.v_b = self.OptimzAdam(b, db, self.m_b, self.v_b, lr, step)
        self.Iter += 1
        return wNew, bNew

    def OptimzAdam(self, x, dx, m, v, lr, t):
        m = self.beta1 * m + (1 - self.beta1) * dx
        mt = m / (1 - self.beta1 ** t)                 # bias-corrected mean
        v = self.beta2 * v + (1 - self.beta2) * (dx ** 2)
        vt = v / (1 - self.beta2 ** t)                 # bias-corrected variance
        x += -lr * mt / (np.sqrt(vt) + self.eps)       # in-place step
        return x, m, v
|
{"hexsha": "6984cdb067943c7f1d7c365258f92d014bbabfad", "size": 9075, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/xDLbase/optimizers.py", "max_stars_repo_name": "AskyJx/xDeepLearning", "max_stars_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2019-03-21T18:16:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T12:10:04.000Z", "max_issues_repo_path": "src/xDLbase/optimizers.py", "max_issues_repo_name": "AskyJx/xDeepLearning", "max_issues_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-17T10:09:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-10T11:38:29.000Z", "max_forks_repo_path": "src/xDLbase/optimizers.py", "max_forks_repo_name": "AskyJx/xDeepLearning", "max_forks_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-03T15:26:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-09T03:06:01.000Z", "avg_line_length": 29.1800643087, "max_line_length": 122, "alphanum_fraction": 0.5043526171, "include": true, "reason": "import numpy", "num_tokens": 2673}
|
# -*- coding: UTF-8 -*-
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid import layers
from paddlepalm.interface import task_paradigm
import numpy as np
import os
class TaskParadigm(task_paradigm):
    '''
    Sentence-level classification task head: a dropout + fc classifier on
    top of the backbone's sentence embedding.
    '''
    def __init__(self, config, phase, backbone_config=None):
        self._is_training = phase == 'train'
        self._hidden_size = backbone_config['hidden_size']
        self.num_classes = config['n_classes']

        # Prefer an explicit initializer from the task config; otherwise fall
        # back to the backbone's initializer range (default 0.02).
        if 'initializer_range' in config:
            self._param_initializer = config['initializer_range']
        else:
            self._param_initializer = fluid.initializer.TruncatedNormal(
                scale=backbone_config.get('initializer_range', 0.02))
        if 'dropout_prob' in config:
            self._dropout_prob = config['dropout_prob']
        else:
            self._dropout_prob = backbone_config.get('hidden_dropout_prob', 0.0)

        self._pred_output_path = config.get('pred_output_path', None)
        self._preds = []

    @property
    def inputs_attrs(self):
        # Labels are only consumed during training.
        if self._is_training:
            reader = {"label_ids": [[-1], 'int64']}
        else:
            reader = {}
        bb = {"sentence_embedding": [[-1, self._hidden_size], 'float32']}
        return {'reader': reader, 'backbone': bb}

    @property
    def outputs_attrs(self):
        if self._is_training:
            return {'loss': [[1], 'float32']}
        else:
            return {'logits': [[-1, self.num_classes], 'float32']}

    def build(self, inputs, scope_name=''):
        """Build the classifier graph; returns loss (train) or logits (pred)."""
        sent_emb = inputs['backbone']['sentence_embedding']
        if self._is_training:
            label_ids = inputs['reader']['label_ids']
            # BUGFIX: the dropout output was previously computed but the fc
            # layer read `sent_emb` directly, silently disabling dropout.
            # Apply dropout only while training.
            cls_feats = fluid.layers.dropout(
                x=sent_emb,
                dropout_prob=self._dropout_prob,
                dropout_implementation="upscale_in_train")
        else:
            cls_feats = sent_emb

        logits = fluid.layers.fc(
            input=cls_feats,
            size=self.num_classes,
            param_attr=fluid.ParamAttr(
                name=scope_name+"cls_out_w",
                initializer=self._param_initializer),
            bias_attr=fluid.ParamAttr(
                name=scope_name+"cls_out_b", initializer=fluid.initializer.Constant(0.)))

        if self._is_training:
            # renamed from `inputs` to avoid shadowing the method parameter
            probs = fluid.layers.softmax(logits)
            loss = fluid.layers.cross_entropy(
                input=probs, label=label_ids)
            loss = layers.mean(loss)
            return {"loss": loss}
        else:
            return {"logits": logits}

    def postprocess(self, rt_outputs):
        """Accumulate argmax class predictions during prediction runs."""
        if not self._is_training:
            logits = rt_outputs['logits']
            preds = np.argmax(logits, -1)
            self._preds.extend(preds.tolist())

    def epoch_postprocess(self, post_inputs):
        # there is no post_inputs needed and not declared in epoch_inputs_attrs,
        # hence no elements exist in post_inputs
        if not self._is_training:
            if self._pred_output_path is None:
                raise ValueError('argument pred_output_path not found in config. Please add it into config dict/file.')
            with open(os.path.join(self._pred_output_path, 'predictions.json'), 'w') as writer:
                for p in self._preds:
                    writer.write(str(p)+'\n')
            print('Predictions saved at '+os.path.join(self._pred_output_path, 'predictions.json'))
|
{"hexsha": "2893dc33ce833f597d1f04311f8728d15112e606", "size": 4003, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddlepalm/task_paradigm/cls.py", "max_stars_repo_name": "wangxiao1021/PALM", "max_stars_repo_head_hexsha": "f57e0efd68ac0bf5cb7545991a4eb9c43c29c21c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paddlepalm/task_paradigm/cls.py", "max_issues_repo_name": "wangxiao1021/PALM", "max_issues_repo_head_hexsha": "f57e0efd68ac0bf5cb7545991a4eb9c43c29c21c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paddlepalm/task_paradigm/cls.py", "max_forks_repo_name": "wangxiao1021/PALM", "max_forks_repo_head_hexsha": "f57e0efd68ac0bf5cb7545991a4eb9c43c29c21c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-28T02:08:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-28T02:08:09.000Z", "avg_line_length": 38.8640776699, "max_line_length": 119, "alphanum_fraction": 0.6270297277, "include": true, "reason": "import numpy", "num_tokens": 868}
|
# https://deeplearningcourses.com/c/data-science-natural-language-processing-in-python
# https://www.udemy.com/data-science-natural-language-processing-in-python
# Author: http://lazyprogrammer.me
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
# WHERE TO GET THE VECTORS:
# GloVe: https://nlp.stanford.edu/projects/glove/
# Direct link: http://nlp.stanford.edu/data/glove.6B.zip
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
def dist1(a, b):
    # Euclidean distance between vectors a and b.
    diff = a - b
    return np.sqrt(np.sum(diff * diff))
def dist2(a, b):
    # Cosine distance: 1 - cos(angle between a and b).
    cos_sim = a.dot(b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return 1 - cos_sim
# pick a distance type (cosine by default; swap in the euclidean pair below
# to experiment -- `dist` and `metric` must stay consistent with each other)
dist, metric = dist2, 'cosine'
# dist, metric = dist1, 'euclidean'
## more intuitive
# def find_analogies(w1, w2, w3):
# for w in (w1, w2, w3):
# if w not in word2vec:
# print("%s not in dictionary" % w)
# return
# king = word2vec[w1]
# man = word2vec[w2]
# woman = word2vec[w3]
# v0 = king - man + woman
# min_dist = float('inf')
# best_word = ''
# for word, v1 in iteritems(word2vec):
# if word not in (w1, w2, w3):
# d = dist(v0, v1)
# if d < min_dist:
# min_dist = d
# best_word = word
# print(w1, "-", w2, "=", best_word, "-", w3)
## faster
def find_analogies(w1, w2, w3):
    """Print the word completing the analogy w1 - w2 = ? - w3, found via
    vectorized distances over the whole embedding matrix."""
    for w in (w1, w2, w3):
        if w not in word2vec:
            print("%s not in dictionary" % w)
            return

    # Vector arithmetic: e.g. king - man + woman should land near 'queen'.
    v0 = word2vec[w1] - word2vec[w2] + word2vec[w3]

    distances = pairwise_distances(v0.reshape(1, D), embedding,
                                   metric=metric).reshape(V)
    # The closest hits usually include the query words themselves, so scan
    # the top four and keep the first candidate that is not a query word.
    for idx in distances.argsort()[:4]:
        candidate = idx2word[idx]
        if candidate not in (w1, w2, w3):
            best_word = candidate
            break
    print(w1, "-", w2, "=", best_word, "-", w3)
def nearest_neighbors(w, n=5):
    """Print the n words closest to w (excluding w itself)."""
    if w not in word2vec:
        print("%s not in dictionary:" % w)
        return
    v = word2vec[w]
    distances = pairwise_distances(v.reshape(1, D), embedding,
                                   metric=metric).reshape(V)
    # index 0 of the sorted distances is w itself, so skip it
    neighbor_idxs = distances.argsort()[1:n+1]
    print("neighbors of: %s" % w)
    for idx in neighbor_idxs:
        print("\t%s" % idx2word[idx])
# load in pre-trained word vectors
# (builds three parallel structures: word -> vector, the embedding matrix,
# and index -> word for reverse lookup)
print('Loading word vectors...')
word2vec = {}
embedding = []
idx2word = []
with open('../large_files/glove.6B/glove.6B.50d.txt', encoding='utf-8') as f:
    # is just a space-separated text file in the format:
    # word vec[0] vec[1] vec[2] ...
    for line in f:
        values = line.split()
        word = values[0]
        vec = np.asarray(values[1:], dtype='float32')
        word2vec[word] = vec
        embedding.append(vec)
        idx2word.append(word)
print('Found %s word vectors.' % len(word2vec))
embedding = np.array(embedding)
V, D = embedding.shape  # V: vocabulary size, D: embedding dimension

# Demonstrations: analogy arithmetic and nearest-neighbor lookups.
find_analogies('king', 'man', 'woman')
find_analogies('france', 'paris', 'london')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')
find_analogies('france', 'french', 'english')
find_analogies('japan', 'japanese', 'chinese')
find_analogies('japan', 'japanese', 'italian')
find_analogies('japan', 'japanese', 'australian')
find_analogies('december', 'november', 'june')
find_analogies('miami', 'florida', 'texas')
find_analogies('einstein', 'scientist', 'painter')
find_analogies('china', 'rice', 'bread')
find_analogies('man', 'woman', 'she')
find_analogies('man', 'woman', 'aunt')
find_analogies('man', 'woman', 'sister')
find_analogies('man', 'woman', 'wife')
find_analogies('man', 'woman', 'actress')
find_analogies('man', 'woman', 'mother')
find_analogies('heir', 'heiress', 'princess')
find_analogies('nephew', 'niece', 'aunt')
find_analogies('france', 'paris', 'tokyo')
find_analogies('france', 'paris', 'beijing')
find_analogies('february', 'january', 'november')
find_analogies('france', 'paris', 'rome')
find_analogies('paris', 'france', 'italy')

nearest_neighbors('king')
nearest_neighbors('france')
nearest_neighbors('japan')
nearest_neighbors('einstein')
nearest_neighbors('woman')
nearest_neighbors('nephew')
nearest_neighbors('february')
nearest_neighbors('rome')
|
{"hexsha": "b5e60463de67b119fcc47ef8eb2785b6a6ca1f01", "size": 4118, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nlp_class2/pretrained_glove.py", "max_stars_repo_name": "JouniVatanen/NLP-and-Deep-Learning", "max_stars_repo_head_hexsha": "2fddcc2c39787713d33d17e80565de4ed073ca60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-24T06:55:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-24T06:55:31.000Z", "max_issues_repo_path": "Machine Learning/nlp_class2/pretrained_glove.py", "max_issues_repo_name": "Ashleshk/Machine-Learning-Data-Science-Deep-Learning", "max_issues_repo_head_hexsha": "03357ab98155bf73b8f1d2fd53255cc16bea2333", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Machine Learning/nlp_class2/pretrained_glove.py", "max_forks_repo_name": "Ashleshk/Machine-Learning-Data-Science-Deep-Learning", "max_forks_repo_head_hexsha": "03357ab98155bf73b8f1d2fd53255cc16bea2333", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-16T13:11:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-16T13:11:14.000Z", "avg_line_length": 28.5972222222, "max_line_length": 87, "alphanum_fraction": 0.6677999029, "include": true, "reason": "import numpy", "num_tokens": 1294}
|
import numpy as np
from scipy import signal
from sklearn.base import BaseEstimator, TransformerMixin
from mne.filter import filter_data, construct_iir_filter, create_filter
def is_filter_stable(a):
    """Check whether an IIR filter is stable (not applicable to FIR filters).

    An IIR filter is stable iff every pole -- every root of the denominator
    polynomial ``a`` -- lies strictly inside the unit circle.

    Parameters
    ----------
    a : ndarray
        ``a`` (denominator) coefficients of an IIR filter.

    Returns
    -------
    bool : bool
        True if stable.
    """
    # BUG FIX: the original returned np.all(np.abs(np.roots(a))), which is
    # truthy whenever all poles are merely non-zero. Stability requires the
    # pole magnitudes to be strictly less than 1.
    return np.all(np.abs(np.roots(a)) < 1)
class OnlineConsecutiveFilter(BaseEstimator, TransformerMixin):
    """Streaming (sample-consecutive) Butterworth filter bank.

    The filter state (``zi``) is carried over between successive calls to
    :meth:`transform`, so chunks of a continuous signal can be filtered
    one after another without edge discontinuities.

    https://stackoverflow.com/questions/21862777/bandpass-butterworth-filter-frequencies-in-scipy
    """

    def __init__(self, srate, filters, return_sos=True):
        self.srate = srate
        self.filters = filters
        self.return_sos = return_sos

    def _compute_filter(self, l, h, fs, order=3, return_sos=True):
        """Design a Butterworth filter and its initial conditions.

        Parameters
        ----------
        l : float|None
            low cutoff frequency; if None a lowpass filter is designed
        h : float|None
            high cutoff frequency; if None a highpass filter is designed
        fs : float
            sampling rate
        order : int
            order of the Butterworth filter
        return_sos : bool, optional
            if True, return sos coeffs, by default True

        Returns
        -------
        coeff:
            filter coefficients (sos array, or (b, a) tuple)
        coeff_zi:
            initial conditions for sosfilt / lfilter
        """
        # Pick band type and critical frequencies from which cutoffs are given.
        if h is None:
            btype, wn = 'highpass', l
        elif l is None:
            btype, wn = 'lowpass', h
        else:
            btype, wn = 'bandpass', [l, h]
        kind = 'sos' if return_sos else 'ba'
        coeff = signal.butter(order, wn, btype=btype, output=kind, fs=fs)
        zi = signal.sosfilt_zi(coeff) if return_sos else signal.lfilter_zi(*coeff)
        return coeff, zi

    def fit(self, X, y=None):
        """Design all band filters; sklearn-compatible signature.

        ``X`` is a 1-D array of shape (Ne,) holding one initial sample per
        channel, used to scale each channel's initial filter state.
        """
        self._coeffs = []
        self._coeffs_zi = []
        for lo, hi in self.filters:
            coeffs, zi = self._compute_filter(
                lo, hi, self.srate, order=5, return_sos=self.return_sos)
            self._coeffs.append(coeffs)
            # One state vector per channel, seeded by that channel's sample.
            self._coeffs_zi.append([zi.copy() * x0 for x0 in X])
        return self

    def _lfilter(self, x, coeffs, coeffs_zi, use_sos=True):
        # Stateful 1-D filtering; returns (filtered signal, updated state).
        if use_sos:
            return signal.sosfilt(coeffs, x, zi=coeffs_zi)
        return signal.lfilter(*coeffs, x, zi=coeffs_zi)

    def transform(self, X):
        """Filter a chunk of shape (Ne, Ns); state persists across calls."""
        n_chans = len(X)
        out = np.zeros((len(self.filters), *X.shape))
        for f_idx, coeffs in enumerate(self._coeffs):
            for ch in range(n_chans):
                out[f_idx, ch], self._coeffs_zi[f_idx][ch] = self._lfilter(
                    X[ch],
                    coeffs,
                    self._coeffs_zi[f_idx][ch],
                    use_sos=self.return_sos,
                )
        return out
class OnlineBlockFilter(BaseEstimator, TransformerMixin):
    """Block-wise Butterworth filter bank (no state across blocks).

    Every trial is filtered independently; the filter state is re-seeded
    from the first sample of each trial. Optionally the signal is mirrored
    in time before filtering to soften edge transients.

    https://stackoverflow.com/questions/21862777/bandpass-butterworth-filter-frequencies-in-scipy
    """

    def __init__(self, srate, filters, use_reflect=False, return_sos=True):
        self.srate = srate
        self.filters = filters
        self.use_reflect = use_reflect
        self.return_sos = return_sos

    def _compute_filter(self, l, h, fs, order=3, return_sos=True):
        """Design a Butterworth filter and its initial conditions.

        Parameters
        ----------
        l : float|None
            low cutoff frequency; if None a lowpass filter is designed
        h : float|None
            high cutoff frequency; if None a highpass filter is designed
        fs : float
            sampling rate
        order : int
            order of the Butterworth filter
        return_sos : bool, optional
            if True, return sos coeffs, by default True

        Returns
        -------
        coeff:
            filter coefficients (sos array, or (b, a) tuple)
        coeff_zi:
            initial conditions for sosfilt / lfilter
        """
        if h is None:
            btype, wn = 'highpass', l
        elif l is None:
            btype, wn = 'lowpass', h
        else:
            btype, wn = 'bandpass', [l, h]
        kind = 'sos' if return_sos else 'ba'
        coeff = signal.butter(order, wn, btype=btype, output=kind, fs=fs)
        zi = signal.sosfilt_zi(coeff) if return_sos else signal.lfilter_zi(*coeff)
        return coeff, zi

    def fit(self, X=None, y=None):
        """Design one filter per band; X and y are ignored (sklearn API)."""
        designs = [
            self._compute_filter(lo, hi, self.srate, order=5,
                                 return_sos=self.return_sos)
            for lo, hi in self.filters
        ]
        self._coeffs = [coeff for coeff, _ in designs]
        self._coeffs_zi = [zi for _, zi in designs]
        return self

    def _lfilter(self, x, coeffs, coeffs_zi, use_sos=True):
        # 1-D filtering with explicit initial conditions.
        if use_sos:
            return signal.sosfilt(coeffs, x, zi=coeffs_zi)
        return signal.lfilter(*coeffs, x, zi=coeffs_zi)

    def _reflect(self, X):
        # Prepend the time-reversed signal along the last (time) axis.
        mirrored = X.copy()[..., ::-1]
        return np.concatenate((mirrored, X), axis=-1)

    def _unreflect(self, double_X):
        # Drop the mirrored first half again.
        half = double_X.shape[-1] // 2
        return double_X[..., half:]

    def transform(self, X):
        """Filter X of shape (..., Ne, Ns) into (n_bands, Nt, Ne, Ns)."""
        X = np.reshape(X, (-1, *X.shape[-2:]))
        if self.use_reflect:
            X = self._reflect(X)
        n_trials, n_chans, n_samples = X.shape
        out = np.zeros((len(self.filters), n_trials, n_chans, n_samples))
        for b, coeffs in enumerate(self._coeffs):
            for t in range(n_trials):
                for ch in range(n_chans):
                    # State is seeded from the trial's first sample; the
                    # updated state is discarded (block-wise filtering).
                    out[b, t, ch], _ = self._lfilter(
                        X[t, ch],
                        coeffs,
                        self._coeffs_zi[b] * X[t, ch, 0],
                        use_sos=self.return_sos,
                    )
        if self.use_reflect:
            out = self._unreflect(out)
        return out
class BlockFilter(BaseEstimator, TransformerMixin):
    """Band-pass filter bank built on MNE's IIR filtering helpers."""

    def __init__(self, sfreq, filters):
        self.sfreq = sfreq
        self.filters = filters

    def fit(self, X=None, y=None):
        """Construct one 4th-order Butterworth SOS design per band.

        X and y are ignored (sklearn-compatible signature).
        """
        base_params = {
            "order": 4,
            "ftype": 'butter',
            'output': 'sos'
        }
        self.iir_params = [
            construct_iir_filter(base_params, f_pass=band, f_stop=None,
                                 sfreq=self.sfreq, btype='bandpass',
                                 verbose=False)
            for band in self.filters
        ]
        return self

    def transform(self, X):
        """Return X filtered per band, stacked along a new leading axis."""
        filtered = [
            filter_data(X, self.sfreq, band[0], band[1], method='iir',
                        iir_params=params, verbose=False, n_jobs=-1)
            for band, params in zip(self.filters, self.iir_params)
        ]
        return np.stack(filtered)
|
{"hexsha": "49236f1b1680dde90093102ad4d8917ebb5d7e76", "size": 8197, "ext": "py", "lang": "Python", "max_stars_repo_path": "buttleworth.py", "max_stars_repo_name": "Rebell-Leader/mi-eeg-diploma", "max_stars_repo_head_hexsha": "a650c2e37e3c3f61196b8b9c39aef2c930529207", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "buttleworth.py", "max_issues_repo_name": "Rebell-Leader/mi-eeg-diploma", "max_issues_repo_head_hexsha": "a650c2e37e3c3f61196b8b9c39aef2c930529207", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "buttleworth.py", "max_forks_repo_name": "Rebell-Leader/mi-eeg-diploma", "max_forks_repo_head_hexsha": "a650c2e37e3c3f61196b8b9c39aef2c930529207", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4571428571, "max_line_length": 133, "alphanum_fraction": 0.5176284006, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1950}
|
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import f1_score, log_loss
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPClassifier
from data import load_test_data, write_accuracy, write_logloss, \
load_train_data_with_PCA_per_type
from visualize import plot_cnf
# --- Train and evaluate the MLP genre classifier -------------------------
train_x, train_y, test_x, test_y, genres, scaler_rythym, scaler_chroma, scaler_mfcc = load_train_data_with_PCA_per_type()

# Two hidden layers of 40 units; fixed random_state for reproducibility.
neural = MLPClassifier(max_iter=400, random_state=2, hidden_layer_sizes=[40, 40])
neural.fit(train_x, train_y)

scores = cross_val_score(neural, train_x, train_y, cv=5, scoring='accuracy')
print("Cross val accuracy: ", scores.mean(), scores.std())

preds = neural.predict_proba(test_x)
preds = np.argmax(preds, axis=-1)
print('Test Set F-score = {0:.3f}'.format(f1_score(test_y, preds, average='weighted')))

predictions_on_train = neural.predict_proba(train_x)
log_loss_score = log_loss(train_y, predictions_on_train, eps=1e-15)
print("Train logloss:", log_loss_score)
plot_cnf(neural, test_x, test_y)

# --- Predict on the unlabeled test file ----------------------------------
test_data = load_test_data()
# NOTE(review): these slices skip columns 168 and 216 -- looks like an
# off-by-one; confirm against the feature layout used in data.py.
rythym = test_data[:, :168]
chroma = test_data[:, 169:216]
mfcc = test_data[:, 217:]

# BUG FIX: apply the scalers fitted on the training data with transform()
# instead of re-fitting them on the test set. Re-fitting (fit_transform)
# leaks test-set statistics and scales the test features inconsistently
# with what the network was trained on.
rythym = scaler_rythym.transform(rythym)
chroma = scaler_chroma.transform(chroma)
mfcc = scaler_mfcc.transform(mfcc)

rythym = preprocessing.normalize(rythym, norm='l2')
chroma = preprocessing.normalize(chroma, norm='l2')
mfcc = preprocessing.normalize(mfcc, norm='l2')
# rythym = pca_rythym.fit_transform(rythym)
# chroma = pca_chroma.fit_transform(chroma)
# mfcc = pca_mfcc.fit_transform(mfcc)
test_data = np.concatenate((rythym, chroma, mfcc), axis=1)
N = test_data.shape[0]

predictions = neural.predict(test_data)
predictions = predictions.reshape((predictions.shape[0], 1))
accuracy_data = predictions.astype(np.uint64)
write_accuracy(accuracy_data)

y_pred = neural.predict_proba(test_data)
write_logloss(y_pred)
# plt.show()
|
{"hexsha": "f96417fb9ac31e6a8839430f840236464e4bcf6e", "size": 1931, "ext": "py", "lang": "Python", "max_stars_repo_path": "neural_network.py", "max_stars_repo_name": "Oltier/ML-Music-genre-labeling", "max_stars_repo_head_hexsha": "749fc277136be1fbec2bda105921966fcda4082a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neural_network.py", "max_issues_repo_name": "Oltier/ML-Music-genre-labeling", "max_issues_repo_head_hexsha": "749fc277136be1fbec2bda105921966fcda4082a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neural_network.py", "max_forks_repo_name": "Oltier/ML-Music-genre-labeling", "max_forks_repo_head_hexsha": "749fc277136be1fbec2bda105921966fcda4082a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7288135593, "max_line_length": 121, "alphanum_fraction": 0.7861211807, "include": true, "reason": "import numpy", "num_tokens": 516}
|
# set the directory path
# Make the package root importable when this file is executed directly as a
# script (it lives four directory levels below the package root).
import os,sys
import os.path as path
abs_path_pkg = path.abspath(path.join(__file__ ,"../../../../"))
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, abs_path_pkg)
from Py_FS.datasets import get_dataset
from Py_FS.wrapper.population_based.get_algorithm import get_algorithm
from matplotlib import pyplot as plt
from matplotlib import cm
import numpy as np
def plot(algo_res, ax, color="r"):
    """Plot the mean population fitness per iteration of one algorithm run.

    ``algo_res`` must expose ``history`` (iterable of dicts with a
    'fitness' entry) and ``algo_name`` (used as the line label).
    """
    means = [np.mean(step['fitness']) for step in algo_res.history]
    ax.plot(np.arange(len(means)), means, c=color, label=algo_res.algo_name)
def main():
    """Benchmark every wrapper FS algorithm on every dataset and save one
    convergence plot (mean fitness vs. iteration) per dataset."""
    dataset_names = ['Glass', 'WaveformEW', 'SpectEW', 'Ionosphere', 'KrVsKpEW', 'Sonar', 'BreastEW', 'Madelon', 'Soybean-small', 'Lymphography', 'CongressEW', 'Monk1', 'Monk3', 'Zoo', 'Monk2', 'Tic-tac-toe', 'Iris', 'BreastCancer', 'Arrhythmia', 'Wine', 'Digits', 'PenglungEW', 'HeartEW', 'Hill-valley', 'Horse', 'M-of-n', 'Vote', 'Exactly', 'Vowel', 'Exactly2']
    list_algo = ["BBA", "CS", "EO", "GA", "GSA", "GWO", "HS", "MA", "PSO", "RDA", "SCA", "WOA"]
    for dataset_name in dataset_names:
        data = get_dataset(dataset_name)
        results = {}
        fig, ax = plt.subplots(1, 1, figsize=(10, 8))
        # One distinct rainbow shade per algorithm.
        palette = iter(cm.rainbow(np.linspace(0, 1, len(list_algo))))
        for algo_name, shade in zip(list_algo, palette):
            algo_cls = get_algorithm(algo_name)
            runner = algo_cls(default_mode=True, verbose=False, num_agents=40,
                              max_iter=100, train_data=data.data,
                              train_label=data.target, save_conv_graph=False)
            results[algo_name] = runner.run()
            plot(results[algo_name], ax, shade)
        ax.set_title(dataset_name)
        ax.legend(loc="best")
        fig.savefig(dir_path + "/test_results/test_" + dataset_name + ".jpg")
        plt.close(fig)
# Run the benchmark only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
{"hexsha": "b4badc8333fcaf914e7a4fa207f5c23abd576377", "size": 1984, "ext": "py", "lang": "Python", "max_stars_repo_path": "Py_FS/wrapper/population_based/_test.py", "max_stars_repo_name": "rishavpramanik/Feature-Selection", "max_stars_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-29T12:47:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T12:47:26.000Z", "max_issues_repo_path": "Py_FS/wrapper/population_based/_test.py", "max_issues_repo_name": "rishavpramanik/Feature-Selection", "max_issues_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-21T09:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T06:58:06.000Z", "max_forks_repo_path": "Py_FS/wrapper/population_based/_test.py", "max_forks_repo_name": "rishavpramanik/Feature-Selection", "max_forks_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-06-29T07:20:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-11T08:55:10.000Z", "avg_line_length": 41.3333333333, "max_line_length": 363, "alphanum_fraction": 0.6436491935, "include": true, "reason": "import numpy", "num_tokens": 557}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This Parameters module is a container for all possible parameters and all ways in which they are adapted
by various optimization methods.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = 'Sander van Rijn <svr003@gmail.com>'
from modea.Utils import initializable_parameters
import numpy as np
from numpy import abs, all, any, append, arange, ceil, diag, dot, exp, eye, floor, isfinite, isinf, isreal,\
ones, log, max, mean, median, mod, newaxis, outer, real, sqrt, square, sum, triu, zeros
from numpy.linalg import cond, eig, eigh, norm, LinAlgError
class BaseParameters(object):
    """
    Data holder class for all hardcoded values that are independent of problem dimensionality
    """

    ### (1+1)-ES ###
    c = 0.817  # Sigma adaptation factor

    ### CMA-ES ###
    alpha_mu = 2  # weighting constant used in the c_mu learning rate (see Parameters.__init__)

    ### (1+1)-Cholesky ES ###
    # presumably the target success rate, its averaging rate, and the update
    # threshold of the (1+1)-Cholesky ES -- not referenced in this file; confirm
    p_target = 2/11
    c_p = 1/12
    p_thresh = 0.44

    ### (B)IPOP Restart parameters ###
    tolfun = 1e-12        # restart when the recent fitness range drops below this
    conditioncov = 1e14   # restart when cond(C) exceeds this
    tolupsigma = 1e20     # restart on sigma / max-eigenvalue mismatch beyond this
class Parameters(BaseParameters):
    """
    Data holder class that initializes *all* possible parameters, regardless of what functions/algorithm are used
    If multiple functions/algorithms use the same parameter name, but differently, these will be split into
    separate parameters.

    :param n:               Dimensionality of the problem to be solved
    :param budget:          Number of fitness evaluations the algorithm may perform
    :param mu:              Number of individuals that form the parents of each generation
    :param lambda_:         Number of individuals in the offspring of each generation
    :param weights_option:  String to determine which weignts to use.
                            Choose between ``default`` (CMA-ES) and ``1/n``
    :param l_bound:         Lower bound of the search space
    :param u_bound:         Upper bound of the search space
    :param seq_cutoff:      Minimal cut-off allowed in sequential selection
    :param wcm:             Initial weighted center of mass
    :param active:          Boolean switch on using an active update. Default: False
    :param elitist:         Boolean switch on using a (mu, l) strategy rather than (mu + l). Default: False
    :param sequential:      Boolean switch on using sequential evaluation. Default: False
    :param tpa:             Boolean switch on using two-point step-size adaptation. Default: False
    :param values:          Dictionary in the form of ``{'name': value}`` of initial values for allowed parameters.
                            Any values for names not in :data:`modea.Utils.initializable_parameters` are ignored.
    """

    def __init__(self, n, budget, sigma=None,
                 mu=None, lambda_=None, weights_option=None, l_bound=None, u_bound=None, seq_cutoff=1, wcm=None,
                 active=False, elitist=False, local_restart=None, sequential=False, tpa=False,
                 values=None):
        if lambda_ is None:
            lambda_ = int(4 + floor(3 * log(n)))
        # TPA spends 2 of the lambda evaluations on its two test points.
        eff_lambda = lambda_ - 2 if tpa else lambda_

        if mu is None:
            mu = 0.5
        elif mu > lambda_:
            raise Exception("mu ({}) cannot be greater than lambda ({})".format(mu, lambda_))
        elif mu >= 1:
            # mu is stored internally as a fraction of lambda.
            mu /= lambda_

        if sigma is None:
            sigma = 1
        if l_bound is None or not isfinite(l_bound).all():
            l_bound = ones((n,)) * -5
        if u_bound is None or not isfinite(u_bound).all():
            u_bound = ones((n,)) * 5
        if seq_cutoff is None:
            seq_cutoff = mu * eff_lambda
        if wcm is None:
            # NOTE(review): randn draws from a standard normal, so wcm is not
            # guaranteed to lie within [l_bound, u_bound]; a uniform draw
            # (np.random.rand) may have been intended -- confirm.
            wcm = (np.random.randn(n,1) * (u_bound - l_bound)) + l_bound

        ### Basic parameters ###
        self.n = n
        self.budget = budget
        self.mu = mu
        self.lambda_ = lambda_
        self.eff_lambda = eff_lambda
        self.l_bound = l_bound
        self.u_bound = u_bound
        self.search_space_size = u_bound - l_bound
        self.sigma = sigma
        self.sigma_mean = sigma
        self.active = active
        self.elitist = elitist
        self.local_restart = local_restart
        self.sequential = sequential
        self.seq_cutoff = seq_cutoff
        self.tpa = tpa
        self.weights_option = weights_option
        self.weights = self.getWeights(weights_option)
        self.mu_eff = 1 / sum(square(self.weights))

        ### Meta-parameters ###
        self.N = 10 * self.n

        ### (1+1)-ES ###
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the exact equivalent.
        self.success_history = zeros((self.N, ), dtype=int)
        # FIX: addToFitnessHistory appends to this attribute, but it was
        # never initialized anywhere, causing an AttributeError on first use.
        self.fitness_history = []

        ### CMA-ES ###
        # Static
        mu_eff = self.mu_eff  # Local copy
        self.c_sigma = (mu_eff + 2) / (mu_eff + n + 5)
        self.c_c = (4 + mu_eff/n) / (n + 4 + 2*mu_eff/n)
        self.c_1 = 2 / ((n + 1.3)**2 + mu_eff)
        self.c_mu = min(1-self.c_1, self.alpha_mu*((mu_eff - 2 + 1/mu_eff) / ((n+2)**2 + self.alpha_mu*mu_eff/2)))
        self.damps = 1 + 2*np.max([0, sqrt((mu_eff-1)/(n+1))-1]) + self.c_sigma
        self.chiN = n**.5 * (1-1/(4*n)+1/(21*n**2))  # Expected random vector (or something like it)

        # Dynamic
        self.C = eye(n)       # Covariance matrix
        self.sqrt_C = eye(n)
        self.B = eye(n)       # Eigenvectors of C
        self.D = ones((n,1))  # Diagonal eigenvalues of C
        self.s_mean = None
        self.p_sigma = zeros((n,1))
        self.p_c = zeros((n,1))
        self.weighted_mutation_vector = zeros((n,1))  # weighted average of the last generation of offset vectors
        self.y_w_squared = zeros((n,1))
        self.offspring = None
        self.offset = None
        self.all_offspring = None
        self.wcm = wcm
        self.wcm_old = None

        ### Threshold Convergence ###
        # Static
        self.diameter = sqrt(sum(square(self.search_space_size)))  # Diameter of the search space
        self.init_threshold = 0.2   # Guess value, not actually mentioned in paper
        self.decay_factor = 0.995   # Determines curve of the decay. < 1: 'bulges', > 1: 'hollow'
        # Dynamic (this is updateThreshold() evaluated at t=0)
        self.threshold = self.init_threshold * self.diameter * ((1-0) / 1)**self.decay_factor

        ## Active CMA-ES ##
        if active:
            self.c_c = 2 / (n+sqrt(2))**2

        ### Two Point Step Size Adaptation ###
        # Static
        self.alpha = 0.5
        self.tpa_factor = 0.5
        self.beta_tpa = 0
        self.c_alpha = 0.3
        # Dynamic
        self.alpha_s = 0
        self.tpa_result = None

        ### IPOP ###
        self.last_pop = None
        self.lambda_orig = self.lambda_large = self.lambda_small = self.lambda_
        self.pop_inc_factor = 2
        self.flat_fitness_index = int(min([ceil(0.1+self.lambda_/4.0), self.mu_int-1]))
        self.nbin = 10 + int(ceil(30*n/lambda_))
        self.histfunevals = zeros(self.nbin)
        self.recent_best_fitnesses = []  # Contains the most recent best fitnesses of the 20 most recent generations
        self.stagnation_list = []        # Contains median fitness of some recent generations
        self.is_fitness_flat = False     # (effectively) are all fitness values this generation equal?

        self.max_iter = 100 + 50*(n+3)**2 / sqrt(lambda_)
        self.tolx = 1e-12 * self.sigma
        self.tolupx = 1e3 * self.sigma

        self.values = values
        if values:  # Now we've had the default values, we change all values that were passed along
            self.__init_values(values)

    def getParameterOpts(self):
        """Return a kwargs dict from which an equivalent Parameters object can be constructed."""
        return {'n': self.n, 'budget': self.budget, 'sigma': self.sigma,
                'mu': self.mu, 'lambda_': self.lambda_, 'weights_option': self.weights_option, 'l_bound': self.l_bound,
                'u_bound': self.u_bound, 'seq_cutoff': self.seq_cutoff, 'wcm': self.wcm,
                'active': self.active, 'elitist': self.elitist, 'local_restart': self.local_restart,
                'sequential': self.sequential, 'tpa': self.tpa, 'values': self.values}

    def __init_values(self, values):
        """
        Dynamically initialize parameters in this parameter object based on the given dictionary

        :param values:  Dictionary in the form of ``{'name': value}`` of initial values for allowed parameters.
                        Any values for names not in :data:`modea.initializable_parameters` are ignored
        """
        for name, value in list(values.items()):
            if name in initializable_parameters:
                setattr(self, name, value)

    @property
    def mu_int(self):
        """Integer value of mu"""
        if self.eff_lambda < 1:
            raise Exception("Effective lambda ({}) should be at least 1!".format(self.eff_lambda))
        return int(1 + floor((self.eff_lambda-1) * self.mu))

    def oneFifthRule(self, t):
        """
        Adapts sigma based on the 1/5-th success rule

        :param t:   Number of evaluations used by the algorithm so far
        """
        # Only adapt every n evaluations
        if t % self.n != 0:
            return

        if t < self.N:
            success = mean(self.success_history[:t])
        else:
            success = mean(self.success_history)

        if success < 1/5:
            self.sigma *= self.c
        elif success > 1/5:
            self.sigma /= self.c

        self.sigma_mean = self.sigma

    def addToSuccessHistory(self, t, success):
        """
        Record the (boolean) ``success`` value at time ``t``

        :param t:       Number of evaluations used by the algorithm so far
        :param success: Boolean that records whether the last update was a success
        """
        t %= self.N  # The history is a ring buffer of length N.
        self.success_history[t] = 1 if success else 0

    def addToFitnessHistory(self, fitness):
        """
        Record the latest ``fitness`` value (with a history of 5 generations)

        :param fitness: Fitness value to be recorded
        """
        self.fitness_history.append(fitness)
        if len(self.fitness_history) > 5:
            self.fitness_history = self.fitness_history[1:]

    def adaptCovarianceMatrix(self, evalcount):
        """
        Adapt the covariance matrix according to the (Active-)CMA-ES.

        :param evalcount:   Number of evaluations used by the algorithm so far
        """
        cc, cs, c_1, c_mu, n = self.c_c, self.c_sigma, self.c_1, self.c_mu, self.n
        wcm, wcm_old, mueff, invsqrt_C = self.wcm, self.wcm_old, self.mu_eff, self.sqrt_C
        lambda_ = self.lambda_

        self.p_sigma = (1-cs) * self.p_sigma + \
                       sqrt(cs*(2-cs)*mueff) * dot(invsqrt_C, (wcm - wcm_old) / self.sigma)
        power = (2*evalcount/lambda_)
        if power < 1000:  # TODO: Solve more neatly
            hsig = sum(self.p_sigma**2)/(1-(1-cs)**power)/n < 2 + 4/(n+1)
        else:
            # Prevent underflow error
            hsig = sum(self.p_sigma**2)/n < 2 + 4/(n+1)
        self.p_c = (1-cc) * self.p_c + hsig * sqrt(cc*(2-cc)*mueff) * (wcm - wcm_old) / self.sigma
        offset = self.offset[:, :self.mu_int]

        # Regular update of C
        self.C = (1 - c_1 - c_mu) * self.C \
                 + c_1 * (outer(self.p_c, self.p_c) + (1-hsig) * cc * (2-cc) * self.C) \
                 + c_mu * dot(offset, self.weights*offset.T)
        if self.active and len(self.all_offspring) >= 2*self.mu_int:  # Active update of C
            offset_bad = self.offset[:, -self.mu_int:]
            self.C -= c_mu * dot(offset_bad, self.weights*offset_bad.T)

        # Adapt step size sigma
        if self.tpa:
            alpha_act = self.tpa_result * self.alpha
            alpha_act += self.beta_tpa if self.tpa_result > 1 else 0
            self.alpha_s += self.c_alpha * (alpha_act - self.alpha_s)
            self.sigma *= exp(self.alpha_s)
        else:
            exponent = (norm(self.p_sigma) / self.chiN - 1) * self.c_sigma / self.damps
            if exponent < 1000:  # TODO: Solve more neatly
                self.sigma = self.sigma * exp(exponent)
            else:
                self.sigma = self.sigma_mean
        self.sigma_mean = self.sigma

        ### Update BD ###
        C = self.C                        # lastest setting for
        C = triu(C) + triu(C, 1).T        # eigen decomposition

        degenerated = False
        # FIX: the original condition was `any(isinf(C)) > 1`, which compares
        # a boolean against 1 and is therefore never True -- infinite entries
        # in C were never detected.
        if any(isinf(C)):
            degenerated = True
            # raise Exception("Values in C are infinite")
        elif not 1e-16 < self.sigma_mean < 1e6:
            degenerated = True
        else:
            try:
                w, e_vector = eigh(C)
                e_value = sqrt(list(map(complex, w))).reshape(-1, 1)
                if any(~isreal(e_value)):
                    degenerated = True
                    # raise Exception("Eigenvalues of C are not real")
                elif any(isinf(e_value)):
                    degenerated = True
                    # raise Exception("Eigenvalues of C are infinite")
                else:
                    self.D = real(e_value)
                    self.B = e_vector
                    self.sqrt_C = dot(e_vector, e_value**-1 * e_vector.T)
            except LinAlgError as e:
                # raise Exception(e)
                print("Restarting, degeneration detected: {}".format(e))
                degenerated = True

        if degenerated:
            self.restart()

    def checkDegenerated(self):
        """
        Check if the parameters (C, s_mean, etc) have degenerated and need to be reset.
        Designed for use by a CMA ES
        """
        degenerated = False

        if np.min(isfinite(self.C)) == 0:
            degenerated = True
        elif not ((10**(-16)) < self.sigma_mean < (10**16)):
            degenerated = True
        else:
            self.D, self.B = eig(self.C)
            self.D = sqrt(self.D)
            self.D.shape = (self.n,1)  # Force D to be a column vector
            if not isreal(self.D).all():
                degenerated = True

        if degenerated:
            self.restart()

    def getWeights(self, weights_option=None):
        """
        Defines a list of weights to be used in weighted recombination. Available options are:

        * ``1/n``: Each weight is set to 1/n
        * ``1/2^n``: Each weight is set to 1/2^i + (1/2^n)/mu
        * ``default``: Each weight is set to log((lambda-1)/2) - log(i)

        :param weights_option:  String to indicate which weights should be used.
        :returns:               Returns a np.array of weights, adding to 1
        """
        mu = self.mu_int
        if weights_option == '1/n':
            weights = ones((mu, 1)) * (1/mu)
        elif weights_option == '1/2^n':
            # The idea here is to give weights (1/2, 1/4, ..., 1/2**mu) + (1/2**mu / mu) so it all sums to 1
            leftover = (1 / (2**mu)) / mu
            weights = 1 / 2**arange(1, mu+1) + leftover
            weights.shape = (mu, 1)
        else:
            _mu_prime = (self.lambda_-1) / 2.0
            weights = log(_mu_prime+1.0)-log(arange(1, mu+1)[:, newaxis])
            weights = weights / sum(weights)

        return weights

    def updateThreshold(self, t):
        """
        Update the threshold that is used to maintain a minimum stepsize.
        Taken from: Evolution Strategies with Thresheld Convergence (CEC 2015)

        :param t:   Ammount of the evaluation budget spent
        """
        budget = self.budget
        self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor

    def restart(self):
        """
        Very basic restart, done by resetting some of the variables for CMA-ES
        """
        n = self.n
        self.C = eye(n)
        self.B = eye(n)
        self.D = ones((n,1))
        self.p_sigma = zeros((n, 1))
        self.sigma_mean = self.sigma = 1  # TODO: make this depend on any input default sigma value
        # TODO: add feedback of resetting sigma to the sigma per individual

    def recordRecentFitnessValues(self, evalcount, fitnesses):
        """
        Record recent fitness values at current budget
        """
        self.histfunevals[int(mod(evalcount/self.lambda_-1, self.nbin))] = min(fitnesses)
        self.recent_best_fitnesses.append(min(fitnesses))
        self.recent_best_fitnesses = self.recent_best_fitnesses[-20:]

        self.stagnation_list.append(median(fitnesses))
        self.stagnation_list = self.stagnation_list[-int(ceil(0.2*evalcount + 120 + 30*self.n/self.lambda_)):]

        flat_fitness_index = min(len(fitnesses)-1, self.flat_fitness_index)
        self.is_fitness_flat = min(fitnesses) == sorted(fitnesses)[flat_fitness_index]

    def checkLocalRestartConditions(self, evalcount):
        """
        Check for local restart conditions according to (B)IPOP

        :param evalcount:   Counter for the current generation
        :returns:           Boolean value ``restart_required``, True if a restart should be performed
        """
        if not self.local_restart:
            return False

        debug = False
        restart_required = False
        diagC = diag(self.C).reshape(-1, 1)
        tmp = append(abs(self.p_c), sqrt(diagC), axis=1)
        a = int(mod(evalcount/self.lambda_-1, self.n))

        # TolX
        if all(self.sigma*(max(tmp, axis=1)) < self.tolx):
            if debug:
                print('TolX')
            restart_required = True
        # TolUPX
        # FIX: the comparison belongs inside any(); the original
        # `any(self.sigma*sqrt(diagC)) > self.tolupx` compared a boolean
        # against the threshold and so could never trigger.
        elif any(self.sigma*sqrt(diagC) > self.tolupx):
            if debug:
                print('TolUPX')
            restart_required = True
        # No effective axis
        elif all(0.1*self.sigma*self.D[a, 0]*self.B[:, a] + self.wcm == self.wcm):
            if debug:
                print('noeffectaxis')
            restart_required = True
        # No effective coordinate
        elif any(0.2*self.sigma*sqrt(diagC) + self.wcm == self.wcm):
            if debug:
                print('noeffectcoord')
            restart_required = True
        # Condition of C
        elif cond(self.C) > self.conditioncov:
            if debug:
                print('condcov')
            restart_required = True
        elif mod(evalcount, self.lambda_) == self.nbin and \
                max(self.histfunevals) - min(self.histfunevals) < self.tolfun:
            if debug:
                print('tolfun')
            restart_required = True
        # Adjust step size in case of equal function values
        elif self.is_fitness_flat:
            if debug:
                print('flatfitness')
            restart_required = True
        # A mismatch between sigma increase and decrease of all eigenvalues in C
        elif self.sigma / 1 > self.tolupsigma*max(self.D):
            if debug:
                print('tolupsigma')
            restart_required = True
        # Stagnation, median of most recent 20 best values is no better than that of the oldest 20 medians/generation
        elif len(self.stagnation_list) > 20 and len(self.recent_best_fitnesses) > 20 and \
                median(self.stagnation_list[:20]) > median(self.recent_best_fitnesses):
            if debug:
                print('stagnation')
            restart_required = True

        return restart_required
|
{"hexsha": "e23d1a6a45b58f83d5597d3629298e8757e20787", "size": 19432, "ext": "py", "lang": "Python", "max_stars_repo_path": "modea/Parameters.py", "max_stars_repo_name": "sjvrijn/ModEA", "max_stars_repo_head_hexsha": "c59f77bacd460cdff5e2d05f20c5bb65efa07c50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-12T07:45:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-16T15:52:35.000Z", "max_issues_repo_path": "modea/Parameters.py", "max_issues_repo_name": "sjvrijn/ModEA", "max_issues_repo_head_hexsha": "c59f77bacd460cdff5e2d05f20c5bb65efa07c50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-03-02T18:54:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T09:50:53.000Z", "max_forks_repo_path": "modea/Parameters.py", "max_forks_repo_name": "sjvrijn/ModEA", "max_forks_repo_head_hexsha": "c59f77bacd460cdff5e2d05f20c5bb65efa07c50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-20T16:55:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-27T12:28:23.000Z", "avg_line_length": 38.2519685039, "max_line_length": 119, "alphanum_fraction": 0.5718402635, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4835}
|
[GOAL]
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
⊢ Quotient.mk IsometryRel.setoid p = toGHSpace X ↔ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p
[PROOFSTEP]
simp only [toGHSpace, Quotient.eq]
[GOAL]
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
⊢ p ≈ NonemptyCompacts.kuratowskiEmbedding X ↔ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p
[PROOFSTEP]
refine' ⟨fun h => _, _⟩
[GOAL]
case refine'_1
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
h : p ≈ NonemptyCompacts.kuratowskiEmbedding X
⊢ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p
[PROOFSTEP]
rcases Setoid.symm h with ⟨e⟩
[GOAL]
case refine'_1.intro
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
h : p ≈ NonemptyCompacts.kuratowskiEmbedding X
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ p }
⊢ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p
[PROOFSTEP]
have f := (kuratowskiEmbedding.isometry X).isometryEquivOnRange.trans e
[GOAL]
case refine'_1.intro
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
h : p ≈ NonemptyCompacts.kuratowskiEmbedding X
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ p }
f : X ≃ᵢ { x // x ∈ p }
⊢ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p
[PROOFSTEP]
use fun x => f x, isometry_subtype_coe.comp f.isometry
[GOAL]
case right
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
h : p ≈ NonemptyCompacts.kuratowskiEmbedding X
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ p }
f : X ≃ᵢ { x // x ∈ p }
⊢ (range fun x => ↑(↑f x)) = ↑p
[PROOFSTEP]
erw [range_comp, f.range_eq_univ, Set.image_univ, Subtype.range_coe]
[GOAL]
case refine'_2
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
⊢ (∃ Ψ, Isometry Ψ ∧ range Ψ = ↑p) → p ≈ NonemptyCompacts.kuratowskiEmbedding X
[PROOFSTEP]
rintro ⟨Ψ, ⟨isomΨ, rangeΨ⟩⟩
[GOAL]
case refine'_2.intro.intro
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
⊢ p ≈ NonemptyCompacts.kuratowskiEmbedding X
[PROOFSTEP]
have f := ((kuratowskiEmbedding.isometry X).isometryEquivOnRange.symm.trans isomΨ.isometryEquivOnRange).symm
[GOAL]
case refine'_2.intro.intro
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
f : ↑(range Ψ) ≃ᵢ ↑(range (kuratowskiEmbedding X))
⊢ p ≈ NonemptyCompacts.kuratowskiEmbedding X
[PROOFSTEP]
have E : (range Ψ ≃ᵢ NonemptyCompacts.kuratowskiEmbedding X) = (p ≃ᵢ range (kuratowskiEmbedding X)) := by
dsimp only [NonemptyCompacts.kuratowskiEmbedding]; rw [rangeΨ]; rfl
[GOAL]
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
f : ↑(range Ψ) ≃ᵢ ↑(range (kuratowskiEmbedding X))
⊢ (↑(range Ψ) ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X }) =
({ x // x ∈ p } ≃ᵢ ↑(range (kuratowskiEmbedding X)))
[PROOFSTEP]
dsimp only [NonemptyCompacts.kuratowskiEmbedding]
[GOAL]
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
f : ↑(range Ψ) ≃ᵢ ↑(range (kuratowskiEmbedding X))
⊢ (↑(range Ψ) ≃ᵢ
{ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding X),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding X))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding X y)) } }) =
({ x // x ∈ p } ≃ᵢ ↑(range (kuratowskiEmbedding X)))
[PROOFSTEP]
rw [rangeΨ]
[GOAL]
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
f : ↑(range Ψ) ≃ᵢ ↑(range (kuratowskiEmbedding X))
⊢ (↑↑p ≃ᵢ
{ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding X),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding X))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding X y)) } }) =
({ x // x ∈ p } ≃ᵢ ↑(range (kuratowskiEmbedding X)))
[PROOFSTEP]
rfl
[GOAL]
case refine'_2.intro.intro
X : Type u
inst✝² : MetricSpace X
inst✝¹ : CompactSpace X
inst✝ : Nonempty X
p : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
isomΨ : Isometry Ψ
rangeΨ : range Ψ = ↑p
f : ↑(range Ψ) ≃ᵢ ↑(range (kuratowskiEmbedding X))
E :
(↑(range Ψ) ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X }) =
({ x // x ∈ p } ≃ᵢ ↑(range (kuratowskiEmbedding X)))
⊢ p ≈ NonemptyCompacts.kuratowskiEmbedding X
[PROOFSTEP]
exact ⟨cast E f⟩
[GOAL]
p : GHSpace
⊢ toGHSpace (Rep p) = p
[PROOFSTEP]
change toGHSpace (Quot.out p : NonemptyCompacts ℓ_infty_ℝ) = p
[GOAL]
p : GHSpace
⊢ toGHSpace { x // x ∈ Quot.out p } = p
[PROOFSTEP]
rw [← eq_toGHSpace]
[GOAL]
p : GHSpace
⊢ Quotient.mk IsometryRel.setoid (Quot.out p) = p
[PROOFSTEP]
exact Quot.out_eq p
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ toGHSpace X = toGHSpace Y → Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
simp only [toGHSpace]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding X) =
Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding Y) →
Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
rw [Quotient.eq]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ NonemptyCompacts.kuratowskiEmbedding X ≈ NonemptyCompacts.kuratowskiEmbedding Y → Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
rintro ⟨e⟩
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
⊢ Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
have I :
(NonemptyCompacts.kuratowskiEmbedding X ≃ᵢ NonemptyCompacts.kuratowskiEmbedding Y) =
(range (kuratowskiEmbedding X) ≃ᵢ range (kuratowskiEmbedding Y)) :=
by dsimp only [NonemptyCompacts.kuratowskiEmbedding]; rfl
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
⊢ ({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }) =
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y)))
[PROOFSTEP]
dsimp only [NonemptyCompacts.kuratowskiEmbedding]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
⊢ ({ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding X),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding X))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding X y)) } } ≃ᵢ
{ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding Y),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding Y))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding Y y)) } }) =
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y)))
[PROOFSTEP]
rfl
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
I :
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }) =
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y)))
⊢ Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
have f := (kuratowskiEmbedding.isometry X).isometryEquivOnRange
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
I :
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }) =
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y)))
f : X ≃ᵢ ↑(range (kuratowskiEmbedding X))
⊢ Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
have g := (kuratowskiEmbedding.isometry Y).isometryEquivOnRange.symm
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : { x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }
I :
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y }) =
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y)))
f : X ≃ᵢ ↑(range (kuratowskiEmbedding X))
g : ↑(range (kuratowskiEmbedding Y)) ≃ᵢ Y
⊢ Nonempty (X ≃ᵢ Y)
[PROOFSTEP]
exact ⟨f.trans <| (cast I e).trans g⟩
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ Nonempty (X ≃ᵢ Y) → toGHSpace X = toGHSpace Y
[PROOFSTEP]
rintro ⟨e⟩
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
⊢ toGHSpace X = toGHSpace Y
[PROOFSTEP]
simp only [toGHSpace, Quotient.eq']
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
⊢ Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding X) =
Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding Y)
[PROOFSTEP]
have f := (kuratowskiEmbedding.isometry X).isometryEquivOnRange.symm
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
⊢ Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding X) =
Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding Y)
[PROOFSTEP]
have g := (kuratowskiEmbedding.isometry Y).isometryEquivOnRange
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
g : Y ≃ᵢ ↑(range (kuratowskiEmbedding Y))
⊢ Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding X) =
Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding Y)
[PROOFSTEP]
have I :
(range (kuratowskiEmbedding X) ≃ᵢ range (kuratowskiEmbedding Y)) =
(NonemptyCompacts.kuratowskiEmbedding X ≃ᵢ NonemptyCompacts.kuratowskiEmbedding Y) :=
by dsimp only [NonemptyCompacts.kuratowskiEmbedding]; rfl
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
g : Y ≃ᵢ ↑(range (kuratowskiEmbedding Y))
⊢ (↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y))) =
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y })
[PROOFSTEP]
dsimp only [NonemptyCompacts.kuratowskiEmbedding]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
g : Y ≃ᵢ ↑(range (kuratowskiEmbedding Y))
⊢ (↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y))) =
({ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding X),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding X))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding X y)) } } ≃ᵢ
{ x //
x ∈
{
toCompacts :=
{ carrier := range (kuratowskiEmbedding Y),
isCompact' := (_ : IsCompact (range (kuratowskiEmbedding Y))) },
nonempty' := (_ : Set.Nonempty (range fun y => kuratowskiEmbedding Y y)) } })
[PROOFSTEP]
rfl
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
g : Y ≃ᵢ ↑(range (kuratowskiEmbedding Y))
I :
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y))) =
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y })
⊢ Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding X) =
Quotient.mk IsometryRel.setoid (NonemptyCompacts.kuratowskiEmbedding Y)
[PROOFSTEP]
rw [Quotient.eq]
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
e : X ≃ᵢ Y
f : ↑(range (kuratowskiEmbedding X)) ≃ᵢ X
g : Y ≃ᵢ ↑(range (kuratowskiEmbedding Y))
I :
(↑(range (kuratowskiEmbedding X)) ≃ᵢ ↑(range (kuratowskiEmbedding Y))) =
({ x // x ∈ NonemptyCompacts.kuratowskiEmbedding X } ≃ᵢ { x // x ∈ NonemptyCompacts.kuratowskiEmbedding Y })
⊢ NonemptyCompacts.kuratowskiEmbedding X ≈ NonemptyCompacts.kuratowskiEmbedding Y
[PROOFSTEP]
exact ⟨cast I ((f.trans e).trans g)⟩
[GOAL]
p q : GHSpace
⊢ dist p q = ghDist (GHSpace.Rep p) (GHSpace.Rep q)
[PROOFSTEP]
rw [ghDist, p.toGHSpace_rep, q.toGHSpace_rep]
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
rcases exists_mem_of_nonempty X with ⟨xX, _⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let s : Set γ := range Φ ∪ range Ψ
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let Φ' : X → Subtype s := fun y => ⟨Φ y, mem_union_left _ (mem_range_self _)⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let Ψ' : Y → Subtype s := fun y => ⟨Ψ y, mem_union_right _ (mem_range_self _)⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have IΦ' : Isometry Φ' := fun x y => ha x y
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have IΨ' : Isometry Ψ' := fun x y => hb x y
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have : IsCompact s := (isCompact_range ha.continuous).union (isCompact_range hb.continuous)
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this : IsCompact s
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
letI : MetricSpace (Subtype s) := by infer_instance
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this : IsCompact s
⊢ MetricSpace (Subtype s)
[PROOFSTEP]
infer_instance
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝ : IsCompact s
this : MetricSpace (Subtype s) := inferInstance
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
haveI : CompactSpace (Subtype s) := ⟨isCompact_iff_isCompact_univ.1 ‹IsCompact s›⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝¹ : IsCompact s
this✝ : MetricSpace (Subtype s) := inferInstance
this : CompactSpace (Subtype s)
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
haveI : Nonempty (Subtype s) := ⟨Φ' xX⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have ΦΦ' : Φ = Subtype.val ∘ Φ' := by funext; rfl
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
⊢ Φ = Subtype.val ∘ Φ'
[PROOFSTEP]
funext
[GOAL]
case h
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
x✝ : X
⊢ Φ x✝ = (Subtype.val ∘ Φ') x✝
[PROOFSTEP]
rfl
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have ΨΨ' : Ψ = Subtype.val ∘ Ψ' := by funext; rfl
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
⊢ Ψ = Subtype.val ∘ Ψ'
[PROOFSTEP]
funext
[GOAL]
case h
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
x✝ : Y
⊢ Ψ x✝ = (Subtype.val ∘ Ψ') x✝
[PROOFSTEP]
rfl
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
have : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ') :=
by
rw [ΦΦ', ΨΨ', range_comp, range_comp]
exact hausdorffDist_image isometry_subtype_coe
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
⊢ hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
[PROOFSTEP]
rw [ΦΦ', ΨΨ', range_comp, range_comp]
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝² : IsCompact s
this✝¹ : MetricSpace (Subtype s) := inferInstance
this✝ : CompactSpace (Subtype s)
this : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
⊢ hausdorffDist (Subtype.val '' range Φ') (Subtype.val '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
[PROOFSTEP]
exact hausdorffDist_image isometry_subtype_coe
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝³ : IsCompact s
this✝² : MetricSpace (Subtype s) := inferInstance
this✝¹ : CompactSpace (Subtype s)
this✝ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
⊢ ghDist X Y ≤ hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
rw [this]
-- Embed `s` in `ℓ^∞(ℝ)` through its Kuratowski embedding
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝³ : IsCompact s
this✝² : MetricSpace (Subtype s) := inferInstance
this✝¹ : CompactSpace (Subtype s)
this✝ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
⊢ ghDist X Y ≤ hausdorffDist (range Φ') (range Ψ')
[PROOFSTEP]
let F := kuratowskiEmbedding (Subtype s)
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝³ : IsCompact s
this✝² : MetricSpace (Subtype s) := inferInstance
this✝¹ : CompactSpace (Subtype s)
this✝ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
⊢ ghDist X Y ≤ hausdorffDist (range Φ') (range Ψ')
[PROOFSTEP]
have : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ') :=
hausdorffDist_image (kuratowskiEmbedding.isometry _)
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
⊢ ghDist X Y ≤ hausdorffDist (range Φ') (range Ψ')
[PROOFSTEP]
rw [← this]
-- Let `A` and `B` be the images of `X` and `Y` under this embedding. They are in `ℓ^∞(ℝ)`, and
-- their Hausdorff distance is the same as in the original space.
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
⊢ ghDist X Y ≤ hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
let A : NonemptyCompacts ℓ_infty_ℝ :=
⟨⟨F '' range Φ', (isCompact_range IΦ'.continuous).image (kuratowskiEmbedding.isometry _).continuous⟩,
(range_nonempty _).image _⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
⊢ ghDist X Y ≤ hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
let B : NonemptyCompacts ℓ_infty_ℝ :=
⟨⟨F '' range Ψ', (isCompact_range IΨ'.continuous).image (kuratowskiEmbedding.isometry _).continuous⟩,
(range_nonempty _).image _⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
⊢ ghDist X Y ≤ hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
have AX : ⟦A⟧ = toGHSpace X := by
rw [eq_toGHSpace_iff]
exact ⟨fun x => F (Φ' x), (kuratowskiEmbedding.isometry _).comp IΦ', range_comp _ _⟩
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
⊢ Quotient.mk IsometryRel.setoid A = toGHSpace X
[PROOFSTEP]
rw [eq_toGHSpace_iff]
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
⊢ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑A
[PROOFSTEP]
exact ⟨fun x => F (Φ' x), (kuratowskiEmbedding.isometry _).comp IΦ', range_comp _ _⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
⊢ ghDist X Y ≤ hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
have BY : ⟦B⟧ = toGHSpace Y := by
rw [eq_toGHSpace_iff]
exact ⟨fun x => F (Ψ' x), (kuratowskiEmbedding.isometry _).comp IΨ', range_comp _ _⟩
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
⊢ Quotient.mk IsometryRel.setoid B = toGHSpace Y
[PROOFSTEP]
rw [eq_toGHSpace_iff]
[GOAL]
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
⊢ ∃ Ψ, Isometry Ψ ∧ range Ψ = ↑B
[PROOFSTEP]
exact ⟨fun x => F (Ψ' x), (kuratowskiEmbedding.isometry _).comp IΨ', range_comp _ _⟩
[GOAL]
case intro
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
⊢ ghDist X Y ≤ hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
refine' csInf_le ⟨0, _⟩ _
[GOAL]
case intro.refine'_1
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
⊢ 0 ∈
lowerBounds
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = toGHSpace X} ×ˢ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y})
[PROOFSTEP]
simp only [lowerBounds, mem_image, mem_prod, mem_setOf_eq, Prod.exists, and_imp, forall_exists_index]
[GOAL]
case intro.refine'_1
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
⊢ ∀ ⦃a : ℝ⦄ (x x_1 : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid x = toGHSpace X →
Quotient.mk IsometryRel.setoid x_1 = toGHSpace Y → hausdorffDist ↑x ↑x_1 = a → 0 ≤ a
[PROOFSTEP]
intro t _ _ _ _ ht
[GOAL]
case intro.refine'_1
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
t : ℝ
x✝¹ x✝ : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
a✝¹ : Quotient.mk IsometryRel.setoid x✝¹ = toGHSpace X
a✝ : Quotient.mk IsometryRel.setoid x✝ = toGHSpace Y
ht : hausdorffDist ↑x✝¹ ↑x✝ = t
⊢ 0 ≤ t
[PROOFSTEP]
rw [← ht]
[GOAL]
case intro.refine'_1
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
t : ℝ
x✝¹ x✝ : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
a✝¹ : Quotient.mk IsometryRel.setoid x✝¹ = toGHSpace X
a✝ : Quotient.mk IsometryRel.setoid x✝ = toGHSpace Y
ht : hausdorffDist ↑x✝¹ ↑x✝ = t
⊢ 0 ≤ hausdorffDist ↑x✝¹ ↑x✝
[PROOFSTEP]
exact hausdorffDist_nonneg
[GOAL]
case intro.refine'_2
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
⊢ hausdorffDist (F '' range Φ') (F '' range Ψ') ∈
(fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = toGHSpace X} ×ˢ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y}
[PROOFSTEP]
apply (mem_image _ _ _).2
[GOAL]
case intro.refine'_2
X : Type u
inst✝⁶ : MetricSpace X
inst✝⁵ : CompactSpace X
inst✝⁴ : Nonempty X
Y : Type v
inst✝³ : MetricSpace Y
inst✝² : CompactSpace Y
inst✝¹ : Nonempty Y
γ : Type w
inst✝ : MetricSpace γ
Φ : X → γ
Ψ : Y → γ
ha : Isometry Φ
hb : Isometry Ψ
xX : X
h✝ : xX ∈ univ
s : Set γ := range Φ ∪ range Ψ
Φ' : X → Subtype s := fun y => { val := Φ y, property := (_ : Φ y ∈ range Φ ∪ range Ψ) }
Ψ' : Y → Subtype s := fun y => { val := Ψ y, property := (_ : Ψ y ∈ range Φ ∪ range Ψ) }
IΦ' : Isometry Φ'
IΨ' : Isometry Ψ'
this✝⁴ : IsCompact s
this✝³ : MetricSpace (Subtype s) := inferInstance
this✝² : CompactSpace (Subtype s)
this✝¹ : Nonempty (Subtype s)
ΦΦ' : Φ = Subtype.val ∘ Φ'
ΨΨ' : Ψ = Subtype.val ∘ Ψ'
this✝ : hausdorffDist (range Φ) (range Ψ) = hausdorffDist (range Φ') (range Ψ')
F : Subtype s → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (Subtype s)
this : hausdorffDist (F '' range Φ') (F '' range Ψ') = hausdorffDist (range Φ') (range Ψ')
A : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Φ', isCompact' := (_ : IsCompact (F '' range Φ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Φ')) }
B : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ } :=
{ toCompacts := { carrier := F '' range Ψ', isCompact' := (_ : IsCompact (F '' range Ψ')) },
nonempty' := (_ : Set.Nonempty ((fun a => F a) '' range Ψ')) }
AX : Quotient.mk IsometryRel.setoid A = toGHSpace X
BY : Quotient.mk IsometryRel.setoid B = toGHSpace Y
⊢ ∃ x,
x ∈ {a | Quotient.mk IsometryRel.setoid a = toGHSpace X} ×ˢ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y} ∧
hausdorffDist ↑x.fst ↑x.snd = hausdorffDist (F '' range Φ') (F '' range Ψ')
[PROOFSTEP]
exists (⟨A, B⟩ : NonemptyCompacts ℓ_infty_ℝ × NonemptyCompacts ℓ_infty_ℝ)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) = ghDist X Y
[PROOFSTEP]
inhabit X
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h : Inhabited X
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) = ghDist X Y
[PROOFSTEP]
inhabit Y
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) = ghDist X Y
[PROOFSTEP]
have A :
∀ p q : NonemptyCompacts ℓ_infty_ℝ,
⟦p⟧ = toGHSpace X →
⟦q⟧ = toGHSpace Y →
hausdorffDist (p : Set ℓ_infty_ℝ) q < diam (univ : Set X) + 1 + diam (univ : Set Y) →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist (p : Set ℓ_infty_ℝ) q :=
by
intro p q hp hq bound
rcases eq_toGHSpace_iff.1 hp with ⟨Φ, ⟨Φisom, Φrange⟩⟩
rcases eq_toGHSpace_iff.1 hq with ⟨Ψ, ⟨Ψisom, Ψrange⟩⟩
have I : diam (range Φ ∪ range Ψ) ≤ 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) :=
by
rcases exists_mem_of_nonempty X with ⟨xX, _⟩
have : ∃ y ∈ range Ψ, dist (Φ xX) y < diam (univ : Set X) + 1 + diam (univ : Set Y) :=
by
rw [Ψrange]
have : Φ xX ∈ ↑p := Φrange.subst (mem_range_self _)
exact
exists_dist_lt_of_hausdorffDist_lt this bound
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded)
rcases this with ⟨y, hy, dy⟩
rcases mem_range.1 hy with ⟨z, hzy⟩
rw [← hzy] at dy
have DΦ : diam (range Φ) = diam (univ : Set X) := Φisom.diam_range
have DΨ : diam (range Ψ) = diam (univ : Set Y) := Ψisom.diam_range
calc
diam (range Φ ∪ range Ψ) ≤ diam (range Φ) + dist (Φ xX) (Ψ z) + diam (range Ψ) :=
diam_union (mem_range_self _) (mem_range_self _)
_ ≤ diam (univ : Set X) + (diam (univ : Set X) + 1 + diam (univ : Set Y)) + diam (univ : Set Y) :=
by
rw [DΦ, DΨ]
gcongr
-- apply add_le_add (add_le_add le_rfl (le_of_lt dy)) le_rfl
_ = 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := by ring
let f : Sum X Y → ℓ_infty_ℝ := fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
let F : Sum X Y × Sum X Y → ℝ := fun p =>
dist (f p.1)
(f p.2)
-- check that the induced "distance" is a candidate
have Fgood : F ∈ candidates X Y :=
by
simp only [candidates, forall_const, and_true_iff, add_comm, eq_self_iff_true, dist_eq_zero, and_self_iff,
Set.mem_setOf_eq]
repeat' constructor
·
exact fun x y =>
calc
F (inl x, inl y) = dist (Φ x) (Φ y) := rfl
_ = dist x y := Φisom.dist_eq x y
·
exact fun x y =>
calc
F (inr x, inr y) = dist (Ψ x) (Ψ y) := rfl
_ = dist x y := Ψisom.dist_eq x y
· exact fun x y => dist_comm _ _
· exact fun x y z => dist_triangle _ _ _
·
exact fun x y =>
calc
F (x, y) ≤ diam (range Φ ∪ range Ψ) :=
by
have A : ∀ z : Sum X Y, f z ∈ range Φ ∪ range Ψ :=
by
intro z
cases z
· apply mem_union_left; apply mem_range_self
· apply mem_union_right; apply mem_range_self
refine' dist_le_diam_of_mem _ (A _) (A _)
rw [Φrange, Ψrange]
exact (p ⊔ q).isCompact.bounded
_ ≤ 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := I
let Fb := candidatesBOfCandidates F Fgood
have : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb :=
hausdorffDist_optimal_le_HD _ _ (candidatesBOfCandidates_mem F Fgood)
refine' le_trans this (le_of_forall_le_of_dense fun r hr => _)
have I1 : ∀ x : X, (⨅ y, Fb (inl x, inr y)) ≤ r := by
intro x
have : f (inl x) ∈ ↑p := Φrange.subst (mem_range_self _)
rcases exists_dist_lt_of_hausdorffDist_lt this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded
q.isCompact.bounded) with
⟨z, zq, hz⟩
have : z ∈ range Ψ := by rwa [← Ψrange] at zq
rcases mem_range.1 this with ⟨y, hy⟩
calc
(⨅ y, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux1 0) y
_ = dist (Φ x) (Ψ y) := rfl
_ = dist (f (inl x)) z := by rw [hy]
_ ≤ r := le_of_lt hz
have I2 : ∀ y : Y, (⨅ x, Fb (inl x, inr y)) ≤ r := by
intro y
have : f (inr y) ∈ ↑q := Ψrange.subst (mem_range_self _)
rcases exists_dist_lt_of_hausdorffDist_lt' this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded
q.isCompact.bounded) with
⟨z, zq, hz⟩
have : z ∈ range Φ := by rwa [← Φrange] at zq
rcases mem_range.1 this with ⟨x, hx⟩
calc
(⨅ x, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux2 0) x
_ = dist (Φ x) (Ψ y) := rfl
_ = dist z (f (inr y)) := by rw [hx]
_ ≤ r := le_of_lt hz
simp only [HD, ciSup_le I1, ciSup_le I2, max_le_iff, and_self_iff]
/- Get the same inequality for any coupling. If the coupling is quite good, the desired
inequality has been proved above. If it is bad, then the inequality is obvious. -/
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
⊢ ∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
intro p q hp hq bound
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
rcases eq_toGHSpace_iff.1 hp with ⟨Φ, ⟨Φisom, Φrange⟩⟩
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
rcases eq_toGHSpace_iff.1 hq with ⟨Ψ, ⟨Ψisom, Ψrange⟩⟩
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
have I : diam (range Φ ∪ range Ψ) ≤ 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) :=
by
rcases exists_mem_of_nonempty X with ⟨xX, _⟩
have : ∃ y ∈ range Ψ, dist (Φ xX) y < diam (univ : Set X) + 1 + diam (univ : Set Y) :=
by
rw [Ψrange]
have : Φ xX ∈ ↑p := Φrange.subst (mem_range_self _)
exact
exists_dist_lt_of_hausdorffDist_lt this bound
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded)
rcases this with ⟨y, hy, dy⟩
rcases mem_range.1 hy with ⟨z, hzy⟩
rw [← hzy] at dy
have DΦ : diam (range Φ) = diam (univ : Set X) := Φisom.diam_range
have DΨ : diam (range Ψ) = diam (univ : Set Y) := Ψisom.diam_range
calc
diam (range Φ ∪ range Ψ) ≤ diam (range Φ) + dist (Φ xX) (Ψ z) + diam (range Ψ) :=
diam_union (mem_range_self _) (mem_range_self _)
_ ≤ diam (univ : Set X) + (diam (univ : Set X) + 1 + diam (univ : Set Y)) + diam (univ : Set Y) :=
by
rw [DΦ, DΨ]
gcongr
-- apply add_le_add (add_le_add le_rfl (le_of_lt dy)) le_rfl
_ = 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := by ring
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
rcases exists_mem_of_nonempty X with ⟨xX, _⟩
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
have : ∃ y ∈ range Ψ, dist (Φ xX) y < diam (univ : Set X) + 1 + diam (univ : Set Y) :=
by
rw [Ψrange]
have : Φ xX ∈ ↑p := Φrange.subst (mem_range_self _)
exact
exists_dist_lt_of_hausdorffDist_lt this bound
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
⊢ ∃ y, y ∈ range Ψ ∧ dist (Φ xX) y < diam univ + 1 + diam univ
[PROOFSTEP]
rw [Ψrange]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
⊢ ∃ y, y ∈ ↑q ∧ dist (Φ xX) y < diam univ + 1 + diam univ
[PROOFSTEP]
have : Φ xX ∈ ↑p := Φrange.subst (mem_range_self _)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
this : Φ xX ∈ ↑p
⊢ ∃ y, y ∈ ↑q ∧ dist (Φ xX) y < diam univ + 1 + diam univ
[PROOFSTEP]
exact
exists_dist_lt_of_hausdorffDist_lt this bound
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded)
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
this : ∃ y, y ∈ range Ψ ∧ dist (Φ xX) y < diam univ + 1 + diam univ
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
rcases this with ⟨y, hy, dy⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
dy : dist (Φ xX) y < diam univ + 1 + diam univ
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
rcases mem_range.1 hy with ⟨z, hzy⟩
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
dy : dist (Φ xX) y < diam univ + 1 + diam univ
z : Y
hzy : Ψ z = y
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
rw [← hzy] at dy
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
have DΦ : diam (range Φ) = diam (univ : Set X) := Φisom.diam_range
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
DΦ : diam (range Φ) = diam univ
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
have DΨ : diam (range Ψ) = diam (univ : Set Y) := Ψisom.diam_range
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
DΦ : diam (range Φ) = diam univ
DΨ : diam (range Ψ) = diam univ
⊢ diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
calc
diam (range Φ ∪ range Ψ) ≤ diam (range Φ) + dist (Φ xX) (Ψ z) + diam (range Ψ) :=
diam_union (mem_range_self _) (mem_range_self _)
_ ≤ diam (univ : Set X) + (diam (univ : Set X) + 1 + diam (univ : Set Y)) + diam (univ : Set Y) :=
by
rw [DΦ, DΨ]
gcongr
-- apply add_le_add (add_le_add le_rfl (le_of_lt dy)) le_rfl
_ = 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := by ring
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
DΦ : diam (range Φ) = diam univ
DΨ : diam (range Ψ) = diam univ
⊢ diam (range Φ) + dist (Φ xX) (Ψ z) + diam (range Ψ) ≤ diam univ + (diam univ + 1 + diam univ) + diam univ
[PROOFSTEP]
rw [DΦ, DΨ]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
DΦ : diam (range Φ) = diam univ
DΨ : diam (range Ψ) = diam univ
⊢ diam univ + dist (Φ xX) (Ψ z) + diam univ ≤ diam univ + (diam univ + 1 + diam univ) + diam univ
[PROOFSTEP]
gcongr
-- apply add_le_add (add_le_add le_rfl (le_of_lt dy)) le_rfl
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
xX : X
h✝ : xX ∈ univ
y : { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : y ∈ range Ψ
z : Y
dy : dist (Φ xX) (Ψ z) < diam univ + 1 + diam univ
hzy : Ψ z = y
DΦ : diam (range Φ) = diam univ
DΨ : diam (range Ψ) = diam univ
⊢ diam univ + (diam univ + 1 + diam univ) + diam univ = 2 * diam univ + 1 + 2 * diam univ
[PROOFSTEP]
ring
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
let f : Sum X Y → ℓ_infty_ℝ := fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
let F : Sum X Y × Sum X Y → ℝ := fun p =>
dist (f p.1)
(f p.2)
-- check that the induced "distance" is a candidate
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
have Fgood : F ∈ candidates X Y :=
by
simp only [candidates, forall_const, and_true_iff, add_comm, eq_self_iff_true, dist_eq_zero, and_self_iff,
Set.mem_setOf_eq]
repeat' constructor
·
exact fun x y =>
calc
F (inl x, inl y) = dist (Φ x) (Φ y) := rfl
_ = dist x y := Φisom.dist_eq x y
·
exact fun x y =>
calc
F (inr x, inr y) = dist (Ψ x) (Ψ y) := rfl
_ = dist x y := Ψisom.dist_eq x y
· exact fun x y => dist_comm _ _
· exact fun x y z => dist_triangle _ _ _
·
exact fun x y =>
calc
F (x, y) ≤ diam (range Φ ∪ range Ψ) :=
by
have A : ∀ z : Sum X Y, f z ∈ range Φ ∪ range Ψ := by
intro z
cases z
· apply mem_union_left; apply mem_range_self
· apply mem_union_right; apply mem_range_self
refine' dist_le_diam_of_mem _ (A _) (A _)
rw [Φrange, Ψrange]
exact (p ⊔ q).isCompact.bounded
_ ≤ 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := I
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ F ∈ candidates X Y
[PROOFSTEP]
simp only [candidates, forall_const, and_true_iff, add_comm, eq_self_iff_true, dist_eq_zero, and_self_iff,
Set.mem_setOf_eq]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ((((∀ (x y : X), dist (Φ x) (Φ y) = dist x y) ∧ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)) ∧
∀ (x y z : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z) ≤
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) +
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z)) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) ≤
↑(GromovHausdorff.maxVar X Y)
[PROOFSTEP]
repeat' constructor
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ((((∀ (x y : X), dist (Φ x) (Φ y) = dist x y) ∧ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)) ∧
∀ (x y z : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z) ≤
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) +
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z)) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) ≤
↑(GromovHausdorff.maxVar X Y)
[PROOFSTEP]
constructor
[GOAL]
case left
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ (((∀ (x y : X), dist (Φ x) (Φ y) = dist x y) ∧ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)) ∧
∀ (x y z : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z) ≤
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) +
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
constructor
[GOAL]
case left.left
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ((∀ (x y : X), dist (Φ x) (Φ y) = dist x y) ∧ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y) ∧
∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
constructor
[GOAL]
case left.left.left
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ (∀ (x y : X), dist (Φ x) (Φ y) = dist x y) ∧ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y
[PROOFSTEP]
constructor
[GOAL]
case left.left.left.left
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X), dist (Φ x) (Φ y) = dist x y
[PROOFSTEP]
constructor
[GOAL]
case left.left.left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y
[PROOFSTEP]
constructor
[GOAL]
case left.left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
constructor
[GOAL]
case left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y z : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z) ≤
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) +
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
constructor
[GOAL]
case right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) ≤
↑(GromovHausdorff.maxVar X Y)
[PROOFSTEP]
constructor
[GOAL]
case left.left.left.left
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X), dist (Φ x) (Φ y) = dist x y
[PROOFSTEP]
exact fun x y =>
calc
F (inl x, inl y) = dist (Φ x) (Φ y) := rfl
_ = dist x y := Φisom.dist_eq x y
[GOAL]
case left.left.left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : Y), dist (Ψ x) (Ψ y) = dist x y
[PROOFSTEP]
exact fun x y =>
calc
F (inr x, inr y) = dist (Ψ x) (Ψ y) := rfl
_ = dist x y := Ψisom.dist_eq x y
[GOAL]
case left.left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) =
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match x with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
exact fun x y => dist_comm _ _
[GOAL]
case left.right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y z : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z) ≤
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) +
dist
(match y with
| inl y => Φ y
| inr z => Ψ z)
(match z with
| inl y => Φ y
| inr z => Ψ z)
[PROOFSTEP]
exact fun x y z => dist_triangle _ _ _
[GOAL]
case right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
⊢ ∀ (x y : X ⊕ Y),
dist
(match x with
| inl y => Φ y
| inr z => Ψ z)
(match y with
| inl y => Φ y
| inr z => Ψ z) ≤
↑(GromovHausdorff.maxVar X Y)
[PROOFSTEP]
exact fun x y =>
calc
F (x, y) ≤ diam (range Φ ∪ range Ψ) :=
by
have A : ∀ z : Sum X Y, f z ∈ range Φ ∪ range Ψ := by
intro z
cases z
· apply mem_union_left; apply mem_range_self
· apply mem_union_right; apply mem_range_self
refine' dist_le_diam_of_mem _ (A _) (A _)
rw [Φrange, Ψrange]
exact (p ⊔ q).isCompact.bounded
_ ≤ 2 * diam (univ : Set X) + 1 + 2 * diam (univ : Set Y) := I
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
⊢ F (x, y) ≤ diam (range Φ ∪ range Ψ)
[PROOFSTEP]
have A : ∀ z : Sum X Y, f z ∈ range Φ ∪ range Ψ := by
intro z
cases z
· apply mem_union_left; apply mem_range_self
· apply mem_union_right; apply mem_range_self
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
⊢ ∀ (z : X ⊕ Y), f z ∈ range Φ ∪ range Ψ
[PROOFSTEP]
intro z
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y z : X ⊕ Y
⊢ f z ∈ range Φ ∪ range Ψ
[PROOFSTEP]
cases z
[GOAL]
case inl
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
val✝ : X
⊢ f (inl val✝) ∈ range Φ ∪ range Ψ
[PROOFSTEP]
apply mem_union_left
[GOAL]
case inl.a
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
val✝ : X
⊢ f (inl val✝) ∈ range Φ
[PROOFSTEP]
apply mem_range_self
[GOAL]
case inr
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
val✝ : Y
⊢ f (inr val✝) ∈ range Φ ∪ range Ψ
[PROOFSTEP]
apply mem_union_right
[GOAL]
case inr.a
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
val✝ : Y
⊢ f (inr val✝) ∈ range Ψ
[PROOFSTEP]
apply mem_range_self
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
A : ∀ (z : X ⊕ Y), f z ∈ range Φ ∪ range Ψ
⊢ F (x, y) ≤ diam (range Φ ∪ range Ψ)
[PROOFSTEP]
refine' dist_le_diam_of_mem _ (A _) (A _)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
A : ∀ (z : X ⊕ Y), f z ∈ range Φ ∪ range Ψ
⊢ Metric.Bounded (range Φ ∪ range Ψ)
[PROOFSTEP]
rw [Φrange, Ψrange]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
x y : X ⊕ Y
A : ∀ (z : X ⊕ Y), f z ∈ range Φ ∪ range Ψ
⊢ Metric.Bounded (↑p ∪ ↑q)
[PROOFSTEP]
exact (p ⊔ q).isCompact.bounded
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
let Fb := candidatesBOfCandidates F Fgood
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
have : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb :=
hausdorffDist_optimal_le_HD _ _ (candidatesBOfCandidates_mem F Fgood)
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
refine' le_trans this (le_of_forall_le_of_dense fun r hr => _)
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
⊢ HD Fb ≤ r
[PROOFSTEP]
have I1 : ∀ x : X, (⨅ y, Fb (inl x, inr y)) ≤ r := by
intro x
have : f (inl x) ∈ ↑p := Φrange.subst (mem_range_self _)
rcases exists_dist_lt_of_hausdorffDist_lt this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded) with
⟨z, zq, hz⟩
have : z ∈ range Ψ := by rwa [← Ψrange] at zq
rcases mem_range.1 this with ⟨y, hy⟩
calc
(⨅ y, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux1 0) y
_ = dist (Φ x) (Ψ y) := rfl
_ = dist (f (inl x)) z := by rw [hy]
_ ≤ r := le_of_lt hz
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
⊢ ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
intro x
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
⊢ ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
have : f (inl x) ∈ ↑p := Φrange.subst (mem_range_self _)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this : f (inl x) ∈ ↑p
⊢ ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
rcases exists_dist_lt_of_hausdorffDist_lt this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded) with
⟨z, zq, hz⟩
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
⊢ ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
have : z ∈ range Ψ := by rwa [← Ψrange] at zq
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
⊢ z ∈ range Ψ
[PROOFSTEP]
rwa [← Ψrange] at zq
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this✝ : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
this : z ∈ range Ψ
⊢ ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
rcases mem_range.1 this with ⟨y, hy⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this✝ : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
this : z ∈ range Ψ
y : Y
hy : Ψ y = z
⊢ ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
calc
(⨅ y, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux1 0) y
_ = dist (Φ x) (Ψ y) := rfl
_ = dist (f (inl x)) z := by rw [hy]
_ ≤ r := le_of_lt hz
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this✝ : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
this : z ∈ range Ψ
y : Y
hy : Ψ y = z
⊢ BddBelow (range fun y => ↑Fb (inl x, inr y))
[PROOFSTEP]
simpa only [add_zero] using HD_below_aux1 0
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
x : X
this✝ : f (inl x) ∈ ↑p
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑q
hz : dist (f (inl x)) z < r
this : z ∈ range Ψ
y : Y
hy : Ψ y = z
⊢ dist (Φ x) (Ψ y) = dist (f (inl x)) z
[PROOFSTEP]
rw [hy]
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
⊢ HD Fb ≤ r
[PROOFSTEP]
have I2 : ∀ y : Y, (⨅ x, Fb (inl x, inr y)) ≤ r := by
intro y
have : f (inr y) ∈ ↑q := Ψrange.subst (mem_range_self _)
rcases exists_dist_lt_of_hausdorffDist_lt' this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded) with
⟨z, zq, hz⟩
have : z ∈ range Φ := by rwa [← Φrange] at zq
rcases mem_range.1 this with ⟨x, hx⟩
calc
(⨅ x, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux2 0) x
_ = dist (Φ x) (Ψ y) := rfl
_ = dist z (f (inr y)) := by rw [hx]
_ ≤ r := le_of_lt hz
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
⊢ ∀ (y : Y), ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
intro y
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
⊢ ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
have : f (inr y) ∈ ↑q := Ψrange.subst (mem_range_self _)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this : f (inr y) ∈ ↑q
⊢ ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
rcases exists_dist_lt_of_hausdorffDist_lt' this hr
(hausdorffEdist_ne_top_of_nonempty_of_bounded p.nonempty q.nonempty p.isCompact.bounded q.isCompact.bounded) with
⟨z, zq, hz⟩
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
⊢ ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
have : z ∈ range Φ := by rwa [← Φrange] at zq
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
⊢ z ∈ range Φ
[PROOFSTEP]
rwa [← Φrange] at zq
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this✝ : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
this : z ∈ range Φ
⊢ ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
rcases mem_range.1 this with ⟨x, hx⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this✝ : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
this : z ∈ range Φ
x : X
hx : Φ x = z
⊢ ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
[PROOFSTEP]
calc
(⨅ x, Fb (inl x, inr y)) ≤ Fb (inl x, inr y) := ciInf_le (by simpa only [add_zero] using HD_below_aux2 0) x
_ = dist (Φ x) (Ψ y) := rfl
_ = dist z (f (inr y)) := by rw [hx]
_ ≤ r := le_of_lt hz
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this✝ : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
this : z ∈ range Φ
x : X
hx : Φ x = z
⊢ BddBelow (range fun x => ↑Fb (inl x, inr y))
[PROOFSTEP]
simpa only [add_zero] using HD_below_aux2 0
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this✝¹ : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
y : Y
this✝ : f (inr y) ∈ ↑q
z : { x // x ∈ lp (fun n => ℝ) ⊤ }
zq : z ∈ ↑p
hz : dist z (f (inr y)) < r
this : z ∈ range Φ
x : X
hx : Φ x = z
⊢ dist (Φ x) (Ψ y) = dist z (f (inr y))
[PROOFSTEP]
rw [hx]
[GOAL]
case intro.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
bound : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
Φ : X → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Φrange : range Φ = ↑p
Ψ : Y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψisom : Isometry Ψ
Ψrange : range Ψ = ↑q
I : diam (range Φ ∪ range Ψ) ≤ 2 * diam univ + 1 + 2 * diam univ
f : X ⊕ Y → { x // x ∈ lp (fun n => ℝ) ⊤ } :=
fun x =>
match x with
| inl y => Φ y
| inr z => Ψ z
F : (X ⊕ Y) × (X ⊕ Y) → ℝ := fun p => dist (f p.fst) (f p.snd)
Fgood : F ∈ candidates X Y
Fb : GromovHausdorff.Cb X Y := candidatesBOfCandidates F Fgood
this : hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD Fb
r : ℝ
hr : hausdorffDist ↑p ↑q < r
I1 : ∀ (x : X), ⨅ (y : Y), ↑Fb (inl x, inr y) ≤ r
I2 : ∀ (y : Y), ⨅ (x : X), ↑Fb (inl x, inr y) ≤ r
⊢ HD Fb ≤ r
[PROOFSTEP]
simp only [HD, ciSup_le I1, ciSup_le I2, max_le_iff, and_self_iff]
/- Get the same inequality for any coupling. If the coupling is quite good, the desired
inequality has been proved above. If it is bad, then the inequality is obvious. -/
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) = ghDist X Y
[PROOFSTEP]
have B :
∀ p q : NonemptyCompacts ℓ_infty_ℝ,
⟦p⟧ = toGHSpace X →
⟦q⟧ = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist (p : Set ℓ_infty_ℝ) q :=
by
intro p q hp hq
by_cases h : hausdorffDist (p : Set ℓ_infty_ℝ) q < diam (univ : Set X) + 1 + diam (univ : Set Y)
· exact A p q hp hq h
·
calc
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD (candidatesBDist X Y) :=
hausdorffDist_optimal_le_HD _ _ candidatesBDist_mem_candidatesB
_ ≤ diam (univ : Set X) + 1 + diam (univ : Set Y) := HD_candidatesBDist_le
_ ≤ hausdorffDist (p : Set ℓ_infty_ℝ) q := not_lt.1 h
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ ∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
intro p q hp hq
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
by_cases h : hausdorffDist (p : Set ℓ_infty_ℝ) q < diam (univ : Set X) + 1 + diam (univ : Set Y)
[GOAL]
case pos
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
h : hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
exact A p q hp hq h
[GOAL]
case neg
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : Quotient.mk IsometryRel.setoid p = toGHSpace X
hq : Quotient.mk IsometryRel.setoid q = toGHSpace Y
h : ¬hausdorffDist ↑p ↑q < diam univ + 1 + diam univ
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
[PROOFSTEP]
calc
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ HD (candidatesBDist X Y) :=
hausdorffDist_optimal_le_HD _ _ candidatesBDist_mem_candidatesB
_ ≤ diam (univ : Set X) + 1 + diam (univ : Set Y) := HD_candidatesBDist_le
_ ≤ hausdorffDist (p : Set ℓ_infty_ℝ) q := not_lt.1 h
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) = ghDist X Y
[PROOFSTEP]
refine' le_antisymm _ _
[GOAL]
case refine'_1
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ ghDist X Y
[PROOFSTEP]
apply le_csInf
[GOAL]
case refine'_1.h₁
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ Set.Nonempty
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = toGHSpace X} ×ˢ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y})
[PROOFSTEP]
refine' (Set.Nonempty.prod _ _).image _
[GOAL]
case refine'_1.h₁.refine'_1
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ Set.Nonempty {a | Quotient.mk IsometryRel.setoid a = toGHSpace X}
[PROOFSTEP]
exact ⟨_, rfl⟩
[GOAL]
case refine'_1.h₁.refine'_2
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ Set.Nonempty {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y}
[PROOFSTEP]
exact ⟨_, rfl⟩
[GOAL]
case refine'_1.h₂
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ ∀ (b : ℝ),
b ∈
(fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = toGHSpace X} ×ˢ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y} →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ b
[PROOFSTEP]
rintro b ⟨⟨p, q⟩, ⟨hp, hq⟩, rfl⟩
[GOAL]
case refine'_1.h₂.intro.mk.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hp : (p, q).fst ∈ {a | Quotient.mk IsometryRel.setoid a = toGHSpace X}
hq : (p, q).snd ∈ {b | Quotient.mk IsometryRel.setoid b = toGHSpace Y}
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ (fun p => hausdorffDist ↑p.fst ↑p.snd) (p, q)
[PROOFSTEP]
exact B p q hp hq
[GOAL]
case refine'_2
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
inhabited_h✝ : Inhabited X
inhabited_h : Inhabited Y
A :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist ↑p ↑q < diam univ + 1 + diam univ →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
B :
∀ (p q : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }),
Quotient.mk IsometryRel.setoid p = toGHSpace X →
Quotient.mk IsometryRel.setoid q = toGHSpace Y →
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) ≤ hausdorffDist ↑p ↑q
⊢ ghDist X Y ≤ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y))
[PROOFSTEP]
exact ghDist_le_hausdorffDist (isometry_optimalGHInjl X Y) (isometry_optimalGHInjr X Y)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
⊢ ∃ Φ Ψ, Isometry Φ ∧ Isometry Ψ ∧ ghDist X Y = hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let F := kuratowskiEmbedding (OptimalGHCoupling X Y)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
⊢ ∃ Φ Ψ, Isometry Φ ∧ Isometry Ψ ∧ ghDist X Y = hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let Φ := F ∘ optimalGHInjl X Y
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
⊢ ∃ Φ Ψ, Isometry Φ ∧ Isometry Ψ ∧ ghDist X Y = hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
let Ψ := F ∘ optimalGHInjr X Y
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
Ψ : Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjr X Y
⊢ ∃ Φ Ψ, Isometry Φ ∧ Isometry Ψ ∧ ghDist X Y = hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
refine' ⟨Φ, Ψ, _, _, _⟩
[GOAL]
case refine'_1
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
Ψ : Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjr X Y
⊢ Isometry Φ
[PROOFSTEP]
exact (kuratowskiEmbedding.isometry _).comp (isometry_optimalGHInjl X Y)
[GOAL]
case refine'_2
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
Ψ : Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjr X Y
⊢ Isometry Ψ
[PROOFSTEP]
exact (kuratowskiEmbedding.isometry _).comp (isometry_optimalGHInjr X Y)
[GOAL]
case refine'_3
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
Ψ : Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjr X Y
⊢ ghDist X Y = hausdorffDist (range Φ) (range Ψ)
[PROOFSTEP]
rw [← image_univ, ← image_univ, image_comp F, image_univ, image_comp F (optimalGHInjr X Y), image_univ, ←
hausdorffDist_optimal]
[GOAL]
case refine'_3
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
F : OptimalGHCoupling X Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := kuratowskiEmbedding (OptimalGHCoupling X Y)
Φ : X → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjl X Y
Ψ : Y → { x // x ∈ lp (fun i => ℝ) ⊤ } := F ∘ optimalGHInjr X Y
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) =
hausdorffDist (F '' range (optimalGHInjl X Y)) (F '' range (optimalGHInjr X Y))
[PROOFSTEP]
exact (hausdorffDist_image (kuratowskiEmbedding.isometry _)).symm
[GOAL]
x : GHSpace
⊢ dist x x = 0
[PROOFSTEP]
rcases exists_rep x with ⟨y, hy⟩
[GOAL]
case intro
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ dist x x = 0
[PROOFSTEP]
refine' le_antisymm _ _
[GOAL]
case intro.refine'_1
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ dist x x ≤ 0
[PROOFSTEP]
apply csInf_le
[GOAL]
case intro.refine'_1.h₁
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ BddBelow
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = x})
[PROOFSTEP]
exact ⟨0, by rintro b ⟨⟨u, v⟩, -, rfl⟩; exact hausdorffDist_nonneg⟩
[GOAL]
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ 0 ∈
lowerBounds
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = x})
[PROOFSTEP]
rintro b ⟨⟨u, v⟩, -, rfl⟩
[GOAL]
case intro.mk.intro
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
u v : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
⊢ 0 ≤ (fun p => hausdorffDist ↑p.fst ↑p.snd) (u, v)
[PROOFSTEP]
exact hausdorffDist_nonneg
[GOAL]
case intro.refine'_1.h₂
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ 0 ∈
(fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = x}
[PROOFSTEP]
simp only [mem_image, mem_prod, mem_setOf_eq, Prod.exists]
[GOAL]
case intro.refine'_1.h₂
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ ∃ a b, (Quotient.mk IsometryRel.setoid a = x ∧ Quotient.mk IsometryRel.setoid b = x) ∧ hausdorffDist ↑a ↑b = 0
[PROOFSTEP]
exists y, y
[GOAL]
case intro.refine'_1.h₂
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ (Quotient.mk IsometryRel.setoid y = x ∧ Quotient.mk IsometryRel.setoid y = x) ∧ hausdorffDist ↑y ↑y = 0
[PROOFSTEP]
simpa only [and_self_iff, hausdorffDist_self_zero, eq_self_iff_true, and_true_iff]
[GOAL]
case intro.refine'_2
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ 0 ≤ dist x x
[PROOFSTEP]
apply le_csInf
[GOAL]
case intro.refine'_2.h₁
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ Set.Nonempty
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = x})
[PROOFSTEP]
exact Set.Nonempty.image _ <| Set.Nonempty.prod ⟨y, hy⟩ ⟨y, hy⟩
[GOAL]
case intro.refine'_2.h₂
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
⊢ ∀ (b : ℝ),
b ∈
(fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = x} →
0 ≤ b
[PROOFSTEP]
rintro b ⟨⟨u, v⟩, -, rfl⟩
[GOAL]
case intro.refine'_2.h₂.intro.mk.intro
x : GHSpace
y : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
hy : Quotient.mk IsometryRel.setoid y = x
u v : NonemptyCompacts { x // x ∈ lp (fun n => ℝ) ⊤ }
⊢ 0 ≤ (fun p => hausdorffDist ↑p.fst ↑p.snd) (u, v)
[PROOFSTEP]
exact hausdorffDist_nonneg
[GOAL]
x y : GHSpace
⊢ dist x y = dist y x
[PROOFSTEP]
have A :
(fun p : NonemptyCompacts ℓ_infty_ℝ × NonemptyCompacts ℓ_infty_ℝ => hausdorffDist (p.1 : Set ℓ_infty_ℝ) p.2) ''
{a | ⟦a⟧ = x} ×ˢ {b | ⟦b⟧ = y} =
(fun p : NonemptyCompacts ℓ_infty_ℝ × NonemptyCompacts ℓ_infty_ℝ => hausdorffDist (p.1 : Set ℓ_infty_ℝ) p.2) ∘
Prod.swap ''
{a | ⟦a⟧ = x} ×ˢ {b | ⟦b⟧ = y} :=
by
congr
funext
simp only [comp_apply, Prod.fst_swap, Prod.snd_swap]
congr
simp (config := { singlePass := true }) only [hausdorffDist_comm]
[GOAL]
x y : GHSpace
⊢ (fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y} =
(fun p => hausdorffDist ↑p.fst ↑p.snd) ∘ Prod.swap ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y}
[PROOFSTEP]
congr
[GOAL]
x y : GHSpace
⊢ (fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y} =
(fun p => hausdorffDist ↑p.fst ↑p.snd) ∘ Prod.swap ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y}
[PROOFSTEP]
funext
[GOAL]
case h
x y : GHSpace
x✝ : ℝ
⊢ ((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y})
x✝ =
((fun p => hausdorffDist ↑p.fst ↑p.snd) ∘ Prod.swap ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y})
x✝
[PROOFSTEP]
simp only [comp_apply, Prod.fst_swap, Prod.snd_swap]
[GOAL]
case h
x y : GHSpace
x✝ : ℝ
⊢ ((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y})
x✝ =
((fun a => hausdorffDist ↑a.snd ↑a.fst) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y})
x✝
[PROOFSTEP]
congr
[GOAL]
case h.e_f
x y : GHSpace
x✝ : ℝ
⊢ (fun p => hausdorffDist ↑p.fst ↑p.snd) = fun a => hausdorffDist ↑a.snd ↑a.fst
[PROOFSTEP]
simp (config := { singlePass := true }) only [hausdorffDist_comm]
[GOAL]
x y : GHSpace
A :
(fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y} =
(fun p => hausdorffDist ↑p.fst ↑p.snd) ∘ Prod.swap ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y}
⊢ dist x y = dist y x
[PROOFSTEP]
simp only [dist, A, image_comp, image_swap_prod]
[GOAL]
x y z : GHSpace
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let X := x.Rep
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let Y := y.Rep
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let Z := z.Rep
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let γ1 := OptimalGHCoupling X Y
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let γ2 := OptimalGHCoupling Y Z
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let Φ : Y → γ1 := optimalGHInjr X Y
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
have hΦ : Isometry Φ := isometry_optimalGHInjr X Y
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
let Ψ : Y → γ2 := optimalGHInjl Y Z
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
have hΨ : Isometry Ψ := isometry_optimalGHInjl Y Z
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
have Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z := toGlue_commute hΦ hΨ
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ dist x z ≤ dist x y + dist y z
[PROOFSTEP]
calc
dist x z = dist (toGHSpace X) (toGHSpace Z) := by rw [x.toGHSpace_rep, z.toGHSpace_rep]
_ ≤ hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y)) (range (toGlueR hΦ hΨ ∘ optimalGHInjr Y Z)) :=
(ghDist_le_hausdorffDist ((toGlueL_isometry hΦ hΨ).comp (isometry_optimalGHInjl X Y))
((toGlueR_isometry hΦ hΨ).comp (isometry_optimalGHInjr Y Z)))
_ ≤
hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y)) (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) +
hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) (range (toGlueR hΦ hΨ ∘ optimalGHInjr Y Z)) :=
by
refine'
hausdorffDist_triangle <| hausdorffEdist_ne_top_of_nonempty_of_bounded (range_nonempty _) (range_nonempty _) _ _
· exact (isCompact_range (Isometry.continuous ((toGlueL_isometry hΦ hΨ).comp (isometry_optimalGHInjl X Y)))).bounded
· exact (isCompact_range (Isometry.continuous ((toGlueL_isometry hΦ hΨ).comp (isometry_optimalGHInjr X Y)))).bounded
_ =
hausdorffDist (toGlueL hΦ hΨ '' range (optimalGHInjl X Y)) (toGlueL hΦ hΨ '' range (optimalGHInjr X Y)) +
hausdorffDist (toGlueR hΦ hΨ '' range (optimalGHInjl Y Z)) (toGlueR hΦ hΨ '' range (optimalGHInjr Y Z)) :=
by simp only [← range_comp, Comm, eq_self_iff_true, add_right_inj]
_ =
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) +
hausdorffDist (range (optimalGHInjl Y Z)) (range (optimalGHInjr Y Z)) :=
by rw [hausdorffDist_image (toGlueL_isometry hΦ hΨ), hausdorffDist_image (toGlueR_isometry hΦ hΨ)]
_ = dist (toGHSpace X) (toGHSpace Y) + dist (toGHSpace Y) (toGHSpace Z) := by
rw [hausdorffDist_optimal, hausdorffDist_optimal, ghDist, ghDist]
_ = dist x y + dist y z := by rw [x.toGHSpace_rep, y.toGHSpace_rep, z.toGHSpace_rep]
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ dist x z = dist (toGHSpace X) (toGHSpace Z)
[PROOFSTEP]
rw [x.toGHSpace_rep, z.toGHSpace_rep]
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y)) (range (toGlueR hΦ hΨ ∘ optimalGHInjr Y Z)) ≤
hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y)) (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) +
hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) (range (toGlueR hΦ hΨ ∘ optimalGHInjr Y Z))
[PROOFSTEP]
refine' hausdorffDist_triangle <| hausdorffEdist_ne_top_of_nonempty_of_bounded (range_nonempty _) (range_nonempty _) _ _
[GOAL]
case refine'_1
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ Metric.Bounded (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y))
[PROOFSTEP]
exact (isCompact_range (Isometry.continuous ((toGlueL_isometry hΦ hΨ).comp (isometry_optimalGHInjl X Y)))).bounded
[GOAL]
case refine'_2
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ Metric.Bounded (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y))
[PROOFSTEP]
exact (isCompact_range (Isometry.continuous ((toGlueL_isometry hΦ hΨ).comp (isometry_optimalGHInjr X Y)))).bounded
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjl X Y)) (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) +
hausdorffDist (range (toGlueL hΦ hΨ ∘ optimalGHInjr X Y)) (range (toGlueR hΦ hΨ ∘ optimalGHInjr Y Z)) =
hausdorffDist (toGlueL hΦ hΨ '' range (optimalGHInjl X Y)) (toGlueL hΦ hΨ '' range (optimalGHInjr X Y)) +
hausdorffDist (toGlueR hΦ hΨ '' range (optimalGHInjl Y Z)) (toGlueR hΦ hΨ '' range (optimalGHInjr Y Z))
[PROOFSTEP]
simp only [← range_comp, Comm, eq_self_iff_true, add_right_inj]
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ hausdorffDist (toGlueL hΦ hΨ '' range (optimalGHInjl X Y)) (toGlueL hΦ hΨ '' range (optimalGHInjr X Y)) +
hausdorffDist (toGlueR hΦ hΨ '' range (optimalGHInjl Y Z)) (toGlueR hΦ hΨ '' range (optimalGHInjr Y Z)) =
hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) +
hausdorffDist (range (optimalGHInjl Y Z)) (range (optimalGHInjr Y Z))
[PROOFSTEP]
rw [hausdorffDist_image (toGlueL_isometry hΦ hΨ), hausdorffDist_image (toGlueR_isometry hΦ hΨ)]
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ hausdorffDist (range (optimalGHInjl X Y)) (range (optimalGHInjr X Y)) +
hausdorffDist (range (optimalGHInjl Y Z)) (range (optimalGHInjr Y Z)) =
dist (toGHSpace X) (toGHSpace Y) + dist (toGHSpace Y) (toGHSpace Z)
[PROOFSTEP]
rw [hausdorffDist_optimal, hausdorffDist_optimal, ghDist, ghDist]
[GOAL]
x y z : GHSpace
X : Type := GHSpace.Rep x
Y : Type := GHSpace.Rep y
Z : Type := GHSpace.Rep z
γ1 : Type := OptimalGHCoupling X Y
γ2 : Type := OptimalGHCoupling Y Z
Φ : Y → γ1 := optimalGHInjr X Y
hΦ : Isometry Φ
Ψ : Y → γ2 := optimalGHInjl Y Z
hΨ : Isometry Ψ
Comm : toGlueL hΦ hΨ ∘ optimalGHInjr X Y = toGlueR hΦ hΨ ∘ optimalGHInjl Y Z
⊢ dist (toGHSpace X) (toGHSpace Y) + dist (toGHSpace Y) (toGHSpace Z) = dist x y + dist y z
[PROOFSTEP]
rw [x.toGHSpace_rep, y.toGHSpace_rep, z.toGHSpace_rep]
[GOAL]
x✝¹ x✝ : GHSpace
⊢ (fun x y =>
↑{
val :=
sInf
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y}),
property :=
(_ :
0 ≤
sInf
((fun p => hausdorffDist ↑p.fst ↑p.snd) ''
{a | Quotient.mk IsometryRel.setoid a = x} ×ˢ {b | Quotient.mk IsometryRel.setoid b = y})) })
x✝¹ x✝ =
ENNReal.ofReal (dist x✝¹ x✝)
[PROOFSTEP]
exact ENNReal.coe_nnreal_eq _
[GOAL]
x y : GHSpace
hxy : dist x y = 0
⊢ x = y
[PROOFSTEP]
rcases ghDist_eq_hausdorffDist x.Rep y.Rep with ⟨Φ, Ψ, Φisom, Ψisom, DΦΨ⟩
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : ghDist (GHSpace.Rep x) (GHSpace.Rep y) = hausdorffDist (range Φ) (range Ψ)
⊢ x = y
[PROOFSTEP]
rw [← dist_ghDist] at DΦΨ
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : dist x y = hausdorffDist (range Φ) (range Ψ)
⊢ x = y
[PROOFSTEP]
simp_rw [hxy] at DΦΨ
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
⊢ x = y
[PROOFSTEP]
have : range Φ = range Ψ :=
by
have hΦ : IsCompact (range Φ) := isCompact_range Φisom.continuous
have hΨ : IsCompact (range Ψ) := isCompact_range Ψisom.continuous
apply (IsClosed.hausdorffDist_zero_iff_eq _ _ _).1 DΦΨ.symm
· exact hΦ.isClosed
· exact hΨ.isClosed
· exact hausdorffEdist_ne_top_of_nonempty_of_bounded (range_nonempty _) (range_nonempty _) hΦ.bounded hΨ.bounded
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
⊢ range Φ = range Ψ
[PROOFSTEP]
have hΦ : IsCompact (range Φ) := isCompact_range Φisom.continuous
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
hΦ : IsCompact (range Φ)
⊢ range Φ = range Ψ
[PROOFSTEP]
have hΨ : IsCompact (range Ψ) := isCompact_range Ψisom.continuous
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
hΦ : IsCompact (range Φ)
hΨ : IsCompact (range Ψ)
⊢ range Φ = range Ψ
[PROOFSTEP]
apply (IsClosed.hausdorffDist_zero_iff_eq _ _ _).1 DΦΨ.symm
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
hΦ : IsCompact (range Φ)
hΨ : IsCompact (range Ψ)
⊢ IsClosed (range Φ)
[PROOFSTEP]
exact hΦ.isClosed
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
hΦ : IsCompact (range Φ)
hΨ : IsCompact (range Ψ)
⊢ IsClosed (range Ψ)
[PROOFSTEP]
exact hΨ.isClosed
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
hΦ : IsCompact (range Φ)
hΨ : IsCompact (range Ψ)
⊢ EMetric.hausdorffEdist (range Φ) (range Ψ) ≠ ⊤
[PROOFSTEP]
exact hausdorffEdist_ne_top_of_nonempty_of_bounded (range_nonempty _) (range_nonempty _) hΦ.bounded hΨ.bounded
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
⊢ x = y
[PROOFSTEP]
have T : (range Ψ ≃ᵢ y.Rep) = (range Φ ≃ᵢ y.Rep) := by rw [this]
[GOAL]
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
⊢ (↑(range Ψ) ≃ᵢ GHSpace.Rep y) = (↑(range Φ) ≃ᵢ GHSpace.Rep y)
[PROOFSTEP]
rw [this]
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
T : (↑(range Ψ) ≃ᵢ GHSpace.Rep y) = (↑(range Φ) ≃ᵢ GHSpace.Rep y)
⊢ x = y
[PROOFSTEP]
have eΨ := cast T Ψisom.isometryEquivOnRange.symm
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
T : (↑(range Ψ) ≃ᵢ GHSpace.Rep y) = (↑(range Φ) ≃ᵢ GHSpace.Rep y)
eΨ : ↑(range Φ) ≃ᵢ GHSpace.Rep y
⊢ x = y
[PROOFSTEP]
have e := Φisom.isometryEquivOnRange.trans eΨ
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
T : (↑(range Ψ) ≃ᵢ GHSpace.Rep y) = (↑(range Φ) ≃ᵢ GHSpace.Rep y)
eΨ : ↑(range Φ) ≃ᵢ GHSpace.Rep y
e : GHSpace.Rep x ≃ᵢ GHSpace.Rep y
⊢ x = y
[PROOFSTEP]
rw [← x.toGHSpace_rep, ← y.toGHSpace_rep, toGHSpace_eq_toGHSpace_iff_isometryEquiv]
[GOAL]
case intro.intro.intro.intro
x y : GHSpace
hxy : dist x y = 0
Φ : GHSpace.Rep x → { x // x ∈ lp (fun n => ℝ) ⊤ }
Ψ : GHSpace.Rep y → { x // x ∈ lp (fun n => ℝ) ⊤ }
Φisom : Isometry Φ
Ψisom : Isometry Ψ
DΦΨ : 0 = hausdorffDist (range Φ) (range Ψ)
this : range Φ = range Ψ
T : (↑(range Ψ) ≃ᵢ GHSpace.Rep y) = (↑(range Φ) ≃ᵢ GHSpace.Rep y)
eΨ : ↑(range Φ) ≃ᵢ GHSpace.Rep y
e : GHSpace.Rep x ≃ᵢ GHSpace.Rep y
⊢ Nonempty (GHSpace.Rep x ≃ᵢ GHSpace.Rep y)
[PROOFSTEP]
exact ⟨e⟩
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
have ha : Isometry ((↑) : p → X) := isometry_subtype_coe
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
have hb : Isometry ((↑) : q → X) := isometry_subtype_coe
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
hb : Isometry Subtype.val
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
have A : dist p q = hausdorffDist (p : Set X) q := rfl
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
hb : Isometry Subtype.val
A : dist p q = hausdorffDist ↑p ↑q
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
have I : ↑p = range ((↑) : p → X) := Subtype.range_coe_subtype.symm
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
hb : Isometry Subtype.val
A : dist p q = hausdorffDist ↑p ↑q
I : ↑p = range Subtype.val
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
have J : ↑q = range ((↑) : q → X) := Subtype.range_coe_subtype.symm
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
hb : Isometry Subtype.val
A : dist p q = hausdorffDist ↑p ↑q
I : ↑p = range Subtype.val
J : ↑q = range Subtype.val
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤ dist p q
[PROOFSTEP]
rw [A, I, J]
[GOAL]
X : Type u
inst✝ : MetricSpace X
p q : NonemptyCompacts X
ha : Isometry Subtype.val
hb : Isometry Subtype.val
A : dist p q = hausdorffDist ↑p ↑q
I : ↑p = range Subtype.val
J : ↑q = range Subtype.val
⊢ dist (NonemptyCompacts.toGHSpace p) (NonemptyCompacts.toGHSpace q) ≤
hausdorffDist (range Subtype.val) (range Subtype.val)
[PROOFSTEP]
exact ghDist_le_hausdorffDist ha hb
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃
[PROOFSTEP]
refine' le_of_forall_pos_le_add fun δ δ0 => _
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
rcases exists_mem_of_nonempty X with ⟨xX, _⟩
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
rcases hs xX with ⟨xs, hxs, Dxs⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have sne : s.Nonempty := ⟨xs, hxs⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
letI : Nonempty s := sne.to_subtype
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this : Nonempty ↑s := Nonempty.to_subtype sne
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : 0 ≤ ε₂ := le_trans (abs_nonneg _) (H ⟨xs, hxs⟩ ⟨xs, hxs⟩)
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝ : Nonempty ↑s := Nonempty.to_subtype sne
this : 0 ≤ ε₂
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : ∀ p q : s, |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ) := fun p q =>
calc
|dist p q - dist (Φ p) (Φ q)| ≤ ε₂ := H p q
_ ≤ 2 * (ε₂ / 2 + δ) := by
linarith
-- glue `X` and `Y` along the almost matching subsets
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝ : Nonempty ↑s := Nonempty.to_subtype sne
this : 0 ≤ ε₂
p q : ↑s
⊢ ε₂ ≤ 2 * (ε₂ / 2 + δ)
[PROOFSTEP]
linarith
-- glue `X` and `Y` along the almost matching subsets
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝¹ : Nonempty ↑s := Nonempty.to_subtype sne
this✝ : 0 ≤ ε₂
this : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
letI : MetricSpace (Sum X Y) := glueMetricApprox (fun x : s => (x : X)) (fun x => Φ x) (ε₂ / 2 + δ) (by linarith) this
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝¹ : Nonempty ↑s := Nonempty.to_subtype sne
this✝ : 0 ≤ ε₂
this : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
⊢ 0 < ε₂ / 2 + δ
[PROOFSTEP]
linarith
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝² : Nonempty ↑s := Nonempty.to_subtype sne
this✝¹ : 0 ≤ ε₂
this✝ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
let Fl := @Sum.inl X Y
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝² : Nonempty ↑s := Nonempty.to_subtype sne
this✝¹ : 0 ≤ ε₂
this✝ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝
Fl : X → X ⊕ Y := inl
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
let Fr := @Sum.inr X Y
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝² : Nonempty ↑s := Nonempty.to_subtype sne
this✝¹ : 0 ≤ ε₂
this✝ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have Il : Isometry Fl := Isometry.of_dist_eq fun x y => rfl
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝² : Nonempty ↑s := Nonempty.to_subtype sne
this✝¹ : 0 ≤ ε₂
this✝ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have Ir : Isometry Fr := Isometry.of_dist_eq fun x y => rfl
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝² : Nonempty ↑s := Nonempty.to_subtype sne
this✝¹ : 0 ≤ ε₂
this✝ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr) := ghDist_le_hausdorffDist Il Ir
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝³ : Nonempty ↑s := Nonempty.to_subtype sne
this✝² : 0 ≤ ε₂
this✝¹ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝¹
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr) :=
haveI B : Bounded (range Fl) := (isCompact_range Il.continuous).bounded
hausdorffDist_triangle
(hausdorffEdist_ne_top_of_nonempty_of_bounded (range_nonempty _) (sne.image _) B (B.mono (image_subset_range _ _)))
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁴ : Nonempty ↑s := Nonempty.to_subtype sne
this✝³ : 0 ≤ ε₂
this✝² : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝¹ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝²
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr) :=
haveI B : Bounded (range Fr) := (isCompact_range Ir.continuous).bounded
hausdorffDist_triangle'
(hausdorffEdist_ne_top_of_nonempty_of_bounded ((range_nonempty _).image _) (range_nonempty _)
(Bounded.mono (image_subset_range _ _) B) B)
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁵ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁴ : 0 ≤ ε₂
this✝³ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝² : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝³
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝¹ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁ :=
by
rw [← image_univ, hausdorffDist_image Il]
have : 0 ≤ ε₁ := le_trans dist_nonneg Dxs
refine' hausdorffDist_le_of_mem_dist this (fun x _ => hs x) fun x _ => ⟨x, mem_univ _, by simpa only [dist_self]⟩
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁵ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁴ : 0 ≤ ε₂
this✝³ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝² : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝³
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝¹ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
⊢ hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
[PROOFSTEP]
rw [← image_univ, hausdorffDist_image Il]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁵ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁴ : 0 ≤ ε₂
this✝³ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝² : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝³
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝¹ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
⊢ hausdorffDist univ s ≤ ε₁
[PROOFSTEP]
have : 0 ≤ ε₁ := le_trans dist_nonneg Dxs
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : 0 ≤ ε₁
⊢ hausdorffDist univ s ≤ ε₁
[PROOFSTEP]
refine' hausdorffDist_le_of_mem_dist this (fun x _ => hs x) fun x _ => ⟨x, mem_univ _, by simpa only [dist_self]⟩
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : 0 ≤ ε₁
x : X
x✝ : x ∈ s
⊢ dist x x ≤ ε₁
[PROOFSTEP]
simpa only [dist_self]
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ :=
by
refine' hausdorffDist_le_of_mem_dist (by linarith) _ _
· intro x' hx'
rcases(Set.mem_image _ _ _).1 hx' with ⟨x, ⟨x_in_s, xx'⟩⟩
rw [← xx']
use Fr (Φ ⟨x, x_in_s⟩), mem_image_of_mem Fr (mem_range_self _)
exact le_of_eq (glueDist_glued_points (fun x : s => (x : X)) Φ (ε₂ / 2 + δ) ⟨x, x_in_s⟩)
· intro x' hx'
rcases(Set.mem_image _ _ _).1 hx' with ⟨y, ⟨y_in_s', yx'⟩⟩
rcases mem_range.1 y_in_s' with ⟨x, xy⟩
use Fl x, mem_image_of_mem _ x.2
rw [← yx', ← xy, dist_comm]
exact le_of_eq (glueDist_glued_points (Z := s) (@Subtype.val X s) Φ (ε₂ / 2 + δ) x)
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
⊢ hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
[PROOFSTEP]
refine' hausdorffDist_le_of_mem_dist (by linarith) _ _
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
⊢ 0 ≤ ε₂ / 2 + δ
[PROOFSTEP]
linarith
[GOAL]
case refine'_1
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
⊢ ∀ (x : X ⊕ Y), x ∈ Fl '' s → ∃ y, y ∈ Fr '' range Φ ∧ dist x y ≤ ε₂ / 2 + δ
[PROOFSTEP]
intro x' hx'
[GOAL]
case refine'_1
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fl '' s
⊢ ∃ y, y ∈ Fr '' range Φ ∧ dist x' y ≤ ε₂ / 2 + δ
[PROOFSTEP]
rcases(Set.mem_image _ _ _).1 hx' with ⟨x, ⟨x_in_s, xx'⟩⟩
[GOAL]
case refine'_1.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fl '' s
x : X
x_in_s : x ∈ s
xx' : Fl x = x'
⊢ ∃ y, y ∈ Fr '' range Φ ∧ dist x' y ≤ ε₂ / 2 + δ
[PROOFSTEP]
rw [← xx']
[GOAL]
case refine'_1.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fl '' s
x : X
x_in_s : x ∈ s
xx' : Fl x = x'
⊢ ∃ y, y ∈ Fr '' range Φ ∧ dist (Fl x) y ≤ ε₂ / 2 + δ
[PROOFSTEP]
use Fr (Φ ⟨x, x_in_s⟩), mem_image_of_mem Fr (mem_range_self _)
[GOAL]
case right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fl '' s
x : X
x_in_s : x ∈ s
xx' : Fl x = x'
⊢ dist (Fl x) (Fr (Φ { val := x, property := x_in_s })) ≤ ε₂ / 2 + δ
[PROOFSTEP]
exact le_of_eq (glueDist_glued_points (fun x : s => (x : X)) Φ (ε₂ / 2 + δ) ⟨x, x_in_s⟩)
[GOAL]
case refine'_2
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
⊢ ∀ (x : X ⊕ Y), x ∈ Fr '' range Φ → ∃ y, y ∈ Fl '' s ∧ dist x y ≤ ε₂ / 2 + δ
[PROOFSTEP]
intro x' hx'
[GOAL]
case refine'_2
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fr '' range Φ
⊢ ∃ y, y ∈ Fl '' s ∧ dist x' y ≤ ε₂ / 2 + δ
[PROOFSTEP]
rcases(Set.mem_image _ _ _).1 hx' with ⟨y, ⟨y_in_s', yx'⟩⟩
[GOAL]
case refine'_2.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fr '' range Φ
y : Y
y_in_s' : y ∈ range Φ
yx' : Fr y = x'
⊢ ∃ y, y ∈ Fl '' s ∧ dist x' y ≤ ε₂ / 2 + δ
[PROOFSTEP]
rcases mem_range.1 y_in_s' with ⟨x, xy⟩
[GOAL]
case refine'_2.intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fr '' range Φ
y : Y
y_in_s' : y ∈ range Φ
yx' : Fr y = x'
x : ↑s
xy : Φ x = y
⊢ ∃ y, y ∈ Fl '' s ∧ dist x' y ≤ ε₂ / 2 + δ
[PROOFSTEP]
use Fl x, mem_image_of_mem _ x.2
[GOAL]
case right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fr '' range Φ
y : Y
y_in_s' : y ∈ range Φ
yx' : Fr y = x'
x : ↑s
xy : Φ x = y
⊢ dist x' (Fl ↑x) ≤ ε₂ / 2 + δ
[PROOFSTEP]
rw [← yx', ← xy, dist_comm]
[GOAL]
case right
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁶ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁵ : 0 ≤ ε₂
this✝⁴ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝³ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁴
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝² : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝¹ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
x' : X ⊕ Y
hx' : x' ∈ Fr '' range Φ
y : Y
y_in_s' : y ∈ range Φ
yx' : Fr y = x'
x : ↑s
xy : Φ x = y
⊢ dist (Fl ↑x) (Fr (Φ x)) ≤ ε₂ / 2 + δ
[PROOFSTEP]
exact le_of_eq (glueDist_glued_points (Z := s) (@Subtype.val X s) Φ (ε₂ / 2 + δ) x)
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁷ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁶ : 0 ≤ ε₂
this✝⁵ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁴ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁵
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝³ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝² : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝¹ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
have : hausdorffDist (Fr '' range Φ) (range Fr) ≤ ε₃ :=
by
rw [← @image_univ _ _ Fr, hausdorffDist_image Ir]
rcases exists_mem_of_nonempty Y with ⟨xY, _⟩
rcases hs' xY with ⟨xs', Dxs'⟩
have : 0 ≤ ε₃ := le_trans dist_nonneg Dxs'
refine hausdorffDist_le_of_mem_dist this (fun x _ => ⟨x, mem_univ _, by simpa only [dist_self]⟩) fun x _ => ?_
rcases hs' x with ⟨y, Dy⟩
exact ⟨Φ y, mem_range_self _, Dy⟩
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁷ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁶ : 0 ≤ ε₂
this✝⁵ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁴ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁵
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝³ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝² : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝¹ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
⊢ hausdorffDist (Fr '' range Φ) (range Fr) ≤ ε₃
[PROOFSTEP]
rw [← @image_univ _ _ Fr, hausdorffDist_image Ir]
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁷ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁶ : 0 ≤ ε₂
this✝⁵ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁴ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁵
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝³ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝² : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝¹ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
⊢ hausdorffDist (range Φ) univ ≤ ε₃
[PROOFSTEP]
rcases exists_mem_of_nonempty Y with ⟨xY, _⟩
[GOAL]
case intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁷ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁶ : 0 ≤ ε₂
this✝⁵ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁴ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁵
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝³ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝² : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝¹ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
⊢ hausdorffDist (range Φ) univ ≤ ε₃
[PROOFSTEP]
rcases hs' xY with ⟨xs', Dxs'⟩
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁷ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁶ : 0 ≤ ε₂
this✝⁵ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁴ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁵
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝³ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝² : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝¹ :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
xs' : ↑s
Dxs' : dist xY (Φ xs') ≤ ε₃
⊢ hausdorffDist (range Φ) univ ≤ ε₃
[PROOFSTEP]
have : 0 ≤ ε₃ := le_trans dist_nonneg Dxs'
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁸ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁷ : 0 ≤ ε₂
this✝⁶ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁵ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁶
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝⁴ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝³ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝² :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝¹ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this✝ : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
xs' : ↑s
Dxs' : dist xY (Φ xs') ≤ ε₃
this : 0 ≤ ε₃
⊢ hausdorffDist (range Φ) univ ≤ ε₃
[PROOFSTEP]
refine hausdorffDist_le_of_mem_dist this (fun x _ => ⟨x, mem_univ _, by simpa only [dist_self]⟩) fun x _ => ?_
[GOAL]
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁸ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁷ : 0 ≤ ε₂
this✝⁶ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁵ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁶
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝⁴ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝³ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝² :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝¹ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this✝ : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
xs' : ↑s
Dxs' : dist xY (Φ xs') ≤ ε₃
this : 0 ≤ ε₃
x : Y
x✝ : x ∈ range Φ
⊢ dist x x ≤ ε₃
[PROOFSTEP]
simpa only [dist_self]
[GOAL]
case intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁸ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁷ : 0 ≤ ε₂
this✝⁶ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁵ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁶
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝⁴ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝³ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝² :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝¹ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this✝ : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
xs' : ↑s
Dxs' : dist xY (Φ xs') ≤ ε₃
this : 0 ≤ ε₃
x : Y
x✝ : x ∈ univ
⊢ ∃ y, y ∈ range Φ ∧ dist x y ≤ ε₃
[PROOFSTEP]
rcases hs' x with ⟨y, Dy⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝¹ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁸ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁷ : 0 ≤ ε₂
this✝⁶ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁵ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁶
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝⁴ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝³ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝² :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝¹ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this✝ : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
xY : Y
h✝ : xY ∈ univ
xs' : ↑s
Dxs' : dist xY (Φ xs') ≤ ε₃
this : 0 ≤ ε₃
x : Y
x✝ : x ∈ univ
y : ↑s
Dy : dist x (Φ y) ≤ ε₃
⊢ ∃ y, y ∈ range Φ ∧ dist x y ≤ ε₃
[PROOFSTEP]
exact ⟨Φ y, mem_range_self _, Dy⟩
[GOAL]
case intro.intro.intro
X : Type u
inst✝⁵ : MetricSpace X
inst✝⁴ : CompactSpace X
inst✝³ : Nonempty X
Y : Type v
inst✝² : MetricSpace Y
inst✝¹ : CompactSpace Y
inst✝ : Nonempty Y
s : Set X
Φ : ↑s → Y
ε₁ ε₂ ε₃ : ℝ
hs : ∀ (x : X), ∃ y, y ∈ s ∧ dist x y ≤ ε₁
hs' : ∀ (x : Y), ∃ y, dist x (Φ y) ≤ ε₃
H : ∀ (x y : ↑s), |dist x y - dist (Φ x) (Φ y)| ≤ ε₂
δ : ℝ
δ0 : 0 < δ
xX : X
h✝ : xX ∈ univ
xs : X
hxs : xs ∈ s
Dxs : dist xX xs ≤ ε₁
sne : Set.Nonempty s
this✝⁸ : Nonempty ↑s := Nonempty.to_subtype sne
this✝⁷ : 0 ≤ ε₂
this✝⁶ : ∀ (p q : ↑s), |dist p q - dist (Φ p) (Φ q)| ≤ 2 * (ε₂ / 2 + δ)
this✝⁵ : MetricSpace (X ⊕ Y) := glueMetricApprox (fun x => ↑x) (fun x => Φ x) (ε₂ / 2 + δ) (_ : 0 < ε₂ / 2 + δ) this✝⁶
Fl : X → X ⊕ Y := inl
Fr : Y → X ⊕ Y := inr
Il : Isometry Fl
Ir : Isometry Fr
this✝⁴ : ghDist X Y ≤ hausdorffDist (range Fl) (range Fr)
this✝³ : hausdorffDist (range Fl) (range Fr) ≤ hausdorffDist (range Fl) (Fl '' s) + hausdorffDist (Fl '' s) (range Fr)
this✝² :
hausdorffDist (Fl '' s) (range Fr) ≤
hausdorffDist (Fl '' s) (Fr '' range Φ) + hausdorffDist (Fr '' range Φ) (range Fr)
this✝¹ : hausdorffDist (range Fl) (Fl '' s) ≤ ε₁
this✝ : hausdorffDist (Fl '' s) (Fr '' range Φ) ≤ ε₂ / 2 + δ
this : hausdorffDist (Fr '' range Φ) (range Fr) ≤ ε₃
⊢ ghDist X Y ≤ ε₁ + ε₂ / 2 + ε₃ + δ
[PROOFSTEP]
linarith
[GOAL]
⊢ SecondCountableTopology GHSpace
[PROOFSTEP]
refine' secondCountable_of_countable_discretization fun δ δpos => _
[GOAL]
δ : ℝ
δpos : δ > 0
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
let ε := 2 / 5 * δ
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
have εpos : 0 < ε := mul_pos (by norm_num) δpos
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
⊢ 0 < 2 / 5
[PROOFSTEP]
norm_num
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
have : ∀ p : GHSpace, ∃ s : Set p.Rep, s.Finite ∧ univ ⊆ ⋃ x ∈ s, ball x ε := fun p => by
simpa only [subset_univ, true_and] using finite_cover_balls_of_compact (α := p.Rep) isCompact_univ εpos
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
p : GHSpace
⊢ ∃ s, Set.Finite s ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x ε
[PROOFSTEP]
simpa only [subset_univ, true_and] using finite_cover_balls_of_compact (α := p.Rep) isCompact_univ εpos
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
this : ∀ (p : GHSpace), ∃ s, Set.Finite s ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x ε
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
choose s hs using this
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
have : ∀ p : GHSpace, ∀ t : Set p.Rep, t.Finite → ∃ n : ℕ, ∃ _ : Equiv t (Fin n), True :=
by
intro p t ht
letI : Fintype t := Finite.fintype ht
exact ⟨Fintype.card t, Fintype.equivFin t, trivial⟩
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
⊢ ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → ∃ n x, True
[PROOFSTEP]
intro p t ht
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
p : GHSpace
t : Set (GHSpace.Rep p)
ht : Set.Finite t
⊢ ∃ n x, True
[PROOFSTEP]
letI : Fintype t := Finite.fintype ht
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
p : GHSpace
t : Set (GHSpace.Rep p)
ht : Set.Finite t
this : Fintype ↑t := Finite.fintype ht
⊢ ∃ n x, True
[PROOFSTEP]
exact ⟨Fintype.card t, Fintype.equivFin t, trivial⟩
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
this : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → ∃ n x, True
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
choose N e _ using this
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
let N := fun p : GHSpace =>
N p (s p)
(hs p).1
-- equiv from `s p`, a nice finite subset of `p.rep`, to `fin (N p)`, called `E p`
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
let E := fun p : GHSpace =>
e p (s p)
(hs p).1
-- A function `F` associating to `p : GHSpace` the data of all distances between points
-- in the `ε`-dense set `s p`.
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
let F : GHSpace → Σ n : ℕ, Fin n → Fin n → ℤ := fun p => ⟨N p, fun a b => ⌊ε⁻¹ * dist ((E p).symm a) ((E p).symm b)⌋⟩
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
⊢ ∃ β x F, ∀ (x y : GHSpace), F x = F y → dist x y ≤ δ
[PROOFSTEP]
refine'
⟨Σ n, Fin n → Fin n → ℤ, by infer_instance, F, fun p q hpq => _⟩
/- As the target space of F is countable, it suffices to show that two points
`p` and `q` with `F p = F q` are at distance `≤ δ`.
For this, we construct a map `Φ` from `s p ⊆ p.rep` (representing `p`)
to `q.rep` (representing `q`) which is almost an isometry on `s p`, and
with image `s q`. For this, we compose the identification of `s p` with `fin (N p)`
and the inverse of the identification of `s q` with `fin (N q)`. Together with
the fact that `N p = N q`, this constructs `Ψ` between `s p` and `s q`, and then
composing with the canonical inclusion we get `Φ`. -/
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
⊢ Encodable ((n : ℕ) × (Fin n → Fin n → ℤ))
[PROOFSTEP]
infer_instance
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
⊢ dist p q ≤ δ
[PROOFSTEP]
have Npq : N p = N q := (Sigma.mk.inj_iff.1 hpq).1
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
⊢ dist p q ≤ δ
[PROOFSTEP]
let Ψ : s p → s q := fun x => (E q).symm (Fin.castIso Npq ((E p) x))
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
⊢ dist p q ≤ δ
[PROOFSTEP]
let Φ : s p → q.Rep := fun x => Ψ x
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ dist p q ≤ δ
[PROOFSTEP]
have main : ghDist p.Rep q.Rep ≤ ε + ε / 2 + ε :=
by
refine' ghDist_le_of_approx_subsets Φ _ _ _
show ∀ x : p.Rep, ∃ y ∈ s p, dist x y ≤ ε
·
-- by construction, `s p` is `ε`-dense
intro x
have : x ∈ ⋃ y ∈ s p, ball y ε := (hs p).2 (mem_univ _)
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
exact ⟨y, ys, le_of_lt hy⟩
show ∀ x : q.Rep, ∃ z : s p, dist x (Φ z) ≤ ε
·
-- by construction, `s q` is `ε`-dense, and it is the range of `Φ`
intro x
have : x ∈ ⋃ y ∈ s q, ball y ε := (hs q).2 (mem_univ _)
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
let i : ℕ := E q ⟨y, ys⟩
let hi := ((E q) ⟨y, ys⟩).is_lt
have ihi_eq : (⟨i, hi⟩ : Fin (N q)) = (E q) ⟨y, ys⟩ := by rw [Fin.ext_iff, Fin.val_mk]
have hiq : i < N q := hi
have hip : i < N p := by rwa [Npq.symm] at hiq
let z := (E p).symm ⟨i, hip⟩
use z
have C1 : (E p) z = ⟨i, hip⟩ := (E p).apply_symm_apply ⟨i, hip⟩
have C2 : Fin.castIso Npq ⟨i, hip⟩ = ⟨i, hi⟩ := rfl
have C3 : (E q).symm ⟨i, hi⟩ = ⟨y, ys⟩ := by rw [ihi_eq]; exact (E q).symm_apply_apply ⟨y, ys⟩
have : Φ z = y := by simp only; rw [C1, C2, C3]
rw [this]
exact le_of_lt hy
show ∀ x y : s p, |dist x y - dist (Φ x) (Φ y)| ≤ ε
·
/- the distance between `x` and `y` is encoded in `F p`, and the distance between
`Φ x` and `Φ y` (two points of `s q`) is encoded in `F q`, all this up to `ε`.
As `F p = F q`, the distances are almost equal. -/
-- porting note : we have to circumvent the absence of `change … with … `
intro x y
rw [show dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y) from rfl]
-- introduce `i`, that codes both `x` and `Φ x` in `fin (N p) = fin (N q)`
let i : ℕ := E p x
have hip : i < N p := ((E p) x).2
have hiq : i < N q := by rwa [Npq] at hip
have i' : i = (E q) (Ψ x) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
let j : ℕ := E p y
have hjp : j < N p := ((E p) y).2
have hjq : j < N q := by rwa [Npq] at hjp
have j' : j = ((E q) (Ψ y)).1 := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
have : (F p).2 ((E p) x) ((E p) y) = ⌊ε⁻¹ * dist x y⌋ := by simp only [(E p).symm_apply_apply]
have Ap : (F p).2 ⟨i, hip⟩ ⟨j, hjp⟩ = ⌊ε⁻¹ * dist x y⌋ := by
rw [← this]
-- Express `dist (Φ x) (Φ y)` in terms of `F q`
have : (F q).2 ((E q) (Ψ x)) ((E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ := by simp only [(E q).symm_apply_apply]
have Aq : (F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ :=
by
rw [← this]
-- porting note: `congr` fails to make progress
refine congr_arg₂ (F q).2 ?_ ?_ <;> ext1
exacts [i', j']
-- use the equality between `F p` and `F q` to deduce that the distances have equal
-- integer parts
have : (F p).2 ⟨i, hip⟩ ⟨j, hjp⟩ = (F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩ :=
by
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
rw [← hpq']
-- porting note : new version above, because `change … with…` is not implemented
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- revert hiq hjq
-- change N q with (F q).1
-- generalize F q = f at hpq ⊢
-- subst hpq
-- rfl
rw [Ap, Aq] at this
have I :=
calc
|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| = |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| := (abs_mul _ _).symm
_ = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)| := by congr; ring
_ ≤ 1 := le_of_lt (abs_sub_lt_one_of_floor_eq_floor this)
calc
|dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| := by
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
_ = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|) := by rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
_ ≤ ε * 1 := (mul_le_mul_of_nonneg_left I (le_of_lt εpos))
_ = ε := mul_one _
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
[PROOFSTEP]
refine' ghDist_le_of_approx_subsets Φ _ _ _
[GOAL]
case refine'_1
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep p), ∃ y, y ∈ s p ∧ dist x y ≤ ε
case refine'_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ y, dist x (Φ y) ≤ ε
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x : p.Rep, ∃ y ∈ s p, dist x y ≤ ε
[GOAL]
case refine'_1
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep p), ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
intro x
[GOAL]
case refine'_1
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
have : x ∈ ⋃ y ∈ s p, ball y ε := (hs p).2 (mem_univ _)
[GOAL]
case refine'_1
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
this : x ∈ ⋃ (y : GHSpace.Rep p) (_ : y ∈ s p), ball y ε
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
[GOAL]
case refine'_1.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
this : x ∈ ⋃ (y : GHSpace.Rep p) (_ : y ∈ s p), ball y ε
y : GHSpace.Rep p
ys : y ∈ s p
hy : x ∈ ball y ε
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
exact ⟨y, ys, le_of_lt hy⟩
[GOAL]
case refine'_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ y, dist x (Φ y) ≤ ε
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x : q.Rep, ∃ z : s p, dist x (Φ z) ≤ ε
[GOAL]
case refine'_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
intro x
[GOAL]
case refine'_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have : x ∈ ⋃ y ∈ s q, ball y ε := (hs q).2 (mem_univ _)
[GOAL]
case refine'_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let i : ℕ := E q ⟨y, ys⟩
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let hi := ((E q) ⟨y, ys⟩).is_lt
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have ihi_eq : (⟨i, hi⟩ : Fin (N q)) = (E q) ⟨y, ys⟩ := by rw [Fin.ext_iff, Fin.val_mk]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
⊢ { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
[PROOFSTEP]
rw [Fin.ext_iff, Fin.val_mk]
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have hiq : i < N q := hi
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have hip : i < N p := by rwa [Npq.symm] at hiq
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
⊢ i < N p
[PROOFSTEP]
rwa [Npq.symm] at hiq
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let z := (E p).symm ⟨i, hip⟩
[GOAL]
case refine'_2.intro.intro
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
use z
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C1 : (E p) z = ⟨i, hip⟩ := (E p).apply_symm_apply ⟨i, hip⟩
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C2 : Fin.castIso Npq ⟨i, hip⟩ = ⟨i, hi⟩ := rfl
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C3 : (E q).symm ⟨i, hi⟩ = ⟨y, ys⟩ := by rw [ihi_eq]; exact (E q).symm_apply_apply ⟨y, ys⟩
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
[PROOFSTEP]
rw [ihi_eq]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ ↑(E q).symm (↑(E q) { val := y, property := ys }) = { val := y, property := ys }
[PROOFSTEP]
exact (E q).symm_apply_apply ⟨y, ys⟩
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have : Φ z = y := by simp only; rw [C1, C2, C3]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ Φ z = y
[PROOFSTEP]
simp only
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ ↑(↑(e q (s q) (_ : Set.Finite (s q))).symm
(↑(Fin.castIso Npq)
(↑(e p (s p) (_ : Set.Finite (s p)))
(↑(e p (s p) (_ : Set.Finite (s p))).symm
{ val := ↑(↑(e q (s q) (_ : Set.Finite (s q))) { val := y, property := ys }), isLt := hip })))) =
y
[PROOFSTEP]
rw [C1, C2, C3]
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this✝ : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
this : Φ z = y
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
rw [this]
[GOAL]
case h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this✝ : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y ε
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y ε
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N✝ q (s q) (_ : Set.Finite (s q)) :=
Fin.is_lt (↑(E q) { val := y, property := ys })
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
this : Φ z = y
⊢ dist x y ≤ ε
[PROOFSTEP]
exact le_of_lt hy
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x y : s p, |dist x y - dist (Φ x) (Φ y)| ≤ ε
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
intro x y
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
⊢ |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
rw [show dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y) from rfl]
-- introduce `i`, that codes both `x` and `Φ x` in `fin (N p) = fin (N q)`
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
let i : ℕ := E p x
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hip : i < N p := ((E p) x).2
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hiq : i < N q := by rwa [Npq] at hip
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
⊢ i < N q
[PROOFSTEP]
rwa [Npq] at hip
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have i' : i = (E q) (Ψ x) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
⊢ i = ↑(↑(E q) (Ψ x))
[PROOFSTEP]
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
let j : ℕ := E p y
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hjp : j < N p := ((E p) y).2
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hjq : j < N q := by rwa [Npq] at hjp
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
⊢ j < N q
[PROOFSTEP]
rwa [Npq] at hjp
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have j' : j = ((E q) (Ψ y)).1 := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
⊢ j = ↑(↑(E q) (Ψ y))
[PROOFSTEP]
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have : (F p).2 ((E p) x) ((E p) y) = ⌊ε⁻¹ * dist x y⌋ := by simp only [(E p).symm_apply_apply]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
[PROOFSTEP]
simp only [(E p).symm_apply_apply]
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have Ap : (F p).2 ⟨i, hip⟩ ⟨j, hjp⟩ = ⌊ε⁻¹ * dist x y⌋ := by
rw [← this]
-- Express `dist (Φ x) (Φ y)` in terms of `F q`
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
⊢ Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
[PROOFSTEP]
rw [← this]
-- Express `dist (Φ x) (Φ y)` in terms of `F q`
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have : (F q).2 ((E q) (Ψ x)) ((E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ := by simp only [(E q).symm_apply_apply]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
⊢ Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
simp only [(E q).symm_apply_apply]
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have Aq : (F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ :=
by
rw [← this]
-- porting note: `congr` fails to make progress
refine congr_arg₂ (F q).2 ?_ ?_ <;> ext1
exacts [i', j']
-- use the equality between `F p` and `F q` to deduce that the distances have equal
-- integer parts
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
rw [← this]
-- porting note: `congr` fails to make progress
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y))
[PROOFSTEP]
refine congr_arg₂ (F q).2 ?_ ?_
[GOAL]
case refine_1
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ { val := i, isLt := hiq } = ↑(E q) (Ψ x)
[PROOFSTEP]
ext1
[GOAL]
case refine_2
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ { val := j, isLt := hjq } = ↑(E q) (Ψ y)
[PROOFSTEP]
ext1
[GOAL]
case refine_1.h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ ↑{ val := i, isLt := hiq } = ↑(↑(E q) (Ψ x))
case refine_2.h
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ ↑{ val := j, isLt := hjq } = ↑(↑(E q) (Ψ y))
[PROOFSTEP]
exacts [i', j']
-- use the equality between `F p` and `F q` to deduce that the distances have equal
-- integer parts
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have : (F p).2 ⟨i, hip⟩ ⟨j, hjp⟩ = (F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩ :=
by
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
rw [← hpq']
-- porting note : new version above, because `change … with…` is not implemented
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- revert hiq hjq
-- change N q with (F q).1
-- generalize F q = f at hpq ⊢
-- subst hpq
-- rfl
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } =
Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }
[PROOFSTEP]
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
hpq' : HEq (F p).snd (F q).snd
⊢ Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } =
Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }
[PROOFSTEP]
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
hpq' :
∀ (i j : Fin (N p)),
Sigma.snd (F p) i j = Sigma.snd (F q) { val := ↑i, isLt := (_ : ↑i < N q) } { val := ↑j, isLt := (_ : ↑j < N q) }
⊢ Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } =
Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }
[PROOFSTEP]
rw [← hpq']
-- porting note : new version above, because `change … with…` is not implemented
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- revert hiq hjq
-- change N q with (F q).1
-- generalize F q = f at hpq ⊢
-- subst hpq
-- rfl
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this :
Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } =
Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
rw [Ap, Aq] at this
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have I :=
calc
|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| = |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| := (abs_mul _ _).symm
_ = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)| := by congr; ring
_ ≤ 1 := le_of_lt (abs_sub_lt_one_of_floor_eq_floor this)
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)|
[PROOFSTEP]
congr
[GOAL]
case e_a
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y)) = ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)
[PROOFSTEP]
ring
[GOAL]
case refine'_3
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
calc
|dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| := by
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
_ = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|) := by rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
_ ≤ ε * 1 := (mul_le_mul_of_nonneg_left I (le_of_lt εpos))
_ = ε := mul_one _
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ |dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)|
[PROOFSTEP]
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
this✝¹ : Sigma.snd (F p) (↑(E p) x) (↑(E p) y) = ⌊ε⁻¹ * dist x y⌋
Ap : Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp } = ⌊ε⁻¹ * dist x y⌋
this✝ : Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
Aq : Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq } = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|)
[PROOFSTEP]
rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
main : ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
⊢ dist p q ≤ δ
[PROOFSTEP]
calc
dist p q = ghDist p.Rep q.Rep := dist_ghDist p q
_ ≤ ε + ε / 2 + ε := main
_ = δ := by ring
[GOAL]
δ : ℝ
δpos : δ > 0
ε : ℝ := 2 / 5 * δ
εpos : 0 < ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
hs : ∀ (p : GHSpace), Set.Finite (s p) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x ε
N✝ : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → Set.Finite t → ℕ
e : (p : GHSpace) → (t : Set (GHSpace.Rep p)) → (a : Set.Finite t) → ↑t ≃ Fin (N✝ p t a)
x✝ : ∀ (p : GHSpace) (t : Set (GHSpace.Rep p)), Set.Finite t → True
N : GHSpace → ℕ := fun p => N✝ p (s p) (_ : Set.Finite (s p))
E : (p : GHSpace) → ↑(s p) ≃ Fin (N✝ p (s p) (_ : Set.Finite (s p))) := fun p => e p (s p) (_ : Set.Finite (s p))
F : GHSpace → (n : ℕ) × (Fin n → Fin n → ℤ) :=
fun p => { fst := N p, snd := fun a b => ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋ }
p q : GHSpace
hpq : F p = F q
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
main : ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
⊢ ε + ε / 2 + ε = δ
[PROOFSTEP]
ring
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
⊢ TotallyBounded t
[PROOFSTEP]
refine' Metric.totallyBounded_of_finite_discretization fun δ δpos => _
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
let ε := 1 / 5 * δ
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
have εpos : 0 < ε := mul_pos (by norm_num) δpos
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
⊢ 0 < 1 / 5
[PROOFSTEP]
norm_num
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
rcases Metric.tendsto_atTop.1 ulim ε εpos with ⟨n, hn⟩
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
have u_le_ε : u n ≤ ε := by
have := hn n le_rfl
simp only [Real.dist_eq, add_zero, sub_eq_add_neg, neg_zero] at this
exact
le_of_lt
(lt_of_le_of_lt (le_abs_self _) this)
-- construct a finite subset `s p` of `p` which is `ε`-dense and has cardinal `≤ K n`
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
⊢ u n ≤ ε
[PROOFSTEP]
have := hn n le_rfl
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
this : dist (u n) 0 < ε
⊢ u n ≤ ε
[PROOFSTEP]
simp only [Real.dist_eq, add_zero, sub_eq_add_neg, neg_zero] at this
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
this : |u n| < 1 / 5 * δ
⊢ u n ≤ ε
[PROOFSTEP]
exact
le_of_lt
(lt_of_le_of_lt (le_abs_self _) this)
-- construct a finite subset `s p` of `p` which is `ε`-dense and has cardinal `≤ K n`
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
have : ∀ p : GHSpace, ∃ s : Set p.Rep, ∃ N ≤ K n, ∃ _ : Equiv s (Fin N), p ∈ t → univ ⊆ ⋃ x ∈ s, ball x (u n) :=
by
intro p
by_cases hp : p ∉ t
· have : Nonempty (Equiv (∅ : Set p.Rep) (Fin 0)) := by rw [← Fintype.card_eq];
simp only [empty_card', Fintype.card_fin]
use∅, 0, bot_le, choice this
exact fun hp' => (hp hp').elim
· rcases hcov _ (Set.not_not_mem.1 hp) n with ⟨s, ⟨scard, scover⟩⟩
rcases Cardinal.lt_aleph0.1 (lt_of_le_of_lt scard (Cardinal.nat_lt_aleph0 _)) with ⟨N, hN⟩
rw [hN, Cardinal.natCast_le] at scard
have : #s = #(Fin N) := by rw [hN, Cardinal.mk_fin]
cases' Quotient.exact this with E
use s, N, scard, E
simp only [scover, imp_true_iff]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
⊢ ∀ (p : GHSpace), ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
intro p
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
by_cases hp : p ∉ t
[GOAL]
case pos
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬p ∈ t
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
have : Nonempty (Equiv (∅ : Set p.Rep) (Fin 0)) := by rw [← Fintype.card_eq]; simp only [empty_card', Fintype.card_fin]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬p ∈ t
⊢ Nonempty (↑∅ ≃ Fin 0)
[PROOFSTEP]
rw [← Fintype.card_eq]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬p ∈ t
⊢ Fintype.card ↑∅ = Fintype.card (Fin 0)
[PROOFSTEP]
simp only [empty_card', Fintype.card_fin]
[GOAL]
case pos
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬p ∈ t
this : Nonempty (↑∅ ≃ Fin 0)
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
use∅, 0, bot_le, choice this
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬p ∈ t
this : Nonempty (↑∅ ≃ Fin 0)
⊢ p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ ∅), ball x (u n)
[PROOFSTEP]
exact fun hp' => (hp hp').elim
[GOAL]
case neg
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
rcases hcov _ (Set.not_not_mem.1 hp) n with ⟨s, ⟨scard, scover⟩⟩
[GOAL]
case neg.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scard : #↑s ≤ ↑(K n)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
rcases Cardinal.lt_aleph0.1 (lt_of_le_of_lt scard (Cardinal.nat_lt_aleph0 _)) with ⟨N, hN⟩
[GOAL]
case neg.intro.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scard : #↑s ≤ ↑(K n)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
hN : #↑s = ↑N
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
rw [hN, Cardinal.natCast_le] at scard
[GOAL]
case neg.intro.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
scard : N ≤ K n
hN : #↑s = ↑N
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
have : #s = #(Fin N) := by rw [hN, Cardinal.mk_fin]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
scard : N ≤ K n
hN : #↑s = ↑N
⊢ #↑s = #(Fin N)
[PROOFSTEP]
rw [hN, Cardinal.mk_fin]
[GOAL]
case neg.intro.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
scard : N ≤ K n
hN : #↑s = ↑N
this : #↑s = #(Fin N)
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
cases' Quotient.exact this with E
[GOAL]
case neg.intro.intro.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
scard : N ≤ K n
hN : #↑s = ↑N
this : #↑s = #(Fin N)
E : ↑s ≃ Fin N
⊢ ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
use s, N, scard, E
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
p : GHSpace
hp : ¬¬p ∈ t
s : Set (GHSpace.Rep p)
scover : univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
N : ℕ
scard : N ≤ K n
hN : #↑s = ↑N
this : #↑s = #(Fin N)
E : ↑s ≃ Fin N
⊢ p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
[PROOFSTEP]
simp only [scover, imp_true_iff]
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
this : ∀ (p : GHSpace), ∃ s N, N ≤ K n ∧ ∃ x, p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
choose s N hN E hs using this
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
let M := ⌊ε⁻¹ * max C 0⌋₊
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
let F : GHSpace → Σ k : Fin (K n).succ, Fin k → Fin k → Fin M.succ := fun p =>
⟨⟨N p, lt_of_le_of_lt (hN p) (Nat.lt_succ_self _)⟩, fun a b =>
⟨min M ⌊ε⁻¹ * dist ((E p).symm a) ((E p).symm b)⌋₊, (min_le_left _ _).trans_lt (Nat.lt_succ_self _)⟩⟩
[GOAL]
case intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
⊢ ∃ β x F, ∀ (x y : ↑t), F x = F y → dist ↑x ↑y < δ
[PROOFSTEP]
refine' ⟨_, _, fun p => F p, _⟩
[GOAL]
case intro.refine'_1
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
⊢ Fintype ((k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)))
case intro.refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
⊢ ∀ (x y : ↑t), (fun p => F ↑p) x = (fun p => F ↑p) y → dist ↑x ↑y < δ
[PROOFSTEP]
infer_instance
-- It remains to show that if `F p = F q`, then `p` and `q` are `ε`-close
[GOAL]
case intro.refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
⊢ ∀ (x y : ↑t), (fun p => F ↑p) x = (fun p => F ↑p) y → dist ↑x ↑y < δ
[PROOFSTEP]
rintro ⟨p, pt⟩ ⟨q, qt⟩ hpq
[GOAL]
case intro.refine'_2.mk.mk
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
⊢ dist ↑{ val := p, property := pt } ↑{ val := q, property := qt } < δ
[PROOFSTEP]
have Npq : N p = N q := Fin.ext_iff.1 (Sigma.mk.inj_iff.1 hpq).1
[GOAL]
case intro.refine'_2.mk.mk
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
⊢ dist ↑{ val := p, property := pt } ↑{ val := q, property := qt } < δ
[PROOFSTEP]
let Ψ : s p → s q := fun x => (E q).symm (Fin.castIso Npq ((E p) x))
[GOAL]
case intro.refine'_2.mk.mk
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
⊢ dist ↑{ val := p, property := pt } ↑{ val := q, property := qt } < δ
[PROOFSTEP]
let Φ : s p → q.Rep := fun x => Ψ x
[GOAL]
case intro.refine'_2.mk.mk
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ dist ↑{ val := p, property := pt } ↑{ val := q, property := qt } < δ
[PROOFSTEP]
have main : ghDist p.Rep q.Rep ≤ ε + ε / 2 + ε := by
-- to prove the main inequality, argue that `s p` is `ε`-dense in `p`, and `s q` is `ε`-dense
-- in `q`, and `s p` and `s q` are almost isometric. Then closeness follows
-- from `ghDist_le_of_approx_subsets`
refine' ghDist_le_of_approx_subsets Φ _ _ _
show ∀ x : p.Rep, ∃ y ∈ s p, dist x y ≤ ε
·
-- by construction, `s p` is `ε`-dense
intro x
have : x ∈ ⋃ y ∈ s p, ball y (u n) := (hs p pt) (mem_univ _)
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
exact ⟨y, ys, le_trans (le_of_lt hy) u_le_ε⟩
show ∀ x : q.Rep, ∃ z : s p, dist x (Φ z) ≤ ε
·
-- by construction, `s q` is `ε`-dense, and it is the range of `Φ`
intro x
have : x ∈ ⋃ y ∈ s q, ball y (u n) := (hs q qt) (mem_univ _)
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
let i : ℕ := E q ⟨y, ys⟩
let hi := ((E q) ⟨y, ys⟩).2
have ihi_eq : (⟨i, hi⟩ : Fin (N q)) = (E q) ⟨y, ys⟩ := by rw [Fin.ext_iff, Fin.val_mk]
have hiq : i < N q := hi
have hip : i < N p := by rwa [Npq.symm] at hiq
let z := (E p).symm ⟨i, hip⟩
use z
have C1 : (E p) z = ⟨i, hip⟩ := (E p).apply_symm_apply ⟨i, hip⟩
have C2 : Fin.castIso Npq ⟨i, hip⟩ = ⟨i, hi⟩ := rfl
have C3 : (E q).symm ⟨i, hi⟩ = ⟨y, ys⟩ := by rw [ihi_eq]; exact (E q).symm_apply_apply ⟨y, ys⟩
have : Φ z = y := by simp only; rw [C1, C2, C3]
rw [this]
exact le_trans (le_of_lt hy) u_le_ε
show ∀ x y : s p, |dist x y - dist (Φ x) (Φ y)| ≤ ε
·
/- the distance between `x` and `y` is encoded in `F p`, and the distance between
`Φ x` and `Φ y` (two points of `s q`) is encoded in `F q`, all this up to `ε`.
As `F p = F q`, the distances are almost equal. -/
intro x y
have : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y) := rfl
rw [this]
-- introduce `i`, that codes both `x` and `Φ x` in `fin (N p) = fin (N q)`
let i : ℕ := E p x
have hip : i < N p := ((E p) x).2
have hiq : i < N q := by rwa [Npq] at hip
have i' : i = (E q) (Ψ x) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
let j : ℕ := E p y
have hjp : j < N p := ((E p) y).2
have hjq : j < N q := by rwa [Npq] at hjp
have j' : j = (E q) (Ψ y) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
have Ap : ((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ⌊ε⁻¹ * dist x y⌋₊ :=
calc
((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ((F p).2 ((E p) x) ((E p) y)).1 := by congr
_ = min M ⌊ε⁻¹ * dist x y⌋₊ := by simp only [(E p).symm_apply_apply]
_ = ⌊ε⁻¹ * dist x y⌋₊ := by
refine' min_eq_right (Nat.floor_mono _)
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
change dist (x : p.Rep) y ≤ C
refine' (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans _
exact hdiam p pt
have Aq : ((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ :=
calc
((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 = ((F q).2 ((E q) (Ψ x)) ((E q) (Ψ y))).1 := by
-- Porting note: `congr` drops `Fin.val` but fails to make further progressexact
congr_arg₂ (Fin.val <| (F q).2 · ·) (Fin.ext i') (Fin.ext j')
_ = min M ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ := by simp only [(E q).symm_apply_apply]
_ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ := by
refine' min_eq_right (Nat.floor_mono _)
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
change dist (Ψ x : q.Rep) (Ψ y) ≤ C
refine (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans ?_
exact hdiam q qt
have : ((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 :=
by
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
rw [← hpq']
-- porting note: new version above because `subst…` does not work
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- dsimp only [show N q = (F q).1 from rfl] at hiq hjq ⊢
-- generalize F q = f at hpq ⊢
-- subst hpq
-- intros
-- rfl
have : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ :=
by
rw [Ap, Aq] at this
have D : 0 ≤ ⌊ε⁻¹ * dist x y⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
have D' : 0 ≤ ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
rw [← Int.toNat_of_nonneg D, ← Int.toNat_of_nonneg D', Int.floor_toNat, Int.floor_toNat, this]
-- deduce that the distances coincide up to `ε`, by a straightforward computation
-- that should be automated
have I :=
calc
|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| = |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| := (abs_mul _ _).symm
_ = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)| := by congr; ring
_ ≤ 1 := le_of_lt (abs_sub_lt_one_of_floor_eq_floor this)
calc
|dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| := by
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
_ = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|) := by rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
_ ≤ ε * 1 := (mul_le_mul_of_nonneg_left I (le_of_lt εpos))
_ = ε := mul_one _
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
[PROOFSTEP]
refine' ghDist_le_of_approx_subsets Φ _ _ _
[GOAL]
case refine'_1
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep p), ∃ y, y ∈ s p ∧ dist x y ≤ ε
case refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ y, dist x (Φ y) ≤ ε
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x : p.Rep, ∃ y ∈ s p, dist x y ≤ ε
[GOAL]
case refine'_1
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep p), ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
intro x
[GOAL]
case refine'_1
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
have : x ∈ ⋃ y ∈ s p, ball y (u n) := (hs p pt) (mem_univ _)
[GOAL]
case refine'_1
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
this : x ∈ ⋃ (y : GHSpace.Rep p) (_ : y ∈ s p), ball y (u n)
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
[GOAL]
case refine'_1.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep p
this : x ∈ ⋃ (y : GHSpace.Rep p) (_ : y ∈ s p), ball y (u n)
y : GHSpace.Rep p
ys : y ∈ s p
hy : x ∈ ball y (u n)
⊢ ∃ y, y ∈ s p ∧ dist x y ≤ ε
[PROOFSTEP]
exact ⟨y, ys, le_trans (le_of_lt hy) u_le_ε⟩
[GOAL]
case refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ y, dist x (Φ y) ≤ ε
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x : q.Rep, ∃ z : s p, dist x (Φ z) ≤ ε
[GOAL]
case refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x : GHSpace.Rep q), ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
intro x
[GOAL]
case refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have : x ∈ ⋃ y ∈ s q, ball y (u n) := (hs q qt) (mem_univ _)
[GOAL]
case refine'_2
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
rcases mem_iUnion₂.1 this with ⟨y, ys, hy⟩
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let i : ℕ := E q ⟨y, ys⟩
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let hi := ((E q) ⟨y, ys⟩).2
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have ihi_eq : (⟨i, hi⟩ : Fin (N q)) = (E q) ⟨y, ys⟩ := by rw [Fin.ext_iff, Fin.val_mk]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
⊢ { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
[PROOFSTEP]
rw [Fin.ext_iff, Fin.val_mk]
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have hiq : i < N q := hi
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
have hip : i < N p := by rwa [Npq.symm] at hiq
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
⊢ i < N p
[PROOFSTEP]
rwa [Npq.symm] at hiq
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
let z := (E p).symm ⟨i, hip⟩
[GOAL]
case refine'_2.intro.intro
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
⊢ ∃ z, dist x (Φ z) ≤ ε
[PROOFSTEP]
use z
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C1 : (E p) z = ⟨i, hip⟩ := (E p).apply_symm_apply ⟨i, hip⟩
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C2 : Fin.castIso Npq ⟨i, hip⟩ = ⟨i, hi⟩ := rfl
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have C3 : (E q).symm ⟨i, hi⟩ = ⟨y, ys⟩ := by rw [ihi_eq]; exact (E q).symm_apply_apply ⟨y, ys⟩
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
[PROOFSTEP]
rw [ihi_eq]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
⊢ ↑(E q).symm (↑(E q) { val := y, property := ys }) = { val := y, property := ys }
[PROOFSTEP]
exact (E q).symm_apply_apply ⟨y, ys⟩
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
have : Φ z = y := by simp only; rw [C1, C2, C3]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ Φ z = y
[PROOFSTEP]
simp only
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
⊢ ↑(↑(E q).symm
(↑(Fin.castIso Npq) (↑(E p) (↑(E p).symm { val := ↑(↑(E q) { val := y, property := ys }), isLt := hip })))) =
y
[PROOFSTEP]
rw [C1, C2, C3]
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this✝ : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
this : Φ z = y
⊢ dist x (Φ z) ≤ ε
[PROOFSTEP]
rw [this]
[GOAL]
case h
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x : GHSpace.Rep q
this✝ : x ∈ ⋃ (y : GHSpace.Rep q) (_ : y ∈ s q), ball y (u n)
y : GHSpace.Rep q
ys : y ∈ s q
hy : x ∈ ball y (u n)
i : ℕ := ↑(↑(E q) { val := y, property := ys })
hi : ↑(↑(E q) { val := y, property := ys }) < N q := (↑(E q) { val := y, property := ys }).isLt
ihi_eq : { val := i, isLt := hi } = ↑(E q) { val := y, property := ys }
hiq : i < N q
hip : i < N p
z : (fun x => ↑(s p)) { val := i, isLt := hip } := ↑(E p).symm { val := i, isLt := hip }
C1 : ↑(E p) z = { val := i, isLt := hip }
C2 : ↑(Fin.castIso Npq) { val := i, isLt := hip } = { val := i, isLt := hi }
C3 : ↑(E q).symm { val := i, isLt := hi } = { val := y, property := ys }
this : Φ z = y
⊢ dist x y ≤ ε
[PROOFSTEP]
exact le_trans (le_of_lt hy) u_le_ε
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
show ∀ x y : s p, |dist x y - dist (Φ x) (Φ y)| ≤ ε
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
⊢ ∀ (x y : ↑(s p)), |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
intro x y
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
⊢ |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
have : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y) := rfl
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
⊢ |dist x y - dist (Φ x) (Φ y)| ≤ ε
[PROOFSTEP]
rw [this]
-- introduce `i`, that codes both `x` and `Φ x` in `fin (N p) = fin (N q)`
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
let i : ℕ := E p x
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hip : i < N p := ((E p) x).2
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hiq : i < N q := by rwa [Npq] at hip
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
⊢ i < N q
[PROOFSTEP]
rwa [Npq] at hip
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have i' : i = (E q) (Ψ x) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
⊢ i = ↑(↑(E q) (Ψ x))
[PROOFSTEP]
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- introduce `j`, that codes both `y` and `Φ y` in `fin (N p) = fin (N q)`
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
let j : ℕ := E p y
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hjp : j < N p := ((E p) y).2
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have hjq : j < N q := by rwa [Npq] at hjp
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
⊢ j < N q
[PROOFSTEP]
rwa [Npq] at hjp
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have j' : j = (E q) (Ψ y) := by
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
⊢ j = ↑(↑(E q) (Ψ y))
[PROOFSTEP]
simp only [Equiv.apply_symm_apply, Fin.coe_castIso]
-- Express `dist x y` in terms of `F p`
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have Ap : ((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ⌊ε⁻¹ * dist x y⌋₊ :=
calc
((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ((F p).2 ((E p) x) ((E p) y)).1 := by congr
_ = min M ⌊ε⁻¹ * dist x y⌋₊ := by simp only [(E p).symm_apply_apply]
_ = ⌊ε⁻¹ * dist x y⌋₊ := by
refine' min_eq_right (Nat.floor_mono _)
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
change dist (x : p.Rep) y ≤ C
refine' (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans _
exact hdiam p pt
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ↑(Sigma.snd (F p) (↑(E p) x) (↑(E p) y))
[PROOFSTEP]
congr
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ ↑(Sigma.snd (F p) (↑(E p) x) (↑(E p) y)) = min M ⌊ε⁻¹ * dist x y⌋₊
[PROOFSTEP]
simp only [(E p).symm_apply_apply]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ min M ⌊ε⁻¹ * dist x y⌋₊ = ⌊ε⁻¹ * dist x y⌋₊
[PROOFSTEP]
refine' min_eq_right (Nat.floor_mono _)
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ ε⁻¹ * dist x y ≤ ε⁻¹ * max C 0
[PROOFSTEP]
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ dist x y ≤ C
[PROOFSTEP]
change dist (x : p.Rep) y ≤ C
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ dist ↑x ↑y ≤ C
[PROOFSTEP]
refine' (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans _
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
⊢ diam univ ≤ C
[PROOFSTEP]
exact hdiam p pt
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have Aq : ((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ :=
calc
((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 = ((F q).2 ((E q) (Ψ x)) ((E q) (Ψ y))).1 := by
-- Porting note: `congr` drops `Fin.val` but fails to make further progressexact
congr_arg₂ (Fin.val <| (F q).2 · ·) (Fin.ext i') (Fin.ext j')
_ = min M ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ := by simp only [(E q).symm_apply_apply]
_ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ := by
refine' min_eq_right (Nat.floor_mono _)
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
change dist (Ψ x : q.Rep) (Ψ y) ≤ C
refine (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans ?_
exact hdiam q qt
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) =
↑(Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y)))
[PROOFSTEP]
exact congr_arg₂ (Fin.val <| (F q).2 · ·) (Fin.ext i') (Fin.ext j')
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ ↑(Sigma.snd (F q) (↑(E q) (Ψ x)) (↑(E q) (Ψ y))) = min M ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
[PROOFSTEP]
simp only [(E q).symm_apply_apply]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ min M ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
[PROOFSTEP]
refine' min_eq_right (Nat.floor_mono _)
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ ε⁻¹ * dist (Ψ x) (Ψ y) ≤ ε⁻¹ * max C 0
[PROOFSTEP]
refine' mul_le_mul_of_nonneg_left (le_trans _ (le_max_left _ _)) (inv_pos.2 εpos).le
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ dist (Ψ x) (Ψ y) ≤ C
[PROOFSTEP]
change dist (Ψ x : q.Rep) (Ψ y) ≤ C
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ dist ↑(Ψ x) ↑(Ψ y) ≤ C
[PROOFSTEP]
refine (dist_le_diam_of_mem isCompact_univ.bounded (mem_univ _) (mem_univ _)).trans ?_
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
⊢ diam univ ≤ C
[PROOFSTEP]
exact hdiam q qt
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have : ((F p).2 ⟨i, hip⟩ ⟨j, hjp⟩).1 = ((F q).2 ⟨i, hiq⟩ ⟨j, hjq⟩).1 :=
by
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
rw [← hpq']
-- porting note: new version above because `subst…` does not work
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- dsimp only [show N q = (F q).1 from rfl] at hiq hjq ⊢
-- generalize F q = f at hpq ⊢
-- subst hpq
-- intros
-- rfl
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
⊢ ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
[PROOFSTEP]
have hpq' : HEq (F p).snd (F q).snd := (Sigma.mk.inj_iff.1 hpq).2
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
hpq' : HEq (F p).snd (F q).snd
⊢ ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
[PROOFSTEP]
rw [Fin.heq_fun₂_iff Npq Npq] at hpq'
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
hpq' :
∀ (i j : Fin (N p)),
Sigma.snd (F p) i j = Sigma.snd (F q) { val := ↑i, isLt := (_ : ↑i < N q) } { val := ↑j, isLt := (_ : ↑j < N q) }
⊢ ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
[PROOFSTEP]
rw [← hpq']
-- porting note: new version above because `subst…` does not work
-- we want to `subst hpq` where `hpq : F p = F q`, except that `subst` only works
-- with a constant, so replace `F q` (and everything that depends on it) by a constant `f`
-- then `subst`
-- dsimp only [show N q = (F q).1 from rfl] at hiq hjq ⊢
-- generalize F q = f at hpq ⊢
-- subst hpq
-- intros
-- rfl
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ :=
by
rw [Ap, Aq] at this
have D : 0 ≤ ⌊ε⁻¹ * dist x y⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
have D' : 0 ≤ ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
rw [← Int.toNat_of_nonneg D, ← Int.toNat_of_nonneg D', Int.floor_toNat, Int.floor_toNat, this]
-- deduce that the distances coincide up to `ε`, by a straightforward computation
-- that should be automated
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
⊢ ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
rw [Ap, Aq] at this
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this : ⌊ε⁻¹ * dist x y⌋₊ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
⊢ ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
have D : 0 ≤ ⌊ε⁻¹ * dist x y⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this : ⌊ε⁻¹ * dist x y⌋₊ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
D : 0 ≤ ⌊ε⁻¹ * dist x y⌋
⊢ ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
have D' : 0 ≤ ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋ := floor_nonneg.2 (mul_nonneg (le_of_lt (inv_pos.2 εpos)) dist_nonneg)
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this : ⌊ε⁻¹ * dist x y⌋₊ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
D : 0 ≤ ⌊ε⁻¹ * dist x y⌋
D' : 0 ≤ ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
[PROOFSTEP]
rw [← Int.toNat_of_nonneg D, ← Int.toNat_of_nonneg D', Int.floor_toNat, Int.floor_toNat, this]
-- deduce that the distances coincide up to `ε`, by a straightforward computation
-- that should be automated
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
have I :=
calc
|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| = |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| := (abs_mul _ _).symm
_ = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)| := by congr; ring
_ ≤ 1 := le_of_lt (abs_sub_lt_one_of_floor_eq_floor this)
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ |ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y))| = |ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)|
[PROOFSTEP]
congr
[GOAL]
case e_a
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
⊢ ε⁻¹ * (dist x y - dist (Ψ x) (Ψ y)) = ε⁻¹ * dist x y - ε⁻¹ * dist (Ψ x) (Ψ y)
[PROOFSTEP]
ring
[GOAL]
case refine'_3
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ |dist x y - dist (Ψ x) (Ψ y)| ≤ ε
[PROOFSTEP]
calc
|dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| := by
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
_ = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|) := by rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
_ ≤ ε * 1 := (mul_le_mul_of_nonneg_left I (le_of_lt εpos))
_ = ε := mul_one _
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ |dist x y - dist (Ψ x) (Ψ y)| = ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)|
[PROOFSTEP]
rw [mul_inv_cancel (ne_of_gt εpos), one_mul]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
x y : ↑(s p)
this✝¹ : dist (Φ x) (Φ y) = dist (Ψ x) (Ψ y)
i : ℕ := ↑(↑(E p) x)
hip : i < N p
hiq : i < N q
i' : i = ↑(↑(E q) (Ψ x))
j : ℕ := ↑(↑(E p) y)
hjp : j < N p
hjq : j < N q
j' : j = ↑(↑(E q) (Ψ y))
Ap : ↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) = ⌊ε⁻¹ * dist x y⌋₊
Aq : ↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq }) = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋₊
this✝ :
↑(Sigma.snd (F p) { val := i, isLt := hip } { val := j, isLt := hjp }) =
↑(Sigma.snd (F q) { val := i, isLt := hiq } { val := j, isLt := hjq })
this : ⌊ε⁻¹ * dist x y⌋ = ⌊ε⁻¹ * dist (Ψ x) (Ψ y)⌋
I : |ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)| ≤ 1
⊢ ε * ε⁻¹ * |dist x y - dist (Ψ x) (Ψ y)| = ε * (|ε⁻¹| * |dist x y - dist (Ψ x) (Ψ y)|)
[PROOFSTEP]
rw [abs_of_nonneg (le_of_lt (inv_pos.2 εpos)), mul_assoc]
[GOAL]
case intro.refine'_2.mk.mk
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
main : ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
⊢ dist ↑{ val := p, property := pt } ↑{ val := q, property := qt } < δ
[PROOFSTEP]
calc
dist p q = ghDist p.Rep q.Rep := dist_ghDist p q
_ ≤ ε + ε / 2 + ε := main
_ = δ / 2 := by simp only [one_div]; ring
_ < δ := half_lt_self δpos
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
main : ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
⊢ ε + ε / 2 + ε = δ / 2
[PROOFSTEP]
simp only [one_div]
[GOAL]
t : Set GHSpace
C : ℝ
u : ℕ → ℝ
K : ℕ → ℕ
ulim : Tendsto u atTop (𝓝 0)
hdiam : ∀ (p : GHSpace), p ∈ t → diam univ ≤ C
hcov : ∀ (p : GHSpace), p ∈ t → ∀ (n : ℕ), ∃ s, #↑s ≤ ↑(K n) ∧ univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s), ball x (u n)
δ : ℝ
δpos : δ > 0
ε : ℝ := 1 / 5 * δ
εpos : 0 < ε
n : ℕ
hn : ∀ (n_1 : ℕ), n_1 ≥ n → dist (u n_1) 0 < ε
u_le_ε : u n ≤ ε
s : (p : GHSpace) → Set (GHSpace.Rep p)
N : GHSpace → ℕ
hN : ∀ (p : GHSpace), N p ≤ K n
E : (p : GHSpace) → ↑(s p) ≃ Fin (N p)
hs : ∀ (p : GHSpace), p ∈ t → univ ⊆ ⋃ (x : GHSpace.Rep p) (_ : x ∈ s p), ball x (u n)
M : ℕ := ⌊ε⁻¹ * max C 0⌋₊
F : GHSpace → (k : Fin (Nat.succ (K n))) × (Fin ↑k → Fin ↑k → Fin (Nat.succ M)) :=
fun p =>
{ fst := { val := N p, isLt := (_ : N p < Nat.succ (K n)) },
snd := fun a b =>
{ val := min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊,
isLt := (_ : min M ⌊ε⁻¹ * dist (↑(E p).symm a) (↑(E p).symm b)⌋₊ < Nat.succ M) } }
p : GHSpace
pt : p ∈ t
q : GHSpace
qt : q ∈ t
hpq : (fun p => F ↑p) { val := p, property := pt } = (fun p => F ↑p) { val := q, property := qt }
Npq : N p = N q
Ψ : ↑(s p) → ↑(s q) := fun x => ↑(E q).symm (↑(Fin.castIso Npq) (↑(E p) x))
Φ : ↑(s p) → GHSpace.Rep q := fun x => ↑(Ψ x)
main : ghDist (GHSpace.Rep p) (GHSpace.Rep q) ≤ ε + ε / 2 + ε
⊢ 5⁻¹ * δ + 5⁻¹ * δ / 2 + 5⁻¹ * δ = δ / 2
[PROOFSTEP]
ring
[GOAL]
X : ℕ → Type
inst✝³ : (n : ℕ) → MetricSpace (X n)
inst✝² : ∀ (n : ℕ), CompactSpace (X n)
inst✝¹ : ∀ (n : ℕ), Nonempty (X n)
A : Type
inst✝ : MetricSpace A
⊢ MetricSpace A
[PROOFSTEP]
infer_instance
[GOAL]
X : ℕ → Type
inst✝³ : (n : ℕ) → MetricSpace (X n)
inst✝² : ∀ (n : ℕ), CompactSpace (X n)
inst✝¹ : ∀ (n : ℕ), Nonempty (X n)
A : Type
inst✝ : MetricSpace A
x y : A
⊢ edist (id x) (id y) = edist x y
[PROOFSTEP]
exact rfl
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
n✝ n : ℕ
Y : AuxGluingStruct (X n)
⊢ MetricSpace (GlueSpace (_ : Isometry Y.embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))))
[PROOFSTEP]
infer_instance
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
⊢ CompleteSpace GHSpace
[PROOFSTEP]
set d := fun n : ℕ ↦ ((1 : ℝ) / 2) ^ n
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
⊢ CompleteSpace GHSpace
[PROOFSTEP]
have : ∀ n : ℕ, 0 < d n := fun _ ↦ by
positivity
-- start from a sequence of nonempty compact metric spaces within distance `1/2^n` of each other
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
x✝ : ℕ
⊢ 0 < d x✝
[PROOFSTEP]
positivity
-- start from a sequence of nonempty compact metric spaces within distance `1/2^n` of each other
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
⊢ CompleteSpace GHSpace
[PROOFSTEP]
refine'
Metric.complete_of_convergent_controlled_sequences d this fun u hu =>
_
-- `X n` is a representative of `u n`
[GOAL]
X : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X n)
inst✝ : ∀ (n : ℕ), Nonempty (X n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let X n := (u n).Rep
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let Y := auxGluing X
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have E : ∀ n : ℕ, GlueSpace (Y n).isom (isometry_optimalGHInjl (X n) (X (n + 1))) = (Y (n + 1)).Space := fun n => by
dsimp only [auxGluing]; rfl
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
n : ℕ
⊢ GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
[PROOFSTEP]
dsimp only [auxGluing]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
n : ℕ
⊢ GlueSpace
(_ :
Isometry
(Nat.rec default
(fun n Y =>
{
Space :=
GlueSpace (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))),
metric := inferInstance,
embed :=
toGlueR (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) ∘
optimalGHInjr (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))),
isom :=
(_ :
Isometry
(toGlueR (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) ∘
optimalGHInjr (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) })
n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) =
(Nat.rec default
(fun n Y =>
{
Space :=
GlueSpace (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))),
metric := inferInstance,
embed :=
toGlueR (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) ∘
optimalGHInjr (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))),
isom :=
(_ :
Isometry
(toGlueR (_ : Isometry Y.embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) ∘
optimalGHInjr (GHSpace.Rep (u n)) (GHSpace.Rep (u (n + 1))))) })
(n + 1)).Space
[PROOFSTEP]
rfl
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let c n := cast (E n)
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have ic : ∀ n, Isometry (c n) := fun n x y => by dsimp only [auxGluing]; exact rfl
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
n : ℕ
x y : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1))))
⊢ edist (c n x) (c n y) = edist x y
[PROOFSTEP]
dsimp only [auxGluing]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
n : ℕ
x y : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1))))
⊢ edist
(cast
(_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
x)
(cast
(_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
y) =
edist x y
[PROOFSTEP]
exact rfl
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let f : ∀ n, (Y n).Space → (Y (n + 1)).Space := fun n =>
c n ∘ toGlueL (Y n).isom (isometry_optimalGHInjl (X n) (X n.succ))
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have I : ∀ n, Isometry (f n) := fun n =>
(ic n).comp
(toGlueL_isometry _ _)
-- consider the inductive limit `Z0` of the `Y n`, and then its completion `Z`
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let Z0 := Metric.InductiveLimit I
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let Z := UniformSpace.Completion Z0
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let Φ := toInductiveLimit I
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let coeZ :=
((↑) : Z0 → Z)
-- let `X2 n` be the image of `X n` in the space `Z`
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let X2 n := range (coeZ ∘ Φ n ∘ (Y n).embed)
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have isom : ∀ n, Isometry (coeZ ∘ Φ n ∘ (Y n).embed) := by
intro n
refine' UniformSpace.Completion.coe_isometry.comp _
exact (toInductiveLimit_isometry _ _).comp (Y n).isom
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
⊢ ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
[PROOFSTEP]
intro n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
[PROOFSTEP]
refine' UniformSpace.Completion.coe_isometry.comp _
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ Isometry (Φ n ∘ (Y n).embed)
[PROOFSTEP]
exact (toInductiveLimit_isometry _ _).comp (Y n).isom
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have X2n :
∀ n,
X2 n =
range
((coeZ ∘ Φ n.succ ∘ c n ∘ toGlueR (Y n).isom (isometry_optimalGHInjl (X n) (X n.succ))) ∘
optimalGHInjl (X n) (X n.succ)) :=
by
intro n
change
X2 n =
range
(coeZ ∘
Φ n.succ ∘
c n ∘ toGlueR (Y n).isom (isometry_optimalGHInjl (X n) (X n.succ)) ∘ optimalGHInjl (X n) (X n.succ))
simp only
--[X2, Φ]
rw [← toInductiveLimit_commute I]
simp only
--[f]
rw [← toGlue_commute]
rfl
-- simp_rw [range_comp] at X2n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
⊢ ∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
[PROOFSTEP]
intro n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
[PROOFSTEP]
change
X2 n =
range
(coeZ ∘
Φ n.succ ∘ c n ∘ toGlueR (Y n).isom (isometry_optimalGHInjl (X n) (X n.succ)) ∘ optimalGHInjl (X n) (X n.succ))
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ X2 n =
range
(coeZ ∘
Φ (Nat.succ n) ∘
c n ∘
toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n)))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
[PROOFSTEP]
simp only
--[X2, Φ]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ range (↑(InductiveLimit I) ∘ toInductiveLimit I n ∘ (auxGluing (fun n => GHSpace.Rep (u n)) n).embed) =
range
(↑(InductiveLimit I) ∘
toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueR (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))) ∘
optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))
[PROOFSTEP]
rw [← toInductiveLimit_commute I]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ range
(↑(InductiveLimit I) ∘
(toInductiveLimit I (Nat.succ n) ∘ f n) ∘ (auxGluing (fun n => GHSpace.Rep (u n)) n).embed) =
range
(↑(InductiveLimit I) ∘
toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueR (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))) ∘
optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))
[PROOFSTEP]
simp only
--[f]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ range
(↑(InductiveLimit I) ∘
(toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueL (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n)))))) ∘
(auxGluing (fun n => GHSpace.Rep (u n)) n).embed) =
range
(↑(InductiveLimit I) ∘
toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueR (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))) ∘
optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))
[PROOFSTEP]
rw [← toGlue_commute]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
n : ℕ
⊢ range
(↑(InductiveLimit I) ∘
(toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueL (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n)))))) ∘
(auxGluing (fun n => GHSpace.Rep (u n)) n).embed) =
range
(↑(InductiveLimit I) ∘
toInductiveLimit I (Nat.succ n) ∘
cast
(_ :
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) =
(Y (n + 1)).Space) ∘
toGlueL (_ : Isometry (auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
(_ : Isometry (optimalGHInjl (GHSpace.Rep (u n)) (GHSpace.Rep (u (Nat.succ n))))) ∘
(auxGluing (fun n => GHSpace.Rep (u n)) n).embed)
[PROOFSTEP]
rfl
-- simp_rw [range_comp] at X2n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have X2nsucc :
∀ n,
X2 n.succ =
range
((coeZ ∘ Φ n.succ ∘ c n ∘ toGlueR (Y n).isom (isometry_optimalGHInjl (X n) (X n.succ))) ∘
optimalGHInjr (X n) (X n.succ)) :=
by
intro n
rfl
-- simp_rw [range_comp] at X2nsucc
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
⊢ ∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
[PROOFSTEP]
intro n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
n : ℕ
⊢ X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
[PROOFSTEP]
rfl
-- simp_rw [range_comp] at X2nsucc
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have D2 : ∀ n, hausdorffDist (X2 n) (X2 n.succ) < d n := fun n ↦
by
rw [X2n n, X2nsucc n, range_comp, range_comp, hausdorffDist_image, hausdorffDist_optimal, ← dist_ghDist]
· exact hu n n n.succ (le_refl n) (le_succ n)
· apply UniformSpace.Completion.coe_isometry.comp _
exact
(toInductiveLimit_isometry _ _).comp
((ic n).comp (toGlueR_isometry _ _))
-- consider `X2 n` as a member `X3 n` of the type of nonempty compact subsets of `Z`, which
-- is a metric space
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
n : ℕ
⊢ hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
[PROOFSTEP]
rw [X2n n, X2nsucc n, range_comp, range_comp, hausdorffDist_image, hausdorffDist_optimal, ← dist_ghDist]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
n : ℕ
⊢ dist (u n) (u (Nat.succ n)) < d n
[PROOFSTEP]
exact hu n n n.succ (le_refl n) (le_succ n)
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
n : ℕ
⊢ Isometry
(coeZ ∘
Φ (Nat.succ n) ∘ c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n)))))
[PROOFSTEP]
apply UniformSpace.Completion.coe_isometry.comp _
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
n : ℕ
⊢ Isometry
(Φ (Nat.succ n) ∘ c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n)))))
[PROOFSTEP]
exact
(toInductiveLimit_isometry _ _).comp
((ic n).comp (toGlueR_isometry _ _))
-- consider `X2 n` as a member `X3 n` of the type of nonempty compact subsets of `Z`, which
-- is a metric space
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
let X3 : ℕ → NonemptyCompacts Z := fun n =>
⟨⟨X2 n, isCompact_range (isom n).continuous⟩, range_nonempty _⟩
-- `X3 n` is a Cauchy sequence by construction, as the successive distances are
-- bounded by `(1/2)^n`
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have : CauchySeq X3 := by
refine' cauchySeq_of_le_geometric (1 / 2) 1 (by norm_num) fun n => _
rw [one_mul]
exact
le_of_lt
(D2 n)
-- therefore, it converges to a limit `L`
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
⊢ CauchySeq X3
[PROOFSTEP]
refine' cauchySeq_of_le_geometric (1 / 2) 1 (by norm_num) fun n => _
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
⊢ 1 / 2 < 1
[PROOFSTEP]
norm_num
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
n : ℕ
⊢ dist (X3 n) (X3 (n + 1)) ≤ 1 * (1 / 2) ^ n
[PROOFSTEP]
rw [one_mul]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
n : ℕ
⊢ dist (X3 n) (X3 (n + 1)) ≤ (1 / 2) ^ n
[PROOFSTEP]
exact
le_of_lt
(D2 n)
-- therefore, it converges to a limit `L`
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
rcases cauchySeq_tendsto_of_complete this with
⟨L, hL⟩
-- By construction, the image of `X3 n` in the Gromov-Hausdorff space is `u n`.
[GOAL]
case intro
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
have : ∀ n, (NonemptyCompacts.toGHSpace ∘ X3) n = u n :=
by
intro n
rw [Function.comp_apply, NonemptyCompacts.toGHSpace, ← (u n).toGHSpace_rep, toGHSpace_eq_toGHSpace_iff_isometryEquiv]
constructor
convert (isom n).isometryEquivOnRange.symm
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
⊢ ∀ (n : ℕ), (NonemptyCompacts.toGHSpace ∘ X3) n = u n
[PROOFSTEP]
intro n
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
n : ℕ
⊢ (NonemptyCompacts.toGHSpace ∘ X3) n = u n
[PROOFSTEP]
rw [Function.comp_apply, NonemptyCompacts.toGHSpace, ← (u n).toGHSpace_rep, toGHSpace_eq_toGHSpace_iff_isometryEquiv]
[GOAL]
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
n : ℕ
⊢ Nonempty ({ x // x ∈ X3 n } ≃ᵢ GHSpace.Rep (u n))
[PROOFSTEP]
constructor
[GOAL]
case val
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
n : ℕ
⊢ { x // x ∈ X3 n } ≃ᵢ GHSpace.Rep (u n)
[PROOFSTEP]
convert (isom n).isometryEquivOnRange.symm
[GOAL]
case intro
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝¹ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this✝ : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
this : ∀ (n : ℕ), (NonemptyCompacts.toGHSpace ∘ X3) n = u n
⊢ ∃ x, Tendsto u atTop (𝓝 x)
[PROOFSTEP]
use L.toGHSpace
[GOAL]
case h
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝¹ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this✝ : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
this : ∀ (n : ℕ), (NonemptyCompacts.toGHSpace ∘ X3) n = u n
⊢ Tendsto u atTop (𝓝 (NonemptyCompacts.toGHSpace L))
[PROOFSTEP]
apply Filter.Tendsto.congr this
[GOAL]
case h
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝¹ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this✝ : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
this : ∀ (n : ℕ), (NonemptyCompacts.toGHSpace ∘ X3) n = u n
⊢ Tendsto (fun x => (NonemptyCompacts.toGHSpace ∘ X3) x) atTop (𝓝 (NonemptyCompacts.toGHSpace L))
[PROOFSTEP]
refine' Tendsto.comp _ hL
[GOAL]
case h
X✝ : ℕ → Type
inst✝² : (n : ℕ) → MetricSpace (X✝ n)
inst✝¹ : ∀ (n : ℕ), CompactSpace (X✝ n)
inst✝ : ∀ (n : ℕ), Nonempty (X✝ n)
d : ℕ → ℝ := fun n => (1 / 2) ^ n
this✝¹ : ∀ (n : ℕ), 0 < d n
u : ℕ → GHSpace
hu : ∀ (N n m : ℕ), N ≤ n → N ≤ m → dist (u n) (u m) < d N
X : ℕ → Type := fun n => GHSpace.Rep (u n)
Y : (n : ℕ) → AuxGluingStruct (X n) := auxGluing X
E : ∀ (n : ℕ), GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space
c : (n : ℕ) →
GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) → (Y (n + 1)).Space :=
fun n =>
cast (_ : GlueSpace (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (n + 1)))) = (Y (n + 1)).Space)
ic : ∀ (n : ℕ), Isometry (c n)
f : (n : ℕ) → (Y n).Space → (Y (n + 1)).Space :=
fun n => c n ∘ toGlueL (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))
I : ∀ (n : ℕ), Isometry (f n)
Z0 : Type := InductiveLimit I
Z : Type := UniformSpace.Completion Z0
Φ : (n : ℕ) → (Y n).Space → InductiveLimit I := toInductiveLimit I
coeZ : Z0 → UniformSpace.Completion Z0 := ↑Z0
X2 : ℕ → Set (UniformSpace.Completion Z0) := fun n => range (coeZ ∘ Φ n ∘ (Y n).embed)
isom : ∀ (n : ℕ), Isometry (coeZ ∘ Φ n ∘ (Y n).embed)
X2n :
∀ (n : ℕ),
X2 n =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjl (X n) (X (Nat.succ n)))
X2nsucc :
∀ (n : ℕ),
X2 (Nat.succ n) =
range
((coeZ ∘
Φ (Nat.succ n) ∘
c n ∘ toGlueR (_ : Isometry (Y n).embed) (_ : Isometry (optimalGHInjl (X n) (X (Nat.succ n))))) ∘
optimalGHInjr (X n) (X (Nat.succ n)))
D2 : ∀ (n : ℕ), hausdorffDist (X2 n) (X2 (Nat.succ n)) < d n
X3 : ℕ → NonemptyCompacts Z :=
fun n =>
{ toCompacts := { carrier := X2 n, isCompact' := (_ : IsCompact (range (coeZ ∘ Φ n ∘ (Y n).embed))) },
nonempty' := (_ : Set.Nonempty (range fun y => (coeZ ∘ Φ n ∘ (Y n).embed) y)) }
this✝ : CauchySeq X3
L : NonemptyCompacts Z
hL : Tendsto X3 atTop (𝓝 L)
this : ∀ (n : ℕ), (NonemptyCompacts.toGHSpace ∘ X3) n = u n
⊢ Tendsto NonemptyCompacts.toGHSpace (𝓝 L) (𝓝 (NonemptyCompacts.toGHSpace L))
[PROOFSTEP]
apply toGHSpace_continuous.tendsto
|
{"mathlib_filename": "Mathlib.Topology.MetricSpace.GromovHausdorff", "llama_tokens": 317325}
|
# ------------------------------------------------------------------
# Licensed under the ISC License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
    BallSampler(radius, [maxsize])

A method for sampling isolated points from spatial objects using
a ball neighborhood of given `radius`. The maximum size `maxsize`
of the sample can be specified, but is not required.
"""
struct BallSampler{B<:BallNeighborhood} <: AbstractSampler
  ball::B                      # ball neighborhood used to exclude close points
  maxsize::Union{Int,Nothing}  # maximum sample size (nothing = unbounded)
end
# Convenience constructors: omit `maxsize` for an unbounded sample, or
# pass a radius directly instead of a pre-built ball neighborhood.
BallSampler(ball::BallNeighborhood) = BallSampler(ball, nothing)
BallSampler(radius::Real, maxsize=nothing) =
  BallSampler(BallNeighborhood(radius), maxsize)
"""
    sample(object, sampler::BallSampler)

Draw isolated points from `object`: repeatedly pick a random not-yet-covered
location, record it, and mark every location inside the sampler's ball
(including the picked one) as covered. Stops when `maxsize` samples were
collected or every location is covered. Returns a view of `object` at the
sampled locations.
"""
function sample(object::AbstractSpatialObject{T,N}, sampler::BallSampler) where {T,N}
  npts = npoints(object)
  ball = sampler.ball

  # upper bound on the sample size; Inf when no maxsize was given
  # (the original bound a local named `size`, shadowing Base.size)
  maxsize = isnothing(sampler.maxsize) ? Inf : sampler.maxsize

  # neighborhood search method with ball
  searcher = NeighborhoodSearcher(object, ball)

  # pre-allocate memory for coordinates
  coords = MVector{N,T}(undef)

  locations = Vector{Int}()
  notviewed = trues(npts)
  while length(locations) < maxsize && any(notviewed)
    location = rand(findall(notviewed))
    coordinates!(coords, object, location)

    # neighbors (including the location)
    neighbors = search(coords, searcher)

    push!(locations, location)
    notviewed[neighbors] .= false
  end

  view(object, locations)
end
|
{"hexsha": "3919030b0e68733f09055ae4e51c9e1fd4e2fb4e", "size": 1428, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sampling/ball_sampler.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/GeoStatsBase.jl-323cb8eb-fbf6-51c0-afd0-f8fba70507b2", "max_stars_repo_head_hexsha": "45361a196106c7078385275770658699de8e6a1e", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sampling/ball_sampler.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/GeoStatsBase.jl-323cb8eb-fbf6-51c0-afd0-f8fba70507b2", "max_issues_repo_head_hexsha": "45361a196106c7078385275770658699de8e6a1e", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sampling/ball_sampler.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/GeoStatsBase.jl-323cb8eb-fbf6-51c0-afd0-f8fba70507b2", "max_forks_repo_head_hexsha": "45361a196106c7078385275770658699de8e6a1e", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.75, "max_line_length": 85, "alphanum_fraction": 0.6659663866, "num_tokens": 320}
|
#%%
"""Demonstrate the effect of compressing the number of thresholds of a random
forest."""
import sys
from numpy.linalg import LinAlgError
from tqdm import tqdm
import os.path
import numpy as np
import xgboost as xgb
sys.path.insert(1, "..")
from datasets import load_data
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, log_loss
from ttml.ttml import TTML
from ttml.tt_rlinesearch import TTLS
from ttml.forest_compression import compress_forest_thresholds
from sklearn.ensemble import (
RandomForestRegressor,
RandomForestClassifier,
)
import scipy.special
import matplotlib.pyplot as plt
# Experiment configuration: which dataset to load and from where.
dataset_name = "airfoil"
DATASET_FOLDER = "../datasets/data"

# Load the dataset and hold out 20% for validation (fixed seed for
# reproducibility of the split).
dataset = load_data.dataset_loaders[dataset_name](DATASET_FOLDER)
X = dataset["X"]
y = dataset["y"]
X_train, X_val, y_train, y_val = train_test_split(
    X.astype(float),
    y.astype(float),
    test_size=0.2,
    random_state=179,
)

# Standardize features using statistics from the training split only.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

max_leaf_nodes = 100
max_depth = None

# Pick estimator class and loss according to the task type declared by
# the dataset loader.
if dataset["regression"]:
    forest_estim = RandomForestRegressor
    task = "regression"
    metric = mean_squared_error
else:
    forest_estim = RandomForestClassifier
    task = "classification"
    metric = log_loss

# Fit the (uncompressed) reference forest.
forest = forest_estim(
    n_estimators=128,
    max_leaf_nodes=max_leaf_nodes,
    max_depth=max_depth,
)
forest.fit(X_train, y_train)
# %%
# %%
# %%_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, log_loss
from ttml.ttml import TTML
from ttml.tt_rlinesearch import TTLS
from ttml.forest_compression import compress_forest_thresholds
from sklearn.ensemble import (
RandomForestRegressor,
RandomForestClassifier,
)
import scipy.special
import matplotlib.pyplot as plt
# NOTE(review): this cell duplicates the setup cell above verbatim (a
# notebook cell-copy artifact); re-running it reloads the data and refits
# the forest from scratch.
dataset_name = "airfoil"
DATASET_FOLDER = "../datasets/data"

dataset = load_data.dataset_loaders[dataset_name](DATASET_FOLDER)
X = dataset["X"]
y = dataset["y"]
X_train, X_val, y_train, y_val = train_test_split(
    X.astype(float),
    y.astype(float),
    test_size=0.2,
    random_state=179,
)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)

max_leaf_nodes = 100
max_depth = None

if dataset["regression"]:
    forest_estim = RandomForestRegressor
    task = "regression"
    metric = mean_squared_error
else:
    forest_estim = RandomForestClassifier
    task = "classification"
    metric = log_loss

forest = forest_estim(
    n_estimators=128,
    max_leaf_nodes=max_leaf_nodes,
    max_depth=max_depth,
)
forest.fit(X_train, y_train)
def val_loss(forest):
    """Loss of *forest* on the module-level validation split.

    Uses the globals ``metric``, ``X_val`` and ``y_val`` defined by the
    setup cell above.
    """
    predictions = forest.predict(X_val)
    return metric(y_val, predictions)
# Baseline: validation loss of the uncompressed forest.
# (NOTE(review): variable name keeps the original "uncompresed" typo.)
uncompresed_loss = val_loss(forest)
# %%
# Sweep the number of retained thresholds and record the validation loss
# of each compressed forest.
num_thresholds = np.arange(10, 100)
losses = []
for num_t in tqdm(num_thresholds):
    compressed_forest, _ = compress_forest_thresholds(forest, num_t)
    losses.append(val_loss(compressed_forest))
# %%
# Plot losses against the uncompressed baseline (dashed line).
plt.figure(figsize=(10,6))
plt.plot(num_thresholds, losses, ".")
plt.axhline(uncompresed_loss, c="k", ls="--")
plt.xlabel("Number of decision boundaries")
plt.ylabel("Validation MSE")
# plt.savefig("./figures/forest_compression.pdf", format='pdf',bbox_inches='tight')
# %%
|
{"hexsha": "67a86c4fe3c78e8fcdd2c9e5af4f664fcc32fcf9", "size": 3326, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/forest_compression_benchmark.py", "max_stars_repo_name": "RikVoorhaar/ttml", "max_stars_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/forest_compression_benchmark.py", "max_issues_repo_name": "RikVoorhaar/ttml", "max_issues_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/forest_compression_benchmark.py", "max_forks_repo_name": "RikVoorhaar/ttml", "max_forks_repo_head_hexsha": "3786cfc02976f7d6cd5f045f213e28793f4ece61", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2773722628, "max_line_length": 83, "alphanum_fraction": 0.7594708358, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 813}
|
#
# Atomic database using formal values of https://en.wikipedia.org/wiki/Standard_atomic_weight#:~:text=The%20standard%20atomic%20weight%20(A,atomic%20mass%20constant%20mu.
#
# Symbol (lowercase) => standard atomic weight; "# no value" marks elements
# with no standard atomic weight (value is the mass number of an isotope).
atomic_weight = Dict(
    # Period 1
    "h" => 1.008,
    "he" => 4.0026,
    # Period 2
    "li" => 6.94,
    "be" => 9.0122,
    "b" => 10.81,
    "c" => 12.011,
    "n" => 14.007,
    "o" => 15.999,
    "f" => 18.998,
    "ne" => 20.180,
    # Period 3
    "na" => 22.990,
    "mg" => 24.305,
    "al" => 26.982,
    "si" => 28.085,
    "p" => 30.974,
    "s" => 32.06,
    "cl" => 35.45,
    "ar" => 39.95,
    # Period 4
    "k" => 39.098,
    "ca" => 40.078,
    "sc" => 44.956,
    "ti" => 47.867,
    "v" => 50.942,
    "cr" => 51.996,
    "mn" => 54.938,
    "fe" => 55.845,
    "co" => 58.933,
    "ni" => 58.693,
    "cu" => 63.546,
    "zn" => 65.38,
    "ga" => 69.723,
    "ge" => 72.630,
    "as" => 74.922,
    "se" => 78.971,
    "br" => 79.904,
    "kr" => 83.798,
    # Period 5
    "rb" => 85.468,
    "sr" => 87.62,
    "y" => 88.906,
    "zr" => 91.224,
    "nb" => 92.906,
    "mo" => 95.95,
    "tc" => 97.907, # no value
    "ru" => 101.07,
    "rh" => 102.91,
    "pd" => 106.42,
    "ag" => 107.87,
    "cd" => 112.41,
    "in" => 114.82,
    "sn" => 118.71,
    "sb" => 121.76,
    "te" => 127.60,
    "i" => 126.90,
    "xe" => 131.29,
    # Period 6 (including lanthanides)
    "cs" => 132.91,
    "ba" => 137.33,
    "la" => 138.91,
    "ce" => 140.12,
    "pr" => 140.91,
    "nd" => 144.24,
    "pm" => 145, # no value
    "sm" => 150.36,
    "eu" => 151.96,
    "gd" => 157.25,
    "tb" => 158.93,
    "dy" => 162.50,
    "ho" => 164.93,
    "er" => 167.26,
    "tm" => 168.93,
    "yb" => 173.05,
    "lu" => 174.97,
    "hf" => 178.49,
    "ta" => 180.95,
    "w" => 183.84,
    "re" => 186.21,
    "os" => 190.23,
    "ir" => 192.22,
    "pt" => 195.08,
    "au" => 196.97,
    "hg" => 200.59,
    "tl" => 204.38,
    "pb" => 207.2,
    "bi" => 208.98,
    "po" => 209.98, # no value
    "at" => 209.99, # no value
    "rn" => 222.02, # no value
    # Period 7 (including actinides)
    "fr" => 223, # no value
    "ra" => 226, # no value
    "ac" => 227, # no value
    "th" => 232.04,
    "pa" => 231.04,
    "u" => 238.03,
    "np" => 237, # no value
    "pu" => 244, # no value
    "am" => 243, # no value
    "cm" => 247, # no value
    "bk" => 247, # no value
    "cf" => 251, # no value
    "es" => 252, # no value
    "fm" => 257, # no value
    "md" => 258, # no value
    "no" => 259, # no value
    "lr" => 262, # no value
    "rf" => 261, # no value
    "db" => 262, # no value
    "sg" => 266, # no value
    "bh" => 264, # no value
    "hs" => 277, # no value
    "mt" => 268, # no value
    "ds" => 271, # no value
    "rg" => 272, # no value
    "cn" => 285, # no value
    "nh" => 284, # no value
    "fl" => 289, # no value
    "mc" => 288, # no value
    "lv" => 293, # no value
    "ts" => 294, # no value
    "og" => 294, # no value
)
|
{"hexsha": "87cc778a0022d56abd08593bc752faa8d32f2d58", "size": 3006, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Data.jl", "max_stars_repo_name": "SeleneSofi/MolarWeight.jl", "max_stars_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Data.jl", "max_issues_repo_name": "SeleneSofi/MolarWeight.jl", "max_issues_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Data.jl", "max_forks_repo_name": "SeleneSofi/MolarWeight.jl", "max_forks_repo_head_hexsha": "21d1b3b3a77601d442f14e84613d48f42ef1496b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2419354839, "max_line_length": 170, "alphanum_fraction": 0.377910845, "num_tokens": 1404}
|
"""Evaluate the model."""
import os
import nltk
import torch
import random
import logging
import argparse
import numpy as np
import utils as utils
from metrics import get_entities
from data_loader import DataLoader
from SequenceTagger import BertForSequenceTagging
# Command-line interface: --dataset selects the trained model directory
# under experiments/, --seed controls RNG initialization.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='msra', help="Directory containing the dataset")
parser.add_argument('--seed', type=int, default=23, help="random seed for initialization")

# Restrict the process to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def interAct(model, data_iterator, params, mark='Interactive', verbose=False):
    """Predict tags for a single batch drawn from `data_iterator`.

    Returns the entities extracted (via ``get_entities``) from the
    predicted tag sequences. ``mark`` and ``verbose`` are accepted for
    API compatibility but unused.
    """
    model.eval()  # switch to evaluation mode (no dropout)
    idx2tag = params.idx2tag

    batch_data, batch_token_starts = next(data_iterator)
    padding_mask = batch_data.gt(0)  # non-pad positions

    # logits shape: (batch_size, max_len, num_labels)
    logits = model((batch_data, batch_token_starts), token_type_ids=None,
                   attention_mask=padding_mask)[0]
    scores = logits.detach().cpu().numpy()

    pred_tags = [[idx2tag.get(idx) for idx in sequence]
                 for sequence in np.argmax(scores, axis=2)]
    return get_entities(pred_tags)
def bert_ner_init():
    """Load a trained BERT sequence tagger and its data pipeline.

    Reads --dataset/--seed from the command line, loads params.json from
    experiments/<dataset>, seeds the RNGs, builds the DataLoader with the
    pretrained vocabulary matching the dataset's language, and moves the
    model to GPU when available.

    Returns:
        (model, data_loader, dataset_name, params) tuple consumed by
        BertNerResponse.

    Raises:
        ValueError: if --dataset is not one of the supported datasets.
    """
    args = parser.parse_args()
    tagger_model_dir = 'experiments/' + args.dataset

    # Load the parameters from json file
    json_path = os.path.join(tagger_model_dir, 'params.json')
    assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    # Use GPUs if available
    params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Set the random seed for reproducible experiments
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    params.seed = args.seed

    # Set the logger
    utils.set_logger(os.path.join(tagger_model_dir, 'evaluate.log'))

    # Create the input data pipeline
    logging.info("Loading the dataset...")

    # Initialize the DataLoader; the pretrained vocabulary must match the
    # language of the dataset.
    data_dir = 'data/' + args.dataset
    if args.dataset in ["conll"]:
        bert_class = 'bert-base-cased'
    elif args.dataset in ["msra"]:
        bert_class = 'bert-base-chinese'
    elif args.dataset in ["pe"]:
        bert_class = 'bert-base-multilingual-cased'
    else:
        # Previously `bert_class` was left unbound for unknown datasets,
        # producing a confusing NameError below; fail fast instead.
        raise ValueError("Unsupported dataset: {}".format(args.dataset))

    data_loader = DataLoader(data_dir, bert_class, params, token_pad_idx=0, tag_pad_idx=-1)

    # Load the model
    model = BertForSequenceTagging.from_pretrained(tagger_model_dir)
    model.to(params.device)

    return model, data_loader, args.dataset, params
def BertNerResponse(model, queryString):
    """Tag `queryString` and return a list of (entity text, label) pairs.

    `model` is the (tagger, data_loader, dataset_name, params) bundle
    returned by bert_ner_init. The query is tokenized (characters for
    'msra'/'pe', nltk words for 'conll'), written to the dataset's
    interactive sentences file, and run through the tagger.
    """
    tagger, loader, dataset, params = model

    # Character-level tokens for Chinese/multilingual sets, word tokens
    # for CoNLL; any other dataset name keeps the raw string unchanged.
    if dataset in ['msra', 'pe']:
        tokens = list(queryString)
    elif dataset in ['conll']:
        tokens = nltk.word_tokenize(queryString)
    else:
        tokens = queryString

    with open('data/' + dataset + '/interactive/sentences.txt', 'w') as sentence_file:
        sentence_file.write(' '.join(tokens))

    interactive_data = loader.load_data('interactive')
    batch_iterator = loader.data_iterator(interactive_data, shuffle=False)
    entities = interAct(tagger, batch_iterator, params)

    # Each entity is (label, start_index, end_index); join the covered
    # tokens back into the surface form.
    response = []
    for entity in entities:
        span = tokens[entity[1]:entity[2] + 1]
        if dataset in ['msra', 'pe']:
            response.append((''.join(span), entity[0]))
        elif dataset in ['conll']:
            response.append((' '.join(span), entity[0]))
    return response
def main():
    """Interactive REPL: tag user queries until the user types 'exit'."""
    bundle = bert_ner_init()
    query = input('Input:')
    while query != 'exit':
        print(BertNerResponse(bundle, query))
        query = input('Input:')
# Run the interactive loop when executed as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "1cefea898b846e1d73ab147f971659cdf58a9e6b", "size": 3629, "ext": "py", "lang": "Python", "max_stars_repo_path": "interactive.py", "max_stars_repo_name": "ssabzzz/BERT-NER", "max_stars_repo_head_hexsha": "ab60d6afee2b5b4200149c6270823872fd8efecd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "interactive.py", "max_issues_repo_name": "ssabzzz/BERT-NER", "max_issues_repo_head_hexsha": "ab60d6afee2b5b4200149c6270823872fd8efecd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "interactive.py", "max_forks_repo_name": "ssabzzz/BERT-NER", "max_forks_repo_head_hexsha": "ab60d6afee2b5b4200149c6270823872fd8efecd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2844827586, "max_line_length": 154, "alphanum_fraction": 0.6844860843, "include": true, "reason": "import numpy", "num_tokens": 852}
|
import numpy as np
import sys
import time
import os
#from generation_model import Generation_model
class data_collection:
    """Container bundling train/validation/test splits of a dataset.

    Holds the raw feature matrices and label vectors per split, plus the
    one-hot label matrices produced by data_check().
    """

    # Raw feature matrices and label vectors per split.
    train_x_matrix = None
    train_y_vector = None
    valid_x_matrix = None
    valid_y_vector = None
    test_x_matrix = None
    test_y_vector = None
    # One-hot label matrices (filled in by data_check).
    train_y_matrix = None
    valid_y_matrix = None
    test_y_matrix = None
    num_classes = 0
    min_class = 0
    class_column = 0

    def __init__(self, train_x_matrix, train_y_vector, class_column=0):
        # NOTE: a second argument-less __init__ used to precede this one;
        # it was dead code (Python keeps only the last definition in a
        # class body) and has been removed.
        self.train_x_matrix = train_x_matrix
        self.train_y_vector = train_y_vector
        self.class_column = class_column

    def gene_data_stru(self):
        """Derive a data_structure describing the training matrix.

        Accepts 3-d (ins, len, num) or 4-d (ins, len, num, map) training
        matrices; 3-d input is reshaped to 4-d with a single input map.

        Raises:
            Exception: if the training matrix is missing or has an
                unsupported number of dimensions.
        """
        train_x_matrix = self.train_x_matrix
        if train_x_matrix is None:
            raise Exception("Missing training data")
        train_y_vector = self.train_y_vector
        train_shape_len = len(train_x_matrix.shape)
        if train_shape_len == 3:
            train_ins, attr_len, attr_num = train_x_matrix.shape
            input_map = 1
            train_x_matrix = train_x_matrix.reshape(train_ins, attr_len, attr_num, input_map)
        elif train_shape_len == 4:
            train_ins, attr_len, attr_num, input_map = train_x_matrix.shape
        else:
            raise Exception("Input x matrix invalid shape!!!")
        min_class = min(train_y_vector)
        max_class = max(train_y_vector)
        # Labels are assumed to cover the contiguous range [min, max].
        num_classes = max_class - min_class + 1
        return return_data_stru(num_classes, min_class, attr_num, attr_len, self.class_column, train_ins)

    def data_check(self, num_classes, min_class):
        """Populate one-hot label matrices for every split that has labels.

        When no validation split exists but a test split does, the test
        split is reused as validation.
        """
        if self.train_y_vector is not None:
            self.train_y_matrix = y_vector_to_matrix(self.train_y_vector, num_classes, min_class)
        if self.test_y_vector is not None:
            self.test_y_matrix = y_vector_to_matrix(self.test_y_vector, num_classes, min_class)
        if self.valid_y_vector is not None:
            self.valid_y_matrix = y_vector_to_matrix(self.valid_y_vector, num_classes, min_class)
        elif self.test_y_vector is not None:
            self.valid_x_matrix = self.test_x_matrix
            self.valid_y_vector = self.test_y_vector
            self.valid_y_matrix = self.test_y_matrix
########################################################################################
## data structure part
# starting from min_class, max_class = min_class + num_classes - 1
# It means that all numbers between min_class to max_class should be used as a label
class data_structure:
    """Lightweight record describing a multivariate time-series dataset.

    Labels are assumed to cover the contiguous range
    [min_class, min_class + num_classes - 1].
    """

    def __init__(self, num_classes, min_class, attr_num, attr_len, class_c=0, train_ins=-1):
        self.num_classes = num_classes   # number of distinct labels
        self.min_class = min_class       # smallest label value
        self.attr_num = attr_num         # number of attributes (variables)
        self.attr_len = attr_len         # length of each attribute series
        self.class_column = class_c      # column holding the class label
        self.train_ins = train_ins       # number of training instances

    def print_to_string(self):
        """Return a multi-line human-readable summary (min_class omitted)."""
        template = ("num of classes: {}\nattribute number: {}"
                    "\nattribute length: {}\nclass column: {}\ntrain_ins: {}")
        return template.format(self.num_classes, self.attr_num,
                               self.attr_len, self.class_column, self.train_ins)
def return_data_stru(num_classes, min_class, attr_num, attr_len, class_column, train_ins=-1):
    """Convenience factory: build a data_structure from the given fields."""
    return data_structure(num_classes, min_class, attr_num, attr_len, class_column, train_ins)
def data_stru_gene(train_y_vector, class_colum=0):
    """Compute (num_classes, min_class) from a label vector.

    Labels are assumed to cover the contiguous range [min, max], so the
    class count is max - min + 1.

    Note: the original version computed the count into a misspelled local
    (``num_clasess``) and returned nothing; it now returns the values.
    ``class_colum`` (sic) is kept for interface compatibility but unused.
    """
    min_class = min(train_y_vector)
    max_class = max(train_y_vector)
    num_classes = max_class - min_class + 1
    return num_classes, min_class
def copy_data_stru(in_data_stru):
    """Return a new data_structure copying every field of `in_data_stru`.

    Fixes two defects of the original: it read a nonexistent
    ``start_class`` attribute (data_structure stores ``min_class``),
    raising AttributeError, and it silently dropped ``class_column`` and
    ``train_ins``.
    """
    return data_structure(in_data_stru.num_classes, in_data_stru.min_class,
                          in_data_stru.attr_num, in_data_stru.attr_len,
                          in_data_stru.class_column, in_data_stru.train_ins)
## end of data structure part
########################################################################################
def train_test_transpose(data_matrix, attr_num, attr_len, trans=True):
    """Reshape a flat (instances, attr_num*attr_len) matrix into the 4-d
    CNN input layout.

    Returns shape (instances, attr_len, 1, attr_num) when `trans` is True,
    otherwise (instances, attr_len, attr_num, 1).
    """
    n_instances, _ = data_matrix.shape
    tensor = data_matrix.reshape(n_instances, attr_num, attr_len, 1)
    # preserve original semantics: only the literal True selects the
    # channel-last ordering
    axis_order = (0, 2, 3, 1) if trans == True else (0, 2, 1, 3)
    return np.transpose(tensor, axis_order)
def y_vector_to_matrix(y_vector, num_classes, start_class=0):
    """One-hot encode a label vector into a (len, num_classes) float matrix.

    Labels are shifted by `start_class` so the smallest label maps to
    column 0. Non-integer labels are truncated toward zero, matching the
    original per-element int() cast.

    Replaces the original Python loop with a single vectorized fancy-index
    assignment.
    """
    n_labels = len(y_vector)
    columns = np.asarray(y_vector).astype(int) - start_class
    y_matrix = np.zeros((n_labels, num_classes))
    y_matrix[np.arange(n_labels), columns] = 1
    return y_matrix
def class_label_vector_checking(y_vector):
    """Group label positions and report class-size extremes.

    For every label value in [min(y_vector), max(y_vector)] (assumed
    integer), map the label to the numpy index array of its occurrences.
    Intended for numpy arrays (uses elementwise ``y_vector == label``).

    Returns:
        (label -> index-array dict, smallest class size, largest class size)
    """
    lowest, highest = min(y_vector), max(y_vector)
    class_index_dict = {}
    sizes = []
    for label in range(lowest, highest + 1):
        indices = np.where(y_vector == label)[0]
        class_index_dict[label] = indices
        sizes.append(len(indices))
    return class_index_dict, min(sizes), max(sizes)
def feature_data_generation_4d(data_matrix, feature_index_list):
    """Select attribute columns from a 4-d (ins, len, map, num) tensor.

    Each instance is flattened to (attr_len, attr_num) — valid only when
    the map dimension is 1 (the reshape raises otherwise) — the columns in
    `feature_index_list` are kept, and the result is flattened per row.

    Returns:
        (matrix of shape (ins, len(feature_index_list) * attr_len),
         number of selected attributes)
    """
    n_rows, attr_len, num_map, attr_num = data_matrix.shape
    n_selected = len(feature_index_list)
    flat_len = n_selected * attr_len
    filtered = [
        data_matrix[i].reshape(attr_len, attr_num)[:, feature_index_list].reshape(flat_len)
        for i in range(n_rows)
    ]
    return np.array(filtered).reshape(n_rows, flat_len), n_selected
def feature_data_generation(data_matrix, attr_len, attr_num, feature_index_list):
    """Select attribute columns from a flat (ins, attr_len*attr_num) matrix.

    Each row is interpreted as an (attr_len, attr_num) matrix; the columns
    in `feature_index_list` are kept and flattened back into one row.

    Returns:
        (matrix of shape (ins, len(feature_index_list) * attr_len),
         number of selected attributes)
    """
    n_rows, _ = data_matrix.shape
    n_selected = len(feature_index_list)
    flat_len = n_selected * attr_len
    filtered = [
        data_matrix[i].reshape(attr_len, attr_num)[:, feature_index_list].reshape(flat_len)
        for i in range(n_rows)
    ]
    return np.array(filtered).reshape(n_rows, flat_len), n_selected
def feature_data_generation_v1(data_matrix, attr_num, feature_index_list, group_list=None):
    """Select attribute rows (groups first, then single features) per instance.

    Each row of `data_matrix` is interpreted as an (attr_num, attr_len)
    matrix with attr_len inferred from the column count. For every
    instance, the rows of each group in `group_list` are emitted first
    (in order), followed by the rows in `feature_index_list`; everything
    is flattened back into one output row.

    Fixes:
      * ``attr_len = col_n / attr_num`` used true division, producing a
        float that breaks reshape under Python 3 — now integer division.
      * mutable default argument ``group_list=[]`` replaced with None
        (call-compatible).

    Returns:
        (matrix of shape (ins, new_attr * attr_len), new_attr, attr_len)
    """
    if group_list is None:
        group_list = []
    row_n, col_n = data_matrix.shape
    attr_len = col_n // attr_num  # integer division; was float under py3

    new_attr = len(feature_index_list) + sum(len(group) for group in group_list)
    new_row_col = new_attr * attr_len

    ret_rows = []
    for i in range(row_n):
        ori_matrix = data_matrix[i].reshape(attr_num, attr_len)
        if group_list:
            # group rows first (flattened in order), then the single features
            parts = [ori_matrix[group, :].reshape(-1) for group in group_list]
            parts.append(ori_matrix[feature_index_list, :].reshape(-1))
            matrix = np.concatenate(parts)
        else:
            matrix = ori_matrix[feature_index_list, :]
        ret_rows.append(matrix.reshape(new_row_col))
    return np.array(ret_rows).reshape(row_n, new_row_col), new_attr, attr_len
def z_normlization(time_series):
    """Z-normalize: subtract the mean and divide by the population std.

    Accepts any sequence numpy can average; returns a numpy array.
    """
    center = np.mean(time_series)
    spread = np.std(time_series)
    normalized = (time_series - center) / spread
    return normalized
if __name__ == '__main__':
    # Two example series (same signal at different scales) illustrating
    # z-normalization.
    series1 = [2.02, 2.33, 2.99, 6.85, 9.20, 8.80, 7.50, 6.00, 5.85, 3.85, 4.85, 3.85, 2.22, 1.45, 1.34]
    series2 = [-0.12, -0.16, -0.13, 0.28, 0.37, 0.39, 0.18, 0.09, 0.15, -0.06, 0.06, -0.07, -0.13, -0.18, -0.26]
    norm_1 = z_normlization(series1)
    norm_2 = z_normlization(series2)
    x = range(0, len(series1))
    #data_str = 'uci'
    #uci_data_stru = return_data_stru(data_str)
    ##uci_data_stru.num_classes = 3
    ##uci_data_stru.start_class = 11
    #uci_data_stru.print_to_string()
    #
    # Small random demo of the feature-selection helpers.
    row_num = 1
    data_matrix = np.random.rand(row_num, 24)
    max_gap = 0
    attr_num=6
    feature_index_list = np.array([row_num, 3])
    print (data_matrix.reshape(row_num, attr_num, 4))
    # NOTE(review): the Generation_model import is commented out at the top
    # of the file, so this line raises NameError when the demo runs.
    model = Generation_model(attr_num, 3, [], 2)
    # NOTE(review): this call matches feature_data_generation_v1's signature
    # and 3-value return, not feature_data_generation's — verify intent.
    matrix, attr_num, attr_len = feature_data_generation(data_matrix, attr_num, model.selected_list, model.groups)
    print ("===")
    print (model.selected_list)
    print (model.groups)
    print (matrix.reshape(row_num, attr_num, attr_len))
###########################################################################
## Duplication part, used for multiple time series data. Do duplication on attributes dimension in order to run CNN
## duplicate rows
#def row_duplication(data_matrix, max_gap):
# ret_matrix = data_matrix.copy()
# row_n, col_n = ret_matrix.shape
# for i in range(2, max_gap+1):
# ret_matrix = np.append(ret_matrix, data_matrix[::i], axis=0)
# ret_matrix = np.append(ret_matrix, data_matrix[1::i], axis=0)
# return ret_matrix
#
#
## duplicate cols
#def col_duplication(data_matrix, max_gap):
# ret_matrix = data_matrix.copy()
# row_n, col_n = ret_matrix.shape
# for i in range(2, max_gap+1):
# ret_matrix = np.append(ret_matrix, data_matrix[:, ::i], axis=1)
# ret_matrix = np.append(ret_matrix, data_matrix[:, 1::i], axis=1)
# return ret_matrix
#
#
## Use to update data matrix in order to generate features based on time dimension
## data_matrix: A * T: A is number of attributes, T is the length of time dimension
## max_gap: In order to run cnn feature detection, we would like to do duplication on attribute dimension. No duplication if map_gap ==0
## data_stru: information for data_matrix
## Logic: 1, do matrix transpose to get data_matrix T * A
## 2, in order to get rid of the effect from attribute order, we do duplication on attribute dimension
## result from step 2 is T * (A + A/2 + A/3 + ... until max_gap)
## return updated data_matrix and updated data_stru
#def time_as_feature_transpose(data_matrix, max_gap):
# data_matrix = np.transpose(data_matrix) # T * A
# if max_gap == 0:
# return data_matrix
#
# data_matrix = col_duplication(data_matrix, max_gap) # data_matrix with duplication on attribute dimension (column dimension)
# return data_matrix
#
#
## We need d3_time_as_feature_transpose because the original data matrix is N * (A * T), N is number of instances, and column lenght is (A * T) which is the number of attributes times attribute length
#def d3_time_as_feature_transpose(d3_data_matrix, max_gap, data_stru):
# attr_num = data_stru.attr_num
# attr_len = data_stru.attr_len # which is the length of time dimension now
#
# row_n, col_n = d3_data_matrix.shape
# ret_data_matrix = list()
# data_row_matrix = d3_data_matrix[0].reshape(attr_num, attr_len)
# data_row_matrix = time_as_feature_transpose(data_row_matrix, max_gap)
# new_row, new_col = data_row_matrix.shape
# new_row_col_all = new_row * new_col
# ret_data_matrix.append(data_row_matrix.reshape(new_row_col_all))
# for i in range(1, row_n):
# data_row_matrix = d3_data_matrix[i].reshape(attr_num, attr_len)
# data_row_matrix = time_as_feature_transpose(data_row_matrix, max_gap)
# ret_data_matrix.append(data_row_matrix.reshape(new_row_col_all))
#
# ret_data_matrix = np.array(ret_data_matrix).reshape(row_n, new_row_col_all)
# ret_data_stru = data_structure(data_stru.num_classes, data_stru.start_class, new_row, new_col, data_stru.class_column)
#
# return ret_data_matrix, ret_data_stru
#
##End of Duplication part
###########################################################################
#
#
###########################################################################
##cross validataion part
#
##Given data matrix (x_matrix) and correspsonding class label vector (y_vector), do cross validation
##Need num_classes to make sure the validataion is balanced for all classes
##ratio: the ratio of testing data
#def cross_validation(x_matrix, y_vector, num_classes, ratio=0.1):
# instance_count = len(y_vector)
# one_class_count = instance_count/num_classes
# start = 0;
# end = start + one_class_count
# train_x_matrix, test_x_matrix, train_y_vector, test_y_vector = train_test_split(x_matrix[start:end, :], y_vector[start:end], test_size=ratio, random_state=0)
# start = end
# end = end + one_class_count
# while(end<=instance_count):
# sub_train_x, sub_test_x, sub_train_y, sub_test_y = train_test_split(x_matrix[start:end, :], y_vector[start:end], test_size=ratio, random_state=0)
# train_x_matrix = np.concatenate((train_x_matrix, sub_train_x), axis = 0)
# test_x_matrix = np.concatenate((test_x_matrix, sub_test_x), axis=0)
# train_y_vector.extend(sub_train_y)
# test_y_vector.extend(sub_test_y)
# start = end
# end = end + one_class_count
# return train_x_matrix, train_y_vector, test_x_matrix, test_y_vector
#
##End of cross validation part
###########################################################################
#
#
###########################################################################
##Using feature to generate partial data
#
##For multiple time series data matrix
##data_matrix: N * M: N is the number of instances, M is a vector to represnet the attr * time matrix
##Need attr_num to reshape the 1*M vector to attr * time matrix
##attr_index_list: numpy array for the key attribute indexes
##time_index_list: numpy array for the key time indexes
#def old_feature_data_generation(data_matrix, attr_num, attr_index_list = None, method='attribute', time_index_list = None):
# row_n, col_n = data_matrix.shape
# time_len = col_n/attr_num
# ret_matrix = []
# new_row_col = 0
# if method == 'attribute':
# new_row = len(attr_index_list)
# new_row_col = new_row * time_len
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, time_len)
# matrix = matrix[attr_index_list, :]
# ret_matrix.append(matrix.reshape(new_row_col))
# attr_num = new_row
# elif method == 'time':
# new_col = len(time_index_list)
# new_row_col = attr_num * new_col
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, time_len)
# matrix = matrix[:, time_index_list]
# ret_matrix.append(matrix.reshape(new_row_col))
# time_len = new_col
# return np.array(ret_matrix).reshape(row_n, new_row_col), attr_num, time_len
#
## input data_matrix: 2d matrix with r * (a * l).
## r number of instances and each instance has a attributes and each attribute has length l
## atr_num: attribute number
## fature_index_list: a list contains the index of picked attributes
## Rturn: return a data matrix only contains the attributes from feature_index_list
#def feature_data_generation(data_matrix, attr_num, feature_index_list, feature_col_update=True):
# row_n, col_n = data_matrix.shape
# attr_len = col_n/attr_num
# ret_matrix = []
# new_row_col = 0
#
# if feature_col_update == True:
# new_row = len(feature_index_list)
# new_row_col = new_row * attr_len
# else:
# new_row = attr_num
# new_row_col = col_n
#
# for i in range(0, row_n):
# matrix = data_matrix[i].reshape(attr_num, attr_len)
# if feature_col_update == True:
# matrix = matrix[feature_index_list, :]
# ret_matrix.append(matrix.reshape(new_row_col))
# else:
# a = range(0, attr_num)
# remove_index = [x for x in a if x not in feature_index_list]
# matrix[remove_index, :] = 0
# ret_matrix.append(matrix.reshape(new_row_col))
#
# attr_num = new_row
# return np.array(ret_matrix).reshape(row_n, new_row_col), attr_num, attr_len
#
#
#
#
#
#
#
#def class_label_vector_checking(y_vector):
# min_class = min(y_vector)
# max_class = max(y_vector)
# class_index_dict = {}
# min_length = -1
# max_length = -1
# for c in range(min_class, max_class+1):
# c_index = np.where(y_vector==c)[0]
# class_index_dict[c] = c_index
# if min_length == -1:
# min_length = len(c_index)
# elif len(c_index) < min_length:
# min_length = len(c_index)
# if max_length == -1:
# max_length = len(c_index)
# elif len(c_index) > max_length:
# max_length = len(c_index)
#
# return class_index_dict, min_length, max_length
|
{"hexsha": "ce011d45cc819dd006392f36980b6913c1e00d3c", "size": 16805, "ext": "py", "lang": "Python", "max_stars_repo_path": "Baselines/mtsc_ca_sfcn/src/fileio/data_processing.py", "max_stars_repo_name": "JingweiZuo/SMATE", "max_stars_repo_head_hexsha": "d3e847038d9b7fb2bc08b3720b93f80b934e538d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-04-21T08:32:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T06:12:10.000Z", "max_issues_repo_path": "Baselines/mtsc_ca_sfcn/src/fileio/data_processing.py", "max_issues_repo_name": "SMATE2021/SMATE", "max_issues_repo_head_hexsha": "d3e847038d9b7fb2bc08b3720b93f80b934e538d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-24T10:38:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T10:38:46.000Z", "max_forks_repo_path": "Baselines/mtsc_ca_sfcn/src/fileio/data_processing.py", "max_forks_repo_name": "SMATE2021/SMATE", "max_forks_repo_head_hexsha": "d3e847038d9b7fb2bc08b3720b93f80b934e538d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7211981567, "max_line_length": 237, "alphanum_fraction": 0.6614102946, "include": true, "reason": "import numpy", "num_tokens": 4275}
|
# #create one label
# #update it with images
# #create callback for mouse hover and click
# #register the clicked point
# #use a button for operations on registered pixel coord
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QSlider
from PyQt5.QtGui import QIcon, QImage, QPixmap
from PyQt5.QtCore import pyqtSlot, pyqtSignal
import PyQt5.QtCore as QtCore
import matplotlib.pyplot as plt
import cvgutils.Viz as viz
import numpy as np
import h5py
import os
import torch
import cv2
# import cvgutils.Viz as viz
# from cvgutils import ui
class ImageViewer(QLabel):
    """QLabel that reports mouse activity on itself via Qt signals.

    Both signals carry the (x, y) event position in widget/label
    coordinates (i.e. pixels of the displayed, possibly zoomed image).
    """
    mouseMoved = pyqtSignal(int,int)    # emitted on every mouse move over the label
    mouseClicked = pyqtSignal(int,int)  # emitted when a mouse button is released
    def __init__(self,parent):
        super().__init__(parent)
        # Receive move events even while no mouse button is pressed.
        self.setMouseTracking(True)
    def mouseMoveEvent(self, e):
        self.mouseMoved.emit(e.x(),e.y())
    def mouseReleaseEvent(self, e):
        self.mouseClicked.emit(e.x(),e.y())
class App(QWidget):
    """Main viewer window for inspecting volume-rendering dumps.

    Loads one image's worth of rendering results from an HDF5 file,
    shows the predicted ray-color image, and on a pixel click writes
    per-ray diagnostic plots (transmittance, weights, alpha, sampled
    color) as images under ``renderout/``.
    """
    def __init__(self):
        super().__init__()
        self.title = 'PyQt5 button - pythonspot.com'
        self.left = 50
        self.top = 50
        # NOTE(review): assigning self.width/self.height shadows
        # QWidget.width()/height() methods on this instance — confirm
        # nothing else relies on calling them.
        self.width = 1500
        self.height = 600
        self.initUI()
    def print(self,x,y):
        """Mouse-move callback: log the hovered pixel in image coordinates."""
        # Undo the zoom factor so coordinates index the original image.
        x = int(x / self.sl.value())
        y = int(y / self.sl.value())
        text = f'x: {x}, y: {y}'
        print(text)
    def print2(self,x,y):
        """Mouse-click callback: dump per-ray plots for the clicked pixel.

        Reads the per-sample arrays for pixel (y, x) from the HDF5 file
        (datasets indexed as [image][y, x, sample, 0]) and writes plot
        images into ``renderout/``.
        """
        #plot opacity, transmittance
        #plot siren opacity, transmittance
        # Undo the zoom factor so (x, y) index the original image.
        x = int(x / self.sl.value())
        y = int(y / self.sl.value())
        transmittance_siren = self.f['transmittance_pred'][self.imid][y,x,:,0]
        transmittance_GT = self.f['fine_acc_alpha'][self.imid][y,x,:,0]
        # Optional datasets: only bind the locals when the key exists;
        # the 'in locals()' checks below gate the corresponding plots.
        if('weight_pred' in self.f):
            weight_pred = self.f['weight_pred'][self.imid][y,x,:,0]
            weight_gt = self.f['weight_GT'][self.imid][y,x,:,0]
        if('fine_alpha' in self.f):
            fine_alpha = self.f['fine_alpha'][self.imid][y,x,:,0]
            fine_samples_t = self.f['fine_samples_t'][self.imid][y,x,:,0]
        if('fine_samples_pred' in self.f):
            fine_raycolor_pred = self.f['fine_samples_pred'][self.imid][y,x,:,0]
        if('fine_samples_GT' in self.f):
            fine_raycolor_gt = self.f['fine_samples_GT'][self.imid][y,x,:,0]
        if('fine_samples_color' in self.f):
            fine_samples_color = self.f['fine_samples_color'][self.imid][y,x,:,0]
        # fine_rayposdir = self.f['fine_rayposdir'][self.imid][y,x,:,:]
        #log transmittance
        # eps keeps log() finite at zero transmittance; logrange rescales
        # -log(t + eps) into roughly [0, 1].
        eps = 1e-5
        logrange = np.log(1+eps) - np.log(eps)
        # NOTE(review): fine_samples_t is only bound when 'fine_alpha'
        # exists in the file — this first plot raises NameError otherwise.
        im = viz.plotOverlay(fine_samples_t,-np.log(transmittance_siren+1e-5) / logrange,-np.log(transmittance_GT+1e-5) / logrange,xlabel='t',ylabel='log transmittance',title='log Transmittance')
        if(not os.path.exists('renderout')):
            os.makedirs('renderout')
        cv2.imwrite('renderout/log_transmittance.png',im)
        #transmittance
        if('transmittance_siren' in locals() and 'transmittance_GT' in locals()):
            im = viz.plotOverlay(fine_samples_t,transmittance_siren,transmittance_GT,xlabel='t',title='Transmittance ReLU',ylabel='Transmittance')
            if(not os.path.exists('renderout')):
                os.makedirs('renderout')
            cv2.imwrite('renderout/transmittance.png',im)
        if('weight_pred' in locals() and 'weight_gt' in locals()):
            im = viz.plotOverlay(fine_samples_t,weight_pred,weight_gt,xlabel='t',ylabel='Weight',title='Weight ReLU')
            if(not os.path.exists('renderout')):
                os.makedirs('renderout')
            cv2.imwrite('renderout/Weight.png',im)
        if('fine_alpha' in locals()):
            im = viz.plot(fine_samples_t,fine_alpha,'.','t','alpha','alpha GT')
            if(not os.path.exists('renderout')):
                os.makedirs('renderout')
            cv2.imwrite('renderout/alpha.png',im)
        if('fine_samples_color' in locals()):
            im = viz.plot(fine_samples_t,fine_samples_color,'.','t','sampled color','sampled color gt')
            if(not os.path.exists('renderout')):
                os.makedirs('renderout')
            cv2.imwrite('renderout/sampled_color_gt.png',im)
        if('fine_samples_color' in locals() and 'weight_gt' in locals()):
            # color_gt = fine_samples_color * weight_gt
            # color_pred = fine_samples_color * weight_pred
            # NOTE(review): despite the names, this currently plots the
            # raw weights, not weight-modulated color (see commented lines).
            color_gt = weight_gt
            color_pred = weight_pred
            im = viz.plotOverlay(fine_samples_t,color_pred,color_gt,xlabel='t',ylabel='Intensity',title='color')
            if(not os.path.exists('renderout')):
                os.makedirs('renderout')
            cv2.imwrite('renderout/color.png',im)
    def zoom(self):
        """Slider callback: redisplay the image scaled by the slider value."""
        im = self.imorig.copy()
        dsize = (int(im.shape[1]*self.sl.value()),int(im.shape[0]*self.sl.value()))
        im = cv2.resize(im,dsize)
        # Gamma-correct (1/2.2) and quantize to 8-bit for display.
        im = np.clip(im,0,1) ** (1/2.2) * 255
        im = im.astype(np.uint8)[:,:,:].copy()
        h = im.shape[0]
        w = im.shape[1]
        qim = QImage(im, w, h, 3 * w, QImage.Format_RGB888)
        self.label.setPixmap(QPixmap(qim))
        self.label.setGeometry(QtCore.QRect(0,0,w,h))
    def initUI(self):
        """Build the UI, load the HDF5 dump, and write summary images."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.label = ImageViewer(self)
        self.imid = 0
        # NOTE(review): hard-coded dump path — assumes the viewer is run
        # from a directory containing ./hdf5/hdf5-w99-497.hdf5.
        self.f = h5py.File('./hdf5/hdf5-w99-497.hdf5','r')
        self.imorig = self.f['fine_raycolor_pred'][self.imid].copy()
        # Gamma-correct and quantize the predicted image for display/save.
        im = np.clip(self.imorig,0,1) ** (1/2.2) * 255
        im = im.astype(np.uint8)[:,:,:].copy()
        # [:,:,::-1] swaps RGB<->BGR for cv2.imwrite.
        cv2.imwrite('renderout/im.png',im[:,:,::-1])
        im2 = self.f['fine_raycolor'][self.imid].copy()
        im2 = np.clip(im2,0,1) ** (1/2.2) * 255
        im2 = im2.astype(np.uint8)[:,:,:].copy()
        cv2.imwrite('renderout/imGT.png',im2[:,:,::-1])
        transmittance_siren = self.f['transmittance_pred'][self.imid]
        transmittance_GT = self.f['fine_acc_alpha'][self.imid]
        if('weight_pred' in self.f):
            weight_pred = self.f['weight_pred'][self.imid]
            weight_gt = self.f['weight_GT'][self.imid]
            # fine_samples_t = self.f['fine_samples_t'][self.imid]
            # fine_rayposdir = self.f['fine_rayposdir'][self.imid]
            # Per-pixel weight sums and L2 error image over the sample axis.
            wpim = weight_pred.sum(-2)
            wgim = weight_gt.sum(-2)
            werr = (((weight_pred - weight_gt) ** 2).sum(-2) ** 0.5)
            cv2.imwrite('renderout/wpim.exr',wpim.astype(np.float32))
            cv2.imwrite('renderout/wgim.exr',wgim.astype(np.float32))
            cv2.imwrite('renderout/werr.exr',werr.astype(np.float32))
        self.h = im.shape[0]
        self.w = im.shape[1]
        qim = QImage(im, self.w, self.h, 3 * self.w, QImage.Format_RGB888)
        self.label.setPixmap(QPixmap(qim))
        # self.setCentralWidget(self.label)
        self.label.move(0,0)
        # fine_acc_alpha_pred = self.f['fine_acc_alpha_pred'][self.imid].sum(1).reshape(self.h,self.w,1).repeat(3,2)
        # fine_acc_alpha = self.f['fine_acc_alpha'][self.imid].sum(1).reshape(self.h,self.w,1).repeat(3,2)
        # fine_alpha = self.f['fine_alpha'][self.imid].sum(1).reshape(self.h,self.w,1).repeat(3,2)
        # fine_alpha_pred = self.f['fine_alpha_pred'][self.imid].sum(1).reshape(self.h,self.w,1).repeat(3,2)
        # sidebyside = np.concatenate((fine_acc_alpha_pred,fine_acc_alpha,fine_alpha_pred,fine_alpha),axis=1)
        # sidebyside = np.clip(sidebyside*0.001,0,1) ** (1/2.2) * 255
        # sidebyside = sidebyside.astype(np.uint8)
        # label2 = ImageViewer(self)
        # qim2 = QImage(sidebyside, sidebyside.shape[1], sidebyside.shape[0], 3 * sidebyside.shape[1], QImage.Format_RGB888)
        # label2.setPixmap(QPixmap(qim2))
        # label2.move(0,self.h)
        # Zoom slider (1x..3x).
        # NOTE(review): QSlider is integer-valued; a 0.2 single step is
        # likely truncated or rejected — confirm intended behavior.
        self.sl = QSlider(QtCore.Qt.Horizontal,self)
        self.sl.setMaximum(3)
        self.sl.setMinimum(1)
        self.sl.setSingleStep(0.2)
        self.sl.valueChanged.connect(self.zoom)
        self.sl.move(1024,0)
        button = QPushButton('PyQt5 button', self)
        button.setToolTip('This is an example button')
        button.move(1024,70)
        button.clicked.connect(self.on_click)
        # Hover logs coordinates; click dumps per-ray plots.
        self.label.mouseMoved.connect(self.print)
        self.label.mouseClicked.connect(self.print2)
        self.show()
    @pyqtSlot()
    def on_click(self):
        """Demo button callback; currently just logs a message."""
        print('PyQt5 button click')
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the viewer
    # window, and hand control to the event loop until it exits.
    qt_app = QApplication(sys.argv)
    viewer = App()
    sys.exit(qt_app.exec_())
|
{"hexsha": "499996364c36fa0825c54710f006dac58f9ec4e0", "size": 8568, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/volumeDensityViewer.py", "max_stars_repo_name": "mshafiei/cvutils", "max_stars_repo_head_hexsha": "5805229d8822a9ee4a3c63e060358aca96fe5338", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-19T12:38:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T21:30:36.000Z", "max_issues_repo_path": "apps/volumeDensityViewer.py", "max_issues_repo_name": "mshafiei/cvgutils", "max_issues_repo_head_hexsha": "ea93b3cb70a969c3b814f5a9f7672c6e00de5c61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apps/volumeDensityViewer.py", "max_forks_repo_name": "mshafiei/cvgutils", "max_forks_repo_head_hexsha": "ea93b3cb70a969c3b814f5a9f7672c6e00de5c61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8511627907, "max_line_length": 195, "alphanum_fraction": 0.6062091503, "include": true, "reason": "import numpy", "num_tokens": 2318}
|
import argparse
from pathlib import Path
from typing import List, Optional
import numpy as np
from pycbc.types import TimeSeries, FrequencySeries
from command_line import path_to_dir
from gw_data import (
train_file,
training_labels_file,
FREQ_SERIES_DELTA_F,
NOISE_FILENAME,
N_SIGNALS,
SIGNAL_DELTA_T,
)
from preprocessors import filter_sig
def update_psd(
    det_psd: FrequencySeries, fs: FrequencySeries, count: int
) -> FrequencySeries:
    """Fold one more spectrum into a running mean of spectral magnitudes.

    Given the mean magnitude over the previous ``count - 1`` samples in
    ``det_psd`` and a new frequency series ``fs``, return the mean of
    ``abs`` spectra over all ``count`` samples.
    """
    magnitude = abs(fs)
    previous_total = det_psd * (count - 1)
    return (magnitude + previous_total) / count
def sig_to_fs(sig: np.ndarray) -> FrequencySeries:
    """Window a raw strain signal and transform it to a frequency series."""
    windowed = TimeSeries(sig * filter_sig.WINDOW, delta_t=SIGNAL_DELTA_T)
    return windowed.to_frequencyseries(FREQ_SERIES_DELTA_F)
def compute_noise(source_dir: Path, sample_ids: List[str]) -> np.ndarray:
    """Estimate a per-detector noise PSD as the running mean of |FFT|
    over the given (noise-only) training samples.

    Returns an array of shape (N_SIGNALS, ...) stacking one averaged
    spectrum per detector. Raises ValueError on an empty id list.
    """
    if not sample_ids:
        raise ValueError("sample_ids cannot be empty")
    # Running-mean algorithm from
    # https://github.com/gwastro/pycbc/issues/3761#issuecomment-895066248
    total = len(sample_ids)
    running: Optional[List[FrequencySeries]] = None
    for count, sample_id in enumerate(sample_ids, start=1):
        signals = np.load(str(train_file(source_dir, sample_id)))
        spectra = [sig_to_fs(signals[det]) for det in range(N_SIGNALS)]
        if running is None:
            # First sample seeds the running mean with its magnitudes.
            running = [abs(spectrum) for spectrum in spectra]
        else:
            running = [
                update_psd(running[det], spectra[det], count)
                for det in range(N_SIGNALS)
            ]
        if count % 1000 == 0:
            print(f"Completed {count} of {total}")
    assert running
    return np.stack(running)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"source",
help="directory containing the input dataset, in the original g2net directory structure",
type=path_to_dir,
)
arg_parser.add_argument(
"dest",
help="directory to write noise.npy",
type=Path,
)
args = arg_parser.parse_args()
rows = np.loadtxt(str(training_labels_file(args.source)), delimiter=",", dtype=str)
negative_ids = [idd for idd, label in rows[1:] if label == "0"]
noise = compute_noise(args.source, negative_ids)
np.save(args.dest / NOISE_FILENAME, noise)
|
{"hexsha": "1893f3204450b7d4c6127341a3e6f0efe7fd7916", "size": 2108, "ext": "py", "lang": "Python", "max_stars_repo_path": "compute_noise.py", "max_stars_repo_name": "wisdom-parts/kaggle-gw", "max_stars_repo_head_hexsha": "12df76920e6e1cb5e0f2ffa80cd3f0f3b3586903", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "compute_noise.py", "max_issues_repo_name": "wisdom-parts/kaggle-gw", "max_issues_repo_head_hexsha": "12df76920e6e1cb5e0f2ffa80cd3f0f3b3586903", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compute_noise.py", "max_forks_repo_name": "wisdom-parts/kaggle-gw", "max_forks_repo_head_hexsha": "12df76920e6e1cb5e0f2ffa80cd3f0f3b3586903", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6901408451, "max_line_length": 97, "alphanum_fraction": 0.6797912713, "include": true, "reason": "import numpy", "num_tokens": 531}
|
      subroutine scale_op(label_res,mode,idx_blk,fac,label_inp,nblk,
     &     op_info,orb_info,str_info)
*----------------------------------------------------------------------*
*     scale blocks of operator list
*     mode == 1:
*       by factor fac,
*       if nblk==-1, all blocks are scaled with the same factor
*     mode == 2:
*       label_inp(2) contains scalar ME-list with respective scaling
*       factor
*
*     arguments:
*       label_res  label of the ME-list receiving the scaled blocks
*       mode       scaling mode (see above); for mode >= 3 the inverse
*                  of the factor read from label_inp(2) is used
*       idx_blk    block indices to scale (mode 1, nblk > 0)
*       fac        scaling factors per block (mode 1)
*       label_inp  (1): input ME-list, (2): scalar factor list (mode 2)
*       nblk       number of entries in idx_blk/fac; -1 = all blocks
*----------------------------------------------------------------------*
      implicit none
      integer, parameter ::
     &     ntest = 00
      include 'stdunit.h'
      include 'opdim.h'
      include 'def_orbinf.h'
      include 'mdef_operator_info.h'
      include 'def_graph.h'
      include 'def_strinf.h'
      include 'ifc_memman.h'
      include 'par_opnames_gen.h'
      integer, intent(in) ::
     &     mode, nblk, idx_blk(*)
      real(8), intent(in) ::
     &     fac(*)
      character(*), intent(in) ::
     &     label_res, label_inp(2)
      type(operator_info), intent(inout) ::
     &     op_info
      type(orbinf), intent(in) ::
     &     orb_info
      type(strinf), intent(in) ::
     &     str_info
      type(me_list), pointer ::
     &     me_res, me_inp, me_fac
      integer ::
     &     idx_res, idx_inp, idx_fac, idx, idxnd,
     &     njoined, iblk, ipri
      logical ::
     &     open_close_res, open_close_inp, open_close_fac,
     &     same
      real(8) ::
     &     cpu, sys, wall, cpu0, sys0, wall0, xnorm2, factor
      ! note: occ is declared but not referenced in this routine
      integer, pointer ::
     &     occ(:,:,:)
      integer, external ::
     &     idx_mel_list, vtx_type
      ! start timer for the statistics printed at the end
      call atim_csw(cpu0,sys0,wall0)
      ! debug printout of the requested operation
      if(ntest.ge.100)then
        write(lulog,*) '===================='
        write(lulog,*) ' scale operators '
        write(lulog,*) '===================='
        write(lulog,*) 'Result: ',trim(label_res)
        if (mode.eq.1) then
          write(lulog,*) 'The factors and summands: '
          do idx = 1, nblk
            write(lulog,'(3x,f12.6,x,i8)') fac(idx),idx_blk(idx)
          end do
          if (nblk.lt.0) then
            write(lulog,'(3x,f12.6,x,a)')
     &           fac(idx),'applied to all blocks'
          end if
        else
          write(lulog,*) 'Scaling factor(s) on: ',trim(label_inp(2))
        end if
      endif
      ! resolve ME-list labels to indices on op_info; a negative index
      ! means the label is unknown and we abort below
      idx_res = idx_mel_list(label_res,op_info)
      idx_inp = idx_mel_list(label_inp(1),op_info)
      if (mode.ge.2) then
        idx_fac = idx_mel_list(label_inp(2),op_info)
      else
        ! dummy value; the factor list is unused in mode 1
        idx_fac = 1
      end if
      if (idx_res.lt.0) then
        write(lulog,*) '"',trim(label_res),'"'
        write(lulog,*) idx_res
        call quit(1,'scale_op','label not on list (1)')
      end if
      if (idx_inp.lt.0) then
        write(lulog,*) '"',trim(label_inp(1)),'"'
        write(lulog,*) idx_inp
        call quit(1,'scale_op','label not on list (2)')
      end if
      if (idx_fac.lt.0) then
        write(lulog,*) '"',trim(label_inp(2)),'"'
        write(lulog,*) idx_fac
        call quit(1,'scale_op','label not on list (3)')
      end if
      ! in-place scaling if input and result are the same list
      same = idx_res.eq.idx_inp
      ! Point to the relevant operators and their associated files.
      ! The open_close_* flags remember whether this routine opened a
      ! file itself, so only those files are closed again at the end.
      me_res => op_info%mel_arr(idx_res)%mel
      if (.not.associated(me_res%fhand))
     &     call quit(1,'scale_op','no file handle defined for '//
     &     trim(me_res%label))
      open_close_res = me_res%fhand%unit.le.0
      if(open_close_res)then
        call file_open(me_res%fhand)
      endif
      me_inp => op_info%mel_arr(idx_inp)%mel
      if (.not.same) then
        if (.not.associated(me_inp%fhand))
     &       call quit(1,'scale_op','no file handle defined for '//
     &       trim(me_inp%label))
        open_close_inp = me_inp%fhand%unit.le.0
        if (open_close_inp) then
          call file_open(me_inp%fhand)
        endif
      else
        open_close_inp = .false.
      end if
      ! set the block loop limit and, for mode >= 2, fetch and check
      ! the scalar ME-list that supplies the factor
      if (mode.eq.1) then
        idxnd = nblk
        if (nblk.lt.0) idxnd = me_inp%op%n_occ_cls
        open_close_fac = .false.
      else
        idxnd = me_inp%op%n_occ_cls
        me_fac => op_info%mel_arr(idx_fac)%mel
        if (vtx_type(me_fac%op).ne.vtxtyp_scalar)
     &       call quit(1,'scale_op',
     &       trim(me_fac%label)//' is not a scalar')
        open_close_fac = me_fac%fhand%unit.le.0
        if (open_close_fac) then
          call file_open(me_fac%fhand)
        endif
      end if
      njoined = me_res%op%njoined
      ! needed: outer loop over active records
      ! load factor
      if (mode.ge.2) then
        if (me_fac%fhand%buffered) then
          factor = me_fac%fhand%buffer(1)
        else
          call get_vec(me_fac%fhand,factor,1,1)
        end if
        ! mode >= 3: divide by the stored factor instead of multiplying
        if (mode.ge.3) factor = 1d0/factor
        if (ntest.ge.10) write(lulog,*)
     &       'factor from list: ',factor
      else
        factor = fac(1)
      end if
      ! scale the selected blocks one by one; in mode 1 with explicit
      ! block list each block has its own factor
      do idx = 1, idxnd
        if (mode.eq.1.and.nblk.gt.0) then
          iblk = idx_blk(idx)
          factor = fac(idx)
        else
          iblk = idx
        end if
        call scale_opblk(xnorm2,factor,me_inp,me_res,
     &       iblk,iblk,orb_info)
      end do
      ! needed: close loop over active records
      ! close only the files opened by this routine
      if (open_close_fac)
     &     call file_close_keep(me_fac%fhand)
      if (open_close_res)
     &     call file_close_keep(me_res%fhand)
      if (open_close_inp)
     &     call file_close_keep(me_inp%fhand)
      ! optional debug dump of the scaled list; verbosity grows with ntest
      if (ntest.ge.10) then
        write(lulog,*) 'dump of scaled list:'
        if (ntest.ge.10) ipri = 1
        if (ntest.ge.50) ipri = 2
        if (ntest.ge.100) ipri = 3
        if (ntest.ge.500) ipri = 4
        if (ntest.ge.1000) ipri = 5
        call wrt_mel_file(lulog,ipri,me_res,
     &       1,me_res%op%n_occ_cls,
     &       str_info,orb_info)
      end if
      call atim_csw(cpu,sys,wall)
      call prtim(lulog,'time for scaling',
     &     cpu-cpu0,sys-sys0,wall-wall0)
      return
      end
|
{"hexsha": "ad345608573a814997bb2f5b4131d9dc856a5171", "size": 5951, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "contract/scale_op.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contract/scale_op.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contract/scale_op.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4736842105, "max_line_length": 72, "alphanum_fraction": 0.5313392707, "num_tokens": 1729}
|
# Integer type used for unique ids.
const ID = Int
# Module-wide counter backing `uid`; every call to `uid` advances it.
const uidcounter = Counter(0)
# # __init__() = global uidcounter = Counter(0) #= also works =#
# __init__() = reset!(uidcounter)
"Return a fresh, globally unique integer id (monotonically increasing)."
# uid() = (global uidcounter; @show increment!(uidcounter))
uid() = increment!(uidcounter)
@spec :nocheck (x = [uid() for i = 1:Inf]; unique(x) == x)
"Construct globally unique id for indices for ω"
# NOTE(review): the macro body calls `uid()` directly (not quoted), so
# the id is generated at macro-expansion time — each `@id` call site is
# baked to one fixed integer rather than producing a new id per execution.
macro id()
  uid()
end
|
{"hexsha": "7ecc6402d983b6b70e5ac6bb4e414bf96d559017", "size": 382, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/space/idgen.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Omega.jl-1af16e33-887a-59b3-8344-18f1671b3ade", "max_stars_repo_head_hexsha": "9dbaa559991a728e8239767d9627419e41037847", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/space/idgen.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Omega.jl-1af16e33-887a-59b3-8344-18f1671b3ade", "max_issues_repo_head_hexsha": "9dbaa559991a728e8239767d9627419e41037847", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/space/idgen.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Omega.jl-1af16e33-887a-59b3-8344-18f1671b3ade", "max_forks_repo_head_hexsha": "9dbaa559991a728e8239767d9627419e41037847", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.4705882353, "max_line_length": 64, "alphanum_fraction": 0.6596858639, "num_tokens": 114}
|
import numpy as np
import pandas as pd
import pandas.api.types as ptypes
# NOTE: function name kept as-is ("dateime") for backward compatibility
# with existing callers.
def cast_to_dateime(df, columns=None, format=None, return_df=False):
    """Cast the given columns of a DataFrame to datetime.

    Parameters
    ----------
    df : pandas.DataFrame
        A dataframe containing the data to transform.
    columns : list[str]
        Columns to cast to datetime. Must be non-empty and present in df.
    format : str, default None
        strftime format used to parse the values, e.g. "%d/%m/%Y".
        Specifying this speeds up the conversion.
    return_df : bool, default False
        If True, return a copy of the full DataFrame with the columns
        cast in place; otherwise return only the cast columns.

    Returns
    -------
    pandas.DataFrame
        A copy of ``df`` (or just ``columns`` of it) with the requested
        columns converted to datetime64. The input ``df`` is never mutated.

    Raises
    ------
    ValueError
        If ``columns`` is None/empty, or names a column not in ``df``.
    """
    # Explicit validation instead of the previous asserts: asserts are
    # stripped under `python -O`, the old membership check was vacuously
    # true, and the old dtype assert required the columns to *already*
    # be datetime, defeating the purpose of this function.
    if not columns:
        raise ValueError("'columns' must be a non-empty list of column names")
    missing = [col for col in columns if col not in df.columns]
    if missing:
        raise ValueError(f"columns not found in DataFrame: {missing}")

    # Defensive copy so the caller's DataFrame is untouched.
    df_copy = df.copy()

    # Vectorized conversion (pd.to_datetime on the whole Series) rather
    # than an element-wise .apply — same result, much faster.
    for col in columns:
        df_copy[col] = pd.to_datetime(df_copy[col], format=format)

    if return_df:
        return df_copy
    return df_copy[columns]
|
{"hexsha": "d512f9813b8bcf6fa544584bcaf45f6e91f75298", "size": 1456, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_science_toolbox/pandas/datetime/cast_to_datetime.py", "max_stars_repo_name": "safurrier/data_science_utils", "max_stars_repo_head_hexsha": "842b025ea3197e8a9946401257b2fa22ef1bf82d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-14T21:16:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-05T20:20:42.000Z", "max_issues_repo_path": "data_science_toolbox/pandas/datetime/cast_to_datetime.py", "max_issues_repo_name": "safurrier/data_science_utils", "max_issues_repo_head_hexsha": "842b025ea3197e8a9946401257b2fa22ef1bf82d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_science_toolbox/pandas/datetime/cast_to_datetime.py", "max_forks_repo_name": "safurrier/data_science_utils", "max_forks_repo_head_hexsha": "842b025ea3197e8a9946401257b2fa22ef1bf82d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-30T20:59:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-30T20:59:04.000Z", "avg_line_length": 32.3555555556, "max_line_length": 80, "alphanum_fraction": 0.665521978, "include": true, "reason": "import numpy", "num_tokens": 324}
|
import spira.all as spira
import numpy as np
from spira.yevon.geometry import shapes
from spira.yevon.geometry.route.route_shaper import RouteSimple
from spira.yevon.geometry.route.route_shaper import RouteGeneral
from spira.yevon.utils.geometry import scale_coord_up as scu
from spira.yevon.geometry.route.manhattan import __Manhattan__
from spira.core.parameters.descriptor import Parameter
from spira.yevon.geometry.vector import *
from copy import deepcopy
class RouteBase180(__Manhattan__):
def create_quadrant_one(self):
h = (self.p2[1]-self.p1[1])/2 - self.radius
self.b1.distance_alignment(port=self.b1.ports['P2'], destination=self.ports['T1'], distance=h)
self.b2.distance_alignment(port=self.b2.ports['P1'], destination=self.ports['T2'], distance=-h)
r1 = self.route_straight(self.b1.ports['P2'], self.ports['T1'])
r2 = self.route_straight(self.b2.ports['P1'], self.ports['T2'])
r3 = self.route_straight(self.b2.ports['P2'], self.b1.ports['P1'])
D = spira.Cell(name='Q1')
D += self.b1
D += self.b2
D += r1
D += r2
D += r3
# D += self.ports['T1']
# D += self.ports['T2']
# D.rotate(angle=self.port1.orientation, center=self.p1)
# D.move(midpoint=self.ports['T1'], destination=self.port1)
return spira.SRef(D)
def create_quadrant_two(self):
h = (self.p2[1]-self.p1[1])/2 - self.radius
self.b1.distance_alignment(port=self.b1.ports['P1'], destination=self.ports['T1'], distance=h)
self.b2.distance_alignment(port=self.b2.ports['P2'], destination=self.ports['T2'], distance=-h)
r1 = self.route_straight(self.b2.ports['P2'], self.ports['T2'])
r2 = self.route_straight(self.b1.ports['P1'], self.ports['T1'])
r3 = self.route_straight(self.b2.ports['P1'], self.b1.ports['P2'])
D = spira.Cell(name='Q2')
D += self.b1
D += self.b2
D += r1
D += r2
D += r3
# D += self.ports['T1']
# D += self.ports['T2']
# D.rotate(angle=self.port1.orientation, center=self.p1)
# D.move(midpoint=self.ports['T1'], destination=self.port1)
return spira.SRef(D)
def create_quadrant_three(self):
# self.b1.connect(port=self.b1.ports['P2'], destination=self.ports['T1'])
# # h = self.p2[1] + (self.p1[1]-self.p2[1])/2 + self.radius
# h = (self.p1[1]-self.p2[1])/2 + self.radius
# self.b1.move(midpoint=self.b1.ports['P2'], destination=[0, h])
# self.b2.connect(port=self.b2.ports['P2'], destination=self.b1.ports['P1'])
# # h = self.p2[1] + (self.p1[1]-self.p2[1])/2 - self.radius
# h = (self.p1[1]-self.p2[1])/2 - self.radius
# self.b2.move(midpoint=self.b2.ports['P1'], destination=[self.ports['T2'].midpoint[0], h])
# r1 = self.route_straight(self.b2.ports['P1'], self.ports['T2'])
# r2 = self.route_straight(self.b1.ports['P2'], self.ports['T1'])
# r3 = self.route_straight(self.b2.ports['P2'], self.b1.ports['P1'])
D = spira.Cell(name='Q3')
D += self.b1
# D += [self.b1, self.b2, r1, r2, r3]
# D += self.ports['T1']
# D += self.ports['T2']
# D.rotate(angle=self.port1.orientation, center=self.p1)
# D.move(midpoint=self.ports['T1'], destination=self.port1)
return spira.SRef(D)
def create_quadrant_four(self):
    """Create the route for the case where port 2 lies in quadrant IV
    (below and to the right of port 1).

    NOTE(review): only the alignment of the first bend is active; the
    rest of the routing (second bend plus straight segments) is
    commented out, so the returned cell contains just ``self.b1``.
    Confirm whether quadrant-IV routing is still TODO.
    """
    # Vertical position of the bend: halfway between the terminals,
    # offset by the bend radius.
    h = self.p2[1] + (self.p1[1]-self.p2[1])/2 + self.radius
    self.b1.distance_alignment(port=self.b1.ports['P1'], destination=self.ports['T2'], distance=h)
    # Intended remainder of the implementation, kept for reference:
    # self.b1.connect(port=self.b1.ports['P1'], destination=self.ports['T1'])
    # # h = self.p2[1] + (self.p1[1]-self.p2[1])/2 + self.radius
    # h = (self.p1[1]-self.p2[1])/2 + self.radius
    # self.b1.move(midpoint=self.b1.ports['P1'], destination=[0, h])
    # self.b2.connect(port=self.b2.ports['P1'], destination=self.b1.ports['P2'])
    # # h = self.p2[1] + (self.p1[1]-self.p2[1])/2 - self.radius
    # h = (self.p1[1]-self.p2[1])/2 - self.radius
    # self.b2.move(midpoint=self.b2.ports['P2'], destination=[self.ports['T2'].midpoint[0], h])
    # r1 = self.route_straight(self.b1.ports['P1'], self.ports['T1'])
    # r2 = self.route_straight(self.b2.ports['P2'], self.ports['T2'])
    # r3 = self.route_straight(self.b2.ports['P1'], self.b1.ports['P2'])
    D = spira.Cell(name='Q4')
    D += self.b1
    # D += [self.b1, self.b2, r1, r2, r3]
    # D += self.ports['T1']
    # D += self.ports['T2']
    # D.rotate(angle=self.port1.orientation, center=self.p1)
    # D.move(midpoint=self.ports['T1'], destination=self.port1)
    return spira.SRef(D)
class RouteParallel(__Manhattan__):
    """Manhattan routes between two ports whose orientations are equal
    (parallel / same-facing ports).

    Each ``create_*`` method places the two pre-built 90-degree bends
    (``self.b1``, ``self.b2``) by mutating them in place, connects them
    with straight segments, and assembles everything into a cell.  The
    statement order inside each method is significant: ``connect`` /
    ``move`` / ``distance_alignment`` mutate the bend geometry that the
    subsequent ``route_straight`` calls depend on.
    """

    # Lazily-evaluated route generators, dispatched by Route180 below.
    parallel = Parameter(fdef_name='create_parallel_route')
    quadrant_one_parallel = Parameter(fdef_name='create_quadrant_one_parallel')
    q1 = Parameter(fdef_name='create_q1_180')
    q2 = Parameter(fdef_name='create_q2_180')
    q3 = Parameter(fdef_name='create_q3_180')
    q4 = Parameter(fdef_name='create_q4_180')

    def create_parallel_route(self):
        """Route two same-facing ports that are axis-aligned."""
        p1, p2 = self.p1, self.p2
        # Default bend order; swapped below when port 2 is to the right.
        b1, b2 = self.b2, self.b1
        # NOTE(review): dx/dy are computed but never used in this method.
        dx = max(p1[0], p2[0])
        dy = max(p1[1], p2[1])
        if p2[0] > p1[0]:
            b1, b2 = self.b1, self.b2
        # Height of the horizontal jog above port 2.
        h = p2[1] + self.length
        d1 = [0, h]
        d2 = [self.ports['T2'].midpoint[0], h]
        b1.connect(port=b1.ports['P2'], destination=self.ports['T1'])
        b1.move(midpoint=b1.ports['P2'], destination=d1)
        b2.connect(port=b2.ports['P2'], destination=b1.ports['P1'])
        b2.move(midpoint=b2.ports['P1'], destination=d2)
        # Straight segments: terminal 1 -> bend 1, terminal 2 -> bend 2,
        # and the jog between the two bends.
        r1 = self.route_straight(b1.ports['P2'], self.ports['T1'])
        r2 = self.route_straight(b2.ports['P1'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P1'], b2.ports['P2'])
        D = spira.Cell(name='Parallel')
        D += [self.b1, self.b2, r1, r2, r3]
        t1 = self.ports['T1']
        t2 = self.ports['T2']
        # Rotate the terminals and the cell back into the frame of port 1.
        t1.rotate(angle=self.port1.orientation)
        t2.rotate(angle=self.port1.orientation)
        D.rotate(angle=self.port1.orientation, center=self.p1)
        D.move(midpoint=self.ports['T1'], destination=self.port1)
        return spira.SRef(D)

    def create_quadrant_one_parallel(self):
        """Route two same-facing ports with port 2 in quadrant I,
        handling each of the four cardinal orientations of port 1.

        Returns the raw element list (bends plus straight segments)
        rather than a wrapped cell.
        """
        p1 = [self.port1.midpoint[0], self.port1.midpoint[1]]
        p2 = [self.port2.midpoint[0], self.port2.midpoint[1]]
        b1, b2 = self.b2, self.b1
        d1, d2 = [0,0], [0,0]
        # Pick the jog position and bend order per orientation.
        if self.port1.orientation == 0:
            dy = max(p1[1], p2[1])
            if p2[0] > p1[0]:
                b1, b2 = self.b1, self.b2
            h = dy + self.length
            d1 = [0, h]
            d2 = [self.ports['T2'].midpoint[0], h]
        elif self.port1.orientation == 90:
            dx = max(p1[0], p2[0])
            if p2[1] > p1[1]:
                b1, b2 = self.b1, self.b2
            h = dx - self.length
            d1 = [h, 0]
            d2 = [h, self.ports['T2'].midpoint[1]]
        elif self.port1.orientation == -90:
            dx = min(p1[0], p2[0])
            if p1[1] > p2[1]:
                b1, b2 = self.b1, self.b2
            h = dx + self.length
            d1 = [h, 0]
            d2 = [h, self.ports['T2'].midpoint[1]]
        elif self.port1.orientation == 180:
            dy = min(p1[1], p2[1])
            if p1[0] > p2[0]:
                b1, b2 = self.b1, self.b2
            elif p2[0] > p1[0]:
                b1, b2 = self.b2, self.b1
            h = dy - self.length
            d1 = [0, h]
            d2 = [self.ports['T2'].midpoint[0], h]
        # NOTE(review): orientations other than 0/90/-90/180 fall through
        # with d1 = d2 = [0, 0] -- confirm callers only pass cardinal angles.
        b1.connect(port=b1.ports['P2'], destination=self.ports['T1'])
        b1.move(midpoint=b1.ports['P2'], destination=d1)
        b2.connect(port=b2.ports['P1'], destination=b1.ports['P1'])
        b2.move(midpoint=b2.ports['P2'], destination=d2)
        r1 = self.route_straight(b1.ports['P2'], self.ports['T1'])
        r2 = self.route_straight(b2.ports['P2'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P1'], b2.ports['P1'])
        return [self.b1, self.b2, r1, r2, r3]

    def create_q1_180(self):
        """Same-orientation route, port 2 in quadrant I: jog above port 2."""
        b1 = self.b1
        b2 = self.b2
        b1.connect(port=b1.ports['P2'], destination=self.ports['T1'])
        # Jog height: clear port 2 by one bend radius plus the overshoot.
        h = self.p2[1] + self.radius + self.length
        b1.move(midpoint=b1.ports['P2'], destination=[0, h])
        b2.connect(port=b2.ports['P1'], destination=b1.ports['P1'])
        b2.move(midpoint=b2.ports['P2'], destination=[self.ports['T2'].midpoint[0], h])
        r1 = self.route_straight(b1.ports['P2'], self.ports['T1'])
        r2 = self.route_straight(b2.ports['P2'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P1'], b2.ports['P1'])
        D = spira.Cell(name='SameQ1')
        D += [self.b1, self.b2, r1, r2, r3]
        D += self.ports['T1']
        D += self.ports['T2']
        # Transform back into the global frame of port 1.
        D.rotate(angle=self.port1.orientation, center=self.p1)
        D.move(midpoint=self.ports['T1'], destination=self.port1)
        return spira.SRef(D)

    def create_q2_180(self):
        """Same-orientation route, port 2 in quadrant II (mirror of q1)."""
        b1 = self.b1
        b2 = self.b2
        b1.connect(port=b1.ports['P1'], destination=self.ports['T2'])
        h = self.p2[1] + self.radius + self.length
        b1.move(midpoint=b1.ports['P1'], destination=[0, h])
        b2.connect(port=b2.ports['P2'], destination=b1.ports['P2'])
        b2.move(midpoint=b2.ports['P1'], destination=[self.ports['T2'].midpoint[0], h])
        r1 = self.route_straight(b1.ports['P1'], self.ports['T1'])
        r2 = self.route_straight(b2.ports['P1'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P2'], b2.ports['P2'])
        D = spira.Cell(name='SameQ2')
        D += [self.b1, self.b2, r1, r2, r3]
        D += self.ports['T1']
        D += self.ports['T2']
        D.rotate(angle=self.port1.orientation, center=self.p1)
        D.move(midpoint=self.ports['T1'], destination=self.port1)
        return spira.SRef(D)

    def create_q3_180(self):
        """Same-orientation route, port 2 in quadrant III.

        NOTE(review): identical to ``create_q2_180`` except the jog
        height uses ``p1[1]`` instead of ``p2[1]`` -- confirm this is the
        intended distinction.
        """
        b1 = self.b1
        b2 = self.b2
        b1.connect(port=b1.ports['P1'], destination=self.ports['T2'])
        h = self.p1[1] + self.radius + self.length
        b1.move(midpoint=b1.ports['P1'], destination=[0, h])
        b2.connect(port=b2.ports['P2'], destination=b1.ports['P2'])
        b2.move(midpoint=b2.ports['P1'], destination=[self.ports['T2'].midpoint[0], h])
        r1 = self.route_straight(b1.ports['P1'], self.ports['T1'])
        r2 = self.route_straight(b2.ports['P1'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P2'], b2.ports['P2'])
        D = spira.Cell(name='SameQ3')
        D += [self.b1, self.b2, r1, r2, r3]
        D += self.ports['T1']
        D += self.ports['T2']
        D.rotate(angle=self.port1.orientation, center=self.p1)
        D.move(midpoint=self.ports['T1'], destination=self.port1)
        return spira.SRef(D)

    def create_q4_180(self):
        """Same-orientation route, port 2 in quadrant IV (bend roles swapped)."""
        b1 = self.b1
        b2 = self.b2
        b2.connect(port=b2.ports['P1'], destination=self.ports['T1'])
        h = self.p1[1] + self.radius + self.length
        b2.move(midpoint=b2.ports['P1'], destination=[0, h])
        b1.connect(port=b1.ports['P2'], destination=b2.ports['P2'])
        b1.move(midpoint=b1.ports['P1'], destination=[self.ports['T2'].midpoint[0], h])
        r1 = self.route_straight(b2.ports['P1'], self.ports['T1'])
        r2 = self.route_straight(b1.ports['P1'], self.ports['T2'])
        r3 = self.route_straight(b1.ports['P2'], b2.ports['P2'])
        D = spira.Cell(name='SameQ4')
        D += [self.b1, self.b2, r1, r2, r3]
        D += self.ports['T1']
        D += self.ports['T2']
        D.rotate(angle=self.port1.orientation, center=self.p1)
        D.move(midpoint=self.ports['T1'], destination=self.port1)
        return spira.SRef(D)
class Route180(RouteBase180, RouteParallel):
    """ Route ports that has a 180 degree difference.

    Also handles the equal-orientation (parallel) case by dispatching to
    the ``RouteParallel`` generators.  Any other angular relationship
    between the two ports is rejected with ``ValueError``.
    """

    def create_elements(self, elems):
        """Select and append the correct route based on which quadrant
        port 2 occupies relative to port 1."""
        p1, p2 = self.p1, self.p2
        if self.port1.orientation == self.port2.orientation:
            # Ports face the same way: use the "parallel" route family.
            if (p1[1] == p2[1]) or (p1[0] == p2[0]):
                R = self.parallel
            if (p2[1] > p1[1]) and (p2[0] > p1[0]):
                print('Q1 Equal Angles')
                R = self.q1
            if (p2[1] > p1[1]) and (p2[0] < p1[0]):
                print('Q2 Equal Angles')
                R = self.q2
            if (p2[1] < p1[1]) and (p2[0] < p1[0]):
                print('Q3 Equal Angles')
                R = self.q3
            if (p2[1] < p1[1]) and (p2[0] > p1[0]):
                print('Q4 Equal Angles')
                R = self.q4
        elif np.round(np.abs(np.mod(self.port1.orientation - self.port2.orientation,360)),3) != 180:
            raise ValueError('[DEVICE] route() error: Ports do not face each other (orientations must be 180 apart)')
        else:
            # Opposed (180-degree) ports: pick the quadrant route.
            # NOTE(review): if the ports are axis-aligned here
            # (p1[0] == p2[0] or p1[1] == p2[1]) no branch assigns R and
            # the `elems += R` below raises NameError -- confirm whether
            # that case should be routed or rejected explicitly.
            if (p2[1] > p1[1]) and (p2[0] > p1[0]):
                print('Q1')
                R = self.quadrant_one
            if (p2[1] > p1[1]) and (p2[0] < p1[0]):
                print('Q2')
                R = self.quadrant_two
            if (p2[1] < p1[1]) and (p2[0] < p1[0]):
                print('Q3')
                R = self.quadrant_three
            if (p2[1] < p1[1]) and (p2[0] > p1[0]):
                print('Q4')
                R = self.quadrant_four
        elems += R
        # Disabled post-processing that merged the route polygons into a
        # single polygon, kept for reference:
        # points = []
        # for e in R.reference.flatten():
        #     if isinstance(e, spira.Polygon):
        #         for p in e.points:
        #             points.append(p)
        # route_shape = shapes.Shape(points=points)
        # route_shape.apply_merge
        # poly = pc.Polygon(points=route_shape.points, layer=self.layer, enable_edges=False)
        # elems += poly
        return elems

    def create_ports(self, ports):
        """Create the two local terminal ports T1/T2 in the route's own
        frame (port 1 translated to the origin)."""
        angle_diff = self.port1.orientation - self.port2.orientation
        if self.port1.orientation == self.port2.orientation:
            # Parallel ports: both terminals face the same direction.
            ports += spira.Port(name='T1',
                width=self.port1.width,
                orientation=0
            )
            ports += spira.Port(name='T2',
                midpoint=list(np.subtract(self.p2, self.p1)),
                width=self.port2.width,
                orientation=0
            )
        elif np.round(np.abs(np.mod(angle_diff, 360)), 3) != 180:
            raise ValueError("2. [DEVICE] route() error: Ports do not " +
                "face each other (orientations must be 180 apart)")
        else:
            # Opposed ports: terminals face each other (+90 / -90).
            ports += spira.Port(name='T1',
                width=self.port1.width,
                orientation=90
                # orientation=0
            )
            ports += spira.Port(name='T2',
                midpoint=list(np.subtract(self.p2, self.p1)),
                width=self.port2.width,
                orientation=-90
                # orientation=180
            )
        return ports
|
{"hexsha": "6a94cbe1e93fb8e97fcfe35dea25760343b9eff4", "size": 14918, "ext": "py", "lang": "Python", "max_stars_repo_path": "spira/yevon/geometry/route/manhattan180.py", "max_stars_repo_name": "JCoetzee123/spira", "max_stars_repo_head_hexsha": "dae08feba1578ecc8745b45109f4fb7bef374546", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spira/yevon/geometry/route/manhattan180.py", "max_issues_repo_name": "JCoetzee123/spira", "max_issues_repo_head_hexsha": "dae08feba1578ecc8745b45109f4fb7bef374546", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spira/yevon/geometry/route/manhattan180.py", "max_forks_repo_name": "JCoetzee123/spira", "max_forks_repo_head_hexsha": "dae08feba1578ecc8745b45109f4fb7bef374546", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8605769231, "max_line_length": 121, "alphanum_fraction": 0.5501407695, "include": true, "reason": "import numpy", "num_tokens": 4629}
|
x <= not (c or b or a);
|
{"hexsha": "1a63b41f1c7245442023cf3307b75877a96b2adb", "size": 24, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/f/opt3_not_or.f", "max_stars_repo_name": "Deshiuu/351lab-code-copy", "max_stars_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-18T23:28:13.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-18T23:28:13.000Z", "max_issues_repo_path": "tests/f/opt3_not_or.f", "max_issues_repo_name": "Deshiuu/351lab-code-copy", "max_issues_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/f/opt3_not_or.f", "max_forks_repo_name": "Deshiuu/351lab-code-copy", "max_forks_repo_head_hexsha": "4d1fdf1f119c6798332c662dee99dd29d7a01520", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.0, "max_line_length": 23, "alphanum_fraction": 0.4583333333, "num_tokens": 10}
|
from __future__ import print_function, division
import itertools
from copy import deepcopy
from collections import OrderedDict
from warnings import warn
import pickle
import nilmtk
import pandas as pd
import numpy as np
from hmmlearn import hmm
from nilmtk.feature_detectors import cluster
from nilmtk.disaggregate import Disaggregator
from nilmtk.datastore import HDFDataStore
import datetime
import matplotlib.pyplot as plt
# Python 2/3 compatibility
from six import iteritems, iterkeys
from builtins import range
# Seed the global NumPy RNG so clustering/EM results are reproducible.
SEED = 42
# Fix the seed for repeatability of experiments.
np.random.seed(SEED)
def sort_startprob(mapping, startprob):
    """Reorder the start probabilities according to *mapping*.

    Parameters
    ----------
    mapping : dict mapping new index -> old index (as returned by
        ``return_sorting_mapping``).
    startprob : 1-D array-like of start probabilities.

    Returns
    -------
    numpy.ndarray with entry *i* equal to ``startprob[mapping[i]]``.
    """
    reordered = np.zeros(len(startprob))
    for new_idx in range(len(startprob)):
        reordered[new_idx] = startprob[mapping[new_idx]]
    return reordered
def sort_covars(mapping, covars):
    """Reorder the covariance entries according to *mapping*.

    Entry *i* of the result is ``covars[mapping[i]]``; the dtype and
    shape of *covars* are preserved.
    """
    reordered = np.zeros_like(covars)
    for new_idx, old_idx in mapping.items():
        reordered[new_idx] = covars[old_idx]
    return reordered
def sort_transition_matrix(mapping, A):
    """Permute the transition matrix into increasing order of power means.

    Parameters
    ----------
    mapping : dict mapping new index -> old index (as returned by
        ``return_sorting_mapping``).
    A : numpy.array of shape (k, k)
        Transition matrix.

    Returns
    -------
    numpy.ndarray of shape (k, k) with ``result[i, j] ==
    A[mapping[i], mapping[j]]``.
    """
    n = len(A)
    permuted = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            permuted[row, col] = A[mapping[row], mapping[col]]
    return permuted
def sort_learnt_parameters(startprob, means, covars, transmat):
    """Sort all learnt HMM parameters into increasing order of power means.

    Returns ``[startprob, means, covars, transmat]`` with every array
    permuted consistently by the mapping derived from *means*.
    """
    index_map = return_sorting_mapping(means)
    sorted_means = np.sort(means, axis=0)
    sorted_startprob = sort_startprob(index_map, startprob)
    sorted_covars = sort_covars(index_map, covars)
    sorted_transmat = sort_transition_matrix(index_map, transmat)
    # Sanity checks: the permutation must never change array shapes.
    assert np.shape(sorted_means) == np.shape(means)
    assert np.shape(sorted_startprob) == np.shape(startprob)
    assert np.shape(sorted_transmat) == np.shape(transmat)
    return [sorted_startprob, sorted_means, sorted_covars, sorted_transmat]
def compute_A_fhmm(list_A):
    """Combine per-appliance transition matrices into the FHMM one.

    Parameters
    ----------
    list_A : list of transition matrices of the individual learnt HMMs.

    Returns
    -------
    The Kronecker product of all matrices, i.e. the transition matrix of
    the combined (exact) FHMM.
    """
    combined = list_A[0]
    for transmat in list_A[1:]:
        combined = np.kron(combined, transmat)
    return combined
def compute_means_fhmm(list_means):
    """Combine per-appliance state means into FHMM emission parameters.

    Each combined state corresponds to one choice of state per
    appliance; its mean is the sum of the chosen per-appliance means.

    Returns
    -------
    [mu, cov] where ``mu`` has shape (n_combinations, 1) and ``cov`` is
    a constant diagonal covariance of 5 for every combined state.
    """
    combo_sums = [sum(combo) for combo in itertools.product(*list_means)]
    n_combinations = len(combo_sums)
    mu = np.reshape(np.array(combo_sums), (n_combinations, 1))
    cov = np.tile(5 * np.identity(1), (n_combinations, 1, 1))
    return [mu, cov]
def compute_pi_fhmm(list_pi):
    """Combine per-appliance start distributions into the FHMM one.

    Parameters
    ----------
    list_pi : list of start-probability vectors of the individual HMMs.

    Returns
    -------
    The Kronecker product of all vectors, i.e. the start distribution of
    the combined (exact) FHMM.
    """
    combined = list_pi[0]
    for pi in list_pi[1:]:
        combined = np.kron(combined, pi)
    return combined
def create_combined_hmm(model):
    """Build the exact combined FHMM from the per-appliance HMMs.

    Parameters
    ----------
    model : mapping of appliance name -> fitted ``hmm.GaussianHMM``.

    Returns
    -------
    A single ``hmm.GaussianHMM`` whose states enumerate every
    combination of per-appliance states.
    """
    startprobs = [model[appliance].startprob_ for appliance in model]
    transmats = [model[appliance].transmat_ for appliance in model]
    mean_lists = [model[appliance].means_.flatten().tolist()
                  for appliance in model]
    pi_combined = compute_pi_fhmm(startprobs)
    A_combined = compute_A_fhmm(transmats)
    mean_combined, cov_combined = compute_means_fhmm(mean_lists)
    combined = hmm.GaussianHMM(n_components=len(pi_combined),
                               covariance_type='full')
    combined.startprob_ = pi_combined
    combined.transmat_ = A_combined
    combined.covars_ = cov_combined
    combined.means_ = mean_combined
    return combined
def return_sorting_mapping(means):
    """Build the permutation that sorts *means* into increasing order.

    Parameters
    ----------
    means : array of shape (k, 1) of per-state power means.

    Returns
    -------
    dict mapping new (sorted) index -> old index, where ties resolve to
    the first matching old index.
    """
    # np.sort returns a new sorted array, so the caller's input is not
    # mutated -- the deepcopy the original code made was redundant.
    sorted_means = np.sort(means, axis=0)
    mapping = {}
    for i, val in enumerate(sorted_means):
        # First occurrence wins on ties, matching the original behavior.
        mapping[i] = np.where(val == means)[0][0]
    return mapping
def decode_hmm(length_sequence, centroids, appliance_list, states):
    """Decode the combined-FHMM state sequence into per-appliance
    state indices and power values.

    Parameters
    ----------
    length_sequence : int, number of time steps in *states*.
    centroids : mapping of appliance -> sequence of per-state power means.
    appliance_list : iterable of appliance keys, in combination order.
    states : sequence of combined-state indices of length
        *length_sequence*.

    Returns
    -------
    [hmm_states, hmm_power] : dicts mapping appliance -> per-timestep
        state index array and power array.
    """
    hmm_states = {}
    hmm_power = {}
    total_num_combinations = 1
    for appliance in appliance_list:
        total_num_combinations *= len(centroids[appliance])
    for appliance in appliance_list:
        # np.int was removed in NumPy 1.24; the builtin int is the
        # equivalent platform-default integer dtype.
        hmm_states[appliance] = np.zeros(length_sequence, dtype=int)
        hmm_power[appliance] = np.zeros(length_sequence)
    for i in range(length_sequence):
        factor = total_num_combinations
        for appliance in appliance_list:
            factor = factor // len(centroids[appliance])
            # Floor division keeps the decoded index an int in Python 3
            # (the old `/` produced a float that was only truncated on
            # assignment into the int array).
            temp = int(states[i]) // factor
            hmm_states[appliance][i] = temp % len(centroids[appliance])
            hmm_power[appliance][i] = centroids[
                appliance][hmm_states[appliance][i]]
    return [hmm_states, hmm_power]
class FHMM_EXACT(Disaggregator):
    """Exact Factorial HMM disaggregator (NILMTK API).

    Trains one GaussianHMM per appliance, combines them into a single
    exact FHMM via Kronecker products, and decodes the combined state
    sequence back into per-appliance power estimates.
    """

    def __init__(self,params):
        """Initialise from a NILMTK-style params dict.

        Recognised keys: ``save-model-path``, ``pretrained-model-path``,
        ``chunk_wise_training`` and ``num_of_states`` (per-appliance HMM
        state count; values < 1 trigger automatic cluster-based choice).
        """
        self.model = {}
        self.MODEL_NAME = 'FHMM' # Add the name for the algorithm
        self.save_model_path = params.get('save-model-path', None)
        self.load_model_path = params.get('pretrained-model-path',None)
        self.chunk_wise_training = params.get('chunk_wise_training', False)
        self.num_of_states = params.get('num_of_states', 2)
        if self.load_model_path:
            # load_model is presumably provided by the Disaggregator
            # base class -- not defined in this module.
            self.load_model(self.load_model_path)
        # Appliance names seen during training, in order of first fit.
        self.app_names = []

    def partial_fit(self, train_main, train_appliances, **load_kwargs):
        """
        Train using 1d FHMM.

        Parameters
        ----------
        train_main : list of mains DataFrames (concatenated internally).
        train_appliances : list of (appliance_name, list of DataFrames).
        """
        print(".........................FHMM partial_fit.................")
        train_main = pd.concat(train_main, axis=0)
        train_app_tmp = []
        # Flatten each appliance's chunk list into a single DataFrame.
        for app_name, df_list in train_appliances:
            df_list = pd.concat(df_list, axis=0)
            train_app_tmp.append((app_name,df_list))
            self.app_names.append(app_name)
        print (train_main.shape)
        train_appliances = train_app_tmp
        learnt_model = OrderedDict()
        num_meters = len(train_appliances)
        # With many meters, cap the cluster count to keep the combined
        # FHMM state space (product of per-meter states) tractable.
        if num_meters > 12:
            max_num_clusters = 2
        else:
            max_num_clusters = 3
        for appliance, meter in train_appliances:
            meter_data = meter.dropna()
            X = meter_data.values.reshape((-1, 1))
            if not len(X):
                print("Submeter '{}' has no samples, skipping...".format(meter))
                continue
            assert X.ndim == 2
            self.X = X
            if self.num_of_states > 0:
                # User has specified the number of states for this appliance
                num_total_states = self.num_of_states
            else:
                # Find the optimum number of states
                states = cluster(meter_data, max_num_clusters)
                num_total_states = len(states)
            print("Training model for submeter '{}'".format(appliance))
            learnt_model[appliance] = hmm.GaussianHMM(num_total_states, "full")
            # Fit
            learnt_model[appliance].fit(X)
            print("Learnt model for : "+appliance)
        # Check to see if there are any more chunks.
        # TODO handle multiple chunks per appliance.
        # Combining to make a AFHMM
        self.meters = []
        new_learnt_models = OrderedDict()
        # Re-sort every per-appliance model by increasing power mean so
        # the combined state enumeration is canonical.
        for meter in learnt_model:
            print(meter)
            startprob, means, covars, transmat = sort_learnt_parameters(
                learnt_model[meter].startprob_, learnt_model[meter].means_,
                learnt_model[meter].covars_, learnt_model[meter].transmat_)
            new_learnt_models[meter] = hmm.GaussianHMM(startprob.size, "full")
            new_learnt_models[meter].startprob_ = startprob
            new_learnt_models[meter].transmat_ = transmat
            new_learnt_models[meter].means_ = means
            new_learnt_models[meter].covars_ = covars
            # UGLY! But works.
            self.meters.append(meter)
        learnt_model_combined = create_combined_hmm(new_learnt_models)
        self.individual = new_learnt_models
        self.model = learnt_model_combined
        print("print ...........",self.model)
        print("FHMM partial_fit end.................")

    def disaggregate_chunk(self, test_mains_list):
        """Disaggregate the test data according to the model learnt previously

        Performs 1D FHMM disaggregation.

        For now assuming there is no missing data at this stage.

        Parameters
        ----------
        test_mains_list : list of mains DataFrames.

        Returns
        -------
        list of DataFrames, one per input chunk, with one column per
        appliance containing the predicted power.
        """
        # See v0.1 code
        # for ideas of how to handle missing data in this code if needs be.
        # Array of learnt states
        test_prediction_list = []
        for test_mains in test_mains_list:
            learnt_states_array = []
            if len(test_mains) == 0:
                # Empty chunk: emit an all-NaN frame with the right columns.
                tmp = pd.DataFrame(index = test_mains.index, columns = self.app_names)
                test_prediction_list.append(tmp)
            else:
                length = len(test_mains.index)
                temp = test_mains.values.reshape(length, 1)
                # Viterbi-decode the combined state sequence for the chunk.
                learnt_states_array.append(self.model.predict(temp))
                # Model
                means = OrderedDict()
                for elec_meter, model in iteritems(self.individual):
                    means[elec_meter] = (
                        model.means_.round().astype(int).flatten().tolist())
                    means[elec_meter].sort()
                decoded_power_array = []
                decoded_states_array = []
                # Map each combined state back to per-appliance powers.
                for learnt_states in learnt_states_array:
                    [decoded_states, decoded_power] = decode_hmm(
                        len(learnt_states), means, means.keys(), learnt_states)
                    decoded_states_array.append(decoded_states)
                    decoded_power_array.append(decoded_power)
                appliance_powers = pd.DataFrame(decoded_power_array[0], dtype='float32')
                test_prediction_list.append(appliance_powers)
        return test_prediction_list
|
{"hexsha": "e556a741660c55fac80e20bc960ae978acd5dbe5", "size": 10331, "ext": "py", "lang": "Python", "max_stars_repo_path": "nilmtk_contrib/disaggregate/fhmm_exact.py", "max_stars_repo_name": "research-at-scuiot/nilmtk-contrib", "max_stars_repo_head_hexsha": "1e9907313eaa8ab9906b8d0edaf85a8155317d82", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nilmtk_contrib/disaggregate/fhmm_exact.py", "max_issues_repo_name": "research-at-scuiot/nilmtk-contrib", "max_issues_repo_head_hexsha": "1e9907313eaa8ab9906b8d0edaf85a8155317d82", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nilmtk_contrib/disaggregate/fhmm_exact.py", "max_forks_repo_name": "research-at-scuiot/nilmtk-contrib", "max_forks_repo_head_hexsha": "1e9907313eaa8ab9906b8d0edaf85a8155317d82", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3855799373, "max_line_length": 91, "alphanum_fraction": 0.6303358823, "include": true, "reason": "import numpy", "num_tokens": 2355}
|
\iffalse
Noether's theorem says that continuous symmetries of physical systems gives rise to conservation laws. In this class we'll see some examples of low dimensional Lie groups and how they give rise to various phenomenon in physics like time dilation and length contraction in special relativity, spin states of electrons.
Keywords: bilinear forms, signature, SO(2), SO(3), Spin, SO(1,3), Minkowski space and relativity, Noether's theorem, Lie groups.
Prereqs: Linear algebra, Group theory
Homework: Recommended
\fi
\input{../preamble}
\begin{document}
\title{Rotations}
\author{Apurva Nakade}
\thispagestyle{fancy}
\maketitle
\emph{I did not have time to proof-read these notes, these are likely to have more errors than usual :-/}$\\\\$
Let's start by analyzing the orthogonal group in 2 dimensions $O(2)$.
\begin{align}
O(2)
&=
\left\{ \begin{bmatrix} a & b \\ c & d \end{bmatrix} : \begin{bmatrix} a & b \\ c & d \end{bmatrix} \begin{bmatrix} a & c \\ b & d \end{bmatrix} = I_2\right\} \\
SO(2)
&=
\left\{ \begin{bmatrix} a & b \\ c & d \end{bmatrix} : \begin{bmatrix} a & b \\ c & d \end{bmatrix} \begin{bmatrix} a & c \\ b & d \end{bmatrix} = I_2, ad-bc = 1 \right\}
\end{align}
By a direct computation we can show that every element of $O(2)$ is one of the two forms (Exercise \ref{thm:exO2})
\begin{align}
\label{eq:O2}
\begin{bmatrix} \cos \theta & -\sin \theta \\ \sin \theta & \cos \theta \end{bmatrix}, & \begin{bmatrix} \cos \theta & \sin \theta \\ \sin \theta & -\cos \theta \end{bmatrix}
\end{align}
These matrices have determinants $1$ and $-1$ respectively and represent rotations and reflections in $\R^2$. The eigenvalues are of the form $e^{i\theta}, e^{-i\theta}$ for the rotation matrices and $\pm 1$ for the reflection ones and so we get
\begin{proposition}
Every matrix in $O(2)$ is either a rotation, in which case it is similar to a matrix of the form $\begin{bmatrix} e^{i\theta} & \\ & e^{-i\theta} \end{bmatrix}$ or a reflection about a line, in which case it is similar to a matrix of the form $\begin{bmatrix} -1 & \\ & 1 \end{bmatrix}$.
\end{proposition}
\section{Orthogonal matrices}
This method does not generalize to higher dimensions (or does it?); instead, we use eigenvalues to analyze the matrices.
\begin{thm}[Spectral theorem]
Every matrix in $O(n)$ and $U(n)$ is diagonalizable over the complex numbers.
\end{thm}
Recall that diagonalizable means that the matrix is similar to a diagonal matrix i.e. it becomes diagonal after a change of basis. Even though $O(n)$ has real entries, its eigenvalues and eigenvectors might be complex i.e. the eigenvectors can be vectors in $\C^n$ instead of $\R^n$.
Because $O(n) \subseteq U(n)$ it suffices to analyze the eigenvectors of unitary matrices. Let $M \in U(n)$ be a unitary matrix. By the Spectral theorem there exist $n$ eigenvectors $v_1, \ldots, v_n \in \C^n$ with corresponding eigenvalues $\lambda_1, \ldots, \lambda_n$ i.e. $A v_i = \lambda_i v_i$. Using the definition of unitary matrices we must have
\begin{alignat}{4}
& & \innerp{Av_i}{Av_i} & = \innerp{v_i}{v_i} \\
\implies \quad & & \innerp{\lambda_i v_i}{\lambda_i v_i} & = \innerp{v_i}{v_i} \\
\implies \quad & & \conj \lambda_i \lambda_i \innerp{v_i}{v_i} & = \innerp{v_i}{v_i} \\
\implies \quad & & \conj \lambda_i \lambda_i & = 1
\end{alignat}
As $O(n) \subseteq U(n)$ the same holds for $O(n)$ so we get the following proposition.
\begin{proposition}
Every eigenvalue of an unitary or an orthogonal matrix is a complex number of norm 1 and hence is of the form $e^{i \theta}$ for some $\theta$.
\end{proposition}
\subsection{Orthogonal matrices in 3 dimensions}
Consider a matrix $A \in O(3)$, by the previous section $A$ has 3 eigenvalues of the form $\lambda_1 = e^{i \theta_1}$, $\lambda_2 = e^{i \theta_2}$, $\lambda_3 = e^{i \theta_3}$ for some $\theta_1$, $\theta_2$, $\theta_3$. But $O(3)$ has real entries and hence the complex eigenvalues of $A$ should come in conjugate pairs. The only way this can happen is if $\theta_1 = 0$ or $\pi$ and $\theta_2 = - \theta_3$.
\begin{proposition}
For any $A \in O(3)$ the eigenvalues of $A$ are of the form $\lambda_1 = \pm 1 , \lambda_2 = e^{i \theta}, \lambda_3 = e^{-i \theta}$ for some $\theta$. Further $\lambda_1 = 1$ iff $A \in SO(3)$.
\end{proposition}
If $A \in SO(3)$ then $A$ is similar to
\begin{align}
\label{eq:O3Type1}
A \sim \begin{bmatrix} 1 & & \\ & e^{i \theta} & \\ & & e^{-i\theta} \end{bmatrix} \sim \begin{bmatrix} 1 & & \\ &\cos \theta & -\sin \theta \\ &\sin \theta & \cos \theta \end{bmatrix}
\end{align}
This is saying that any matrix in $SO(3)$ represents rotation around an axis.
If $A \in O(3) \setminus SO(3)$ then $A$ is similar to
\begin{align}
\label{eq:O3Type2}
A \sim \begin{bmatrix} -1 & & \\ & e^{i \theta} & \\ & & e^{-i\theta} \end{bmatrix} \sim \begin{bmatrix} -1 & & \\ &\cos \theta & -\sin \theta \\ &\sin \theta & \cos \theta \end{bmatrix}
\end{align}
This is saying that any matrix in $O(3) \setminus SO(3)$ represents rotation around an axis followed by a reflection along the perpendicular plane.
\begin{proposition}
Every linear transformation of $\R^3$ that preserves distances is either a rotation about an axis or a rotation about an axis followed by a reflection in the perpendicular plane.
\end{proposition}
\iffalse
\section{$SU(2)$}
The arguments above prove that every unitary matrix is similar to a diagonal matrix with entries of the form $e^{i\theta}$. Since unitary matrices can have entries in complex numbers there are no conditions on the $\theta$'s.
Let us restrict to $n=2$ and consider the matrices in $SU(2)$
\begin{align}
SU(2) = \{ A \in M_{2 \times 2}(\R) : A ^* A = I_2, \det A = 1\}
\end{align}
As we did for $O(2)$ we can explicitly write down the matrices in $SU(2)$. Let $\begin{bmatrix} a & b \\ c & d \end{bmatrix}$ be a matrix in $SU(2)$ then the conditions on $SU(2)$ imply that $d = \conj a$ and $c = -\conj b$ and $\norm{a}^2 + \norm{b}^2 = 1$ i.e.
\begin{align}
SU(2) = \left\{ \begin{bmatrix} a & b \\ -\conj{b} & \conj{a} \end{bmatrix} : a, b \in \C \mbox{ and } \norm{a}^2 + \norm{b}^2 = 1 \right\}
\end{align}
Let $a = x_1 + i y_1$ and $b = x_2 + i y_2$. The the condition $\norm{a}^2 + \norm{b}^2 = 1$ is equivalent to $x_1^2 + y_1^2 + x_2^2 + y_2^2 = 1$, but this is exactly the equation of the sphere in $\R^3$.
\begin{thm}
As a (topological) space $SU(2)$ is isomorphic to $S^3$.
\end{thm}
Note that this implies that every point in $S^3$ gives rise to a unitary transformation of $\C^2$.
\fi
\section{Quaternions}
There is another way to talk about rotations, using quaternions! Recall that \textbf{quaternions} form a non-abelian group, denoted $\mathbb{H}$, that is isomorphic as a set to $\R^4$. Elements of $\mathbb{H}$ are of the form $a + bi + cj + dk$ and satisfy the relations
\begin{align}
i^2 = j^2 = k^2 = -1, ij = k, jk = i, ki = j
\end{align}
\iffalse
Similar to complex numbers we have conjugation and norm on quaternions given by
\begin{align}
\norm{a + bi + cj + dk}^2 & = a^2 + b^2 + c^2 + d^2 \\
\conj{a + bi + cj + dk} & = a - bi - cj - dk
\end{align}
and we also have the identity
\begin{align}
(a + bi + cj + dk).\conj{(a + bi + cj + dk)} = \norm{a + bi + cj + dk}^2
\end{align}
In particular note that if $p$ has norm 1 then $p^-1 = \conj p$.
\fi
A quaternion $p \in \mathbb{H}$ defines a linear transformation $\Phi(p):\mathbb{H} \rightarrow \mathbb{H}$ that sends $v \mapsto p v p^{-1}$. These transformations turn out to be rotations when restricted to the unit quaternion group!
Let $S\mathbb{H}$ denote the group of unit quaternions i.e. $\{ p \in \mathbb{H} : \norm{p} = 1\}$. We think of $\R^3$ as the set of \emph{purely imaginary} quaternions i.e. the vector $(x,y,z)$ represents the quaternion $xi + yj + zk$. It turns out to be the case that when $p \in S \mathbb{H}$ the transformation $v \mapsto p v p^{-1}$ preserves the set of purely imaginary quaternions. In fact a much stronger result holds.
\begin{thm}
\label{thm:quaternions}
The map sending $p\in S\mathbb{H}$ to $\Phi(p)$ defines a homomorphism
\begin{align}
\Phi : S \mathbb{H} \rightarrow SO(3)
\end{align}
This homomorphism is surjective with kernel $\Z/2$.
\end{thm}
The proof of this has several steps and is in Exercises in \ref{sec:exQuaternions}.
The group $S\mathbb{H}$ shows up in several avatars in various branches of mathematics. It is the spin group in 3 dimensions, denoted $Spin(3)$. Because $SO(3)$ is the group of rotations of $\R^3$, the above theorem is asserting that there are two quaternions over each rotation of $\R^3$. In physics this fact becomes relevant because in quantum mechanics certain systems have $S\mathbb{H}$ as their symmetry groups, and for such systems there is a physical quantity, called \textbf{spin}, which has two possible values for each value of the angular momentum.
\iffalse
\begin{proof}
Suppose $\norm{p} = 1$ for some $p \in \mathbb{H}$. We need to show that for $\alpha, \beta \in \R^3$ we have $\innerp{p\alpha}{p\beta} = \innerp{\alpha}{\beta}$. We need a good way to manipulate inner products. Let $\Re(a+bi+cj+dk) = a$ denote the real part of quaternions. Then it is easy to see that $\innerp{\alpha}{\beta} = \Re(\alpha \beta)$. We're reduced to showing thatwhen $\norm{p} = 1 $ we have
\begin{align}
\label{eq:random}
\Re(p \alpha p ^{-1}.p \beta p ^{-1}) = \Re{(\alpha\beta)}
\end{align}
Now we invoke the notion of conjugate quaternions. Similar to complex numbers we have conjugation and norm on quaternions given by
\begin{align}
\conj{a + bi + cj + dk} & = a - bi - cj - dk \\
\norm{a + bi + cj + dk}^2 & = a^2 + b^2 + c^2 + d^2
\end{align}
and we also have the identity
\begin{align}
(a + bi + cj + dk).\conj{(a + bi + cj + dk)} = \norm{a + bi + cj + dk}^2
\end{align}
In particular note that if $p$ has norm 1 then $p^{-1} = \conj p$. Plugging this back in \eqref{eq:random} gives us the desired result.
The kernel of this homomorphism is exactly the quaternions $p$ such that for all $v$ we have
\begin{align}
p v p ^{-1} = v
\end{align}
The only such $p$'s are the purely \emph{real} quaternions i.e. the ones with no $i,j,k$ components. The only such quaternions of norm 1 are $p = \pm 1$ and so the kernel of $\Phi$ is $\Z/2$.
\end{proof}
\fi
\newpage
\section{Exercise}
\begin{exercise}
\label{thm:exO2}
Consider a matrix $A = \begin{bmatrix} a & b \\ c & d \end{bmatrix} \in O(2)$.
\begin{enumerate}
\item Show that for some $\theta$, $\phi$ we must have $a = \cos \theta$, $b = \sin \theta$, and $c = \cos \phi$, $d = \sin \phi$.
\item Find the relations between $\theta$ and $\phi$ and prove that every matrix in $O(2)$ is of the form \eqref{eq:O2}.
\item Describe the matrices in \eqref{eq:O2} geometrically and compute their eigenvalues.
\end{enumerate}
\end{exercise}
\iffalse
\begin{exercise}
\label{thm:exO(n)}
What is wrong with the following proof?
Suppose $A \in O(n)$ and let $v$ be an eigenvector of $A$ with eigenvalue $\lambda$ then
\begin{alignat}{4}
& & \innerp{Av}{Av} & = \innerp{v}{v} \\
\implies & & \innerp{\lambda v}{\lambda v} & = \innerp{v}{v} \\
\implies & & \lambda^2 \innerp{v}{v} & = \innerp{v}{v} \\
\implies & & \lambda^2 & = 1
\end{alignat}
Hence every eigenvalue of $A$ is $\pm 1$.
\end{exercise}
\fi
\begin{exercise}
Show that the matrices
\begin{align}
\begin{bmatrix} 1 & & \\ &\cos \theta & \sin \theta \\ &\sin \theta & -\cos \theta \end{bmatrix}
&&
\begin{bmatrix} -1 & & \\ &\cos \theta & \sin \theta \\ &\sin \theta & -\cos \theta \end{bmatrix}
\end{align}
are in $O(3)$. What do these geometrically represent? Find the matrices of type \eqref{eq:O3Type1} or \eqref{eq:O3Type2} to which these are similar.
\end{exercise}
\begin{exercise}
Describe the matrices in $SO(n)$ geometrically for an arbitrary positive integer $n$. Do these matrices still represent rotations? What is the difference between matrices in $SO(2n)$ and matrices in $SO(2n+1)$?
\end{exercise}
\subsection{Quaternions}
\label{sec:exQuaternions}
The following exercises prove theorem \ref{thm:quaternions}.
\begin{exercise}
The first step is to figure out how to deal with inner products using quaternions. Let $\Re(a + bi + cj + dk) = a$ denote the real part of quaternions.
\begin{enumerate}
\item Show that for two vectors $x, y \in \R^3$ the dot product $\innerp{x}{y}$ is equal to $-\Re(xy)$.
\item Show that for any quaternion $p \conj{p} = \norm{p}^2$ and hence if $p \in S\mathbb{H}$ then $p^{-1} = \conj{p}$.
\item Show that for $p \in S \mathbb{H}$ and $x \in \mathbb{H}$ we have $ \Re{(x)} = \Re{(p x p^{-1}) }$. This implies in particular that $\Phi(p)$ takes the purely imaginary quaternions to purely imaginary quaternions.
\item Show that for $p \in S \mathbb{H}$ and $x, y \in \R^3$ we have $ \innerp{x}{y} = \innerp{px\conj{p}}{py\conj{p}}$.
\end{enumerate}
\end{exercise}
\begin{exercise}
Let $p \in S \mathbb{H}$ be a unit quaternion. The above exercise proves that the transformation $\Phi(p)$ preserves the dot product.
\begin{enumerate}
\item Show that for $q \in \mathbb{H}$ we have $\Phi(pq) = \Phi(p)\Phi(q)$ and hence we have a group homomorphism $\Phi: S\mathbb{H}\rightarrow SO(3)$.
(It is $SO(3)$ and not $SO(4)$ because we're looking at the transformations of the space of purely imaginary quaternions.)
\item Argue that because $S\mathbb{H}$ is connected the image of $\Phi$ should be a subgroup of $SO(3)$ and hence $\Phi$ is a homomorphism $S\mathbb{H} \rightarrow SO(3)$.
\item Show that for the unit quaternion $p = \cos (\theta/2) + \sin (\theta/2)(x i+yj+zk)$ the transformation $\Phi(p)$ fixes the vector $(x,y,z)$. Use this to argue that $\Phi$ is surjective.
\item Show that the center of $S\mathbb{H}$ is the set of purely real quaternions. Argue that the kernel of $\Phi$ is $\Z/2$.
\end{enumerate}
\end{exercise}
\begin{exercise}
\begin{align}
O_8 = \{ \pm 1, \pm i, \pm j, \pm k \}
\end{align}
Let $O_8 \subseteq \mathbb{H}$ be the finite quaternion group. Describe the image of $O_8$ under the homomorphism $\Phi$ (defined in Section \ref{sec:quaternions}).
\end{exercise}
\iffalse
\begin{exercise}
Show that $O(n)$, $SO(n)$, $U(n)$ and $SU(n)$ are compact subsets of $M_{n \times n}(\R \mbox{ or } \C)$, but $GL_n(\R)$, $SL_n(\R)$, $GL_n(\C)$ and $SL_n(\C)$ are not.
\end{exercise}
\begin{exercise}
Find the center of the groups $GL_n(\R)$ and $SL_n(\C)$.
\end{exercise}
\begin{exercise}
A \textbf{maximal torus} of a matrix group $G$ is a maximal abelian subgroup of $G$ i.e. a subgroup $H \subseteq G$ is called a maximal torus if $H$ is abelian and if an abelian subgroup $H' \subseteq G$ contains $H$ then $H=H'$.
Find a maximal torus of each of the following groups: $U(n)$, $SU(n)$, $SO(2n)$ and $SO(2n+1)$.
\end{exercise}
\fi
\end{document}
|
{"hexsha": "12f566892291d8faf214f54ce7660d7320dfd896", "size": 15045, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "03 Symmetries of Spaces/02 Rotations.tex", "max_stars_repo_name": "apurvnakade/mc2017", "max_stars_repo_head_hexsha": "ebec59bce5ee1979872e0f37208da6abd91dbb75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "03 Symmetries of Spaces/02 Rotations.tex", "max_issues_repo_name": "apurvnakade/mc2017", "max_issues_repo_head_hexsha": "ebec59bce5ee1979872e0f37208da6abd91dbb75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "03 Symmetries of Spaces/02 Rotations.tex", "max_forks_repo_name": "apurvnakade/mc2017", "max_forks_repo_head_hexsha": "ebec59bce5ee1979872e0f37208da6abd91dbb75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.1734693878, "max_line_length": 558, "alphanum_fraction": 0.6672648721, "num_tokens": 5076}
|
import numpy as np
import pandas as pd
def get_agg_data(grouped_data, agg_method):
    """Reduce grouped data with the requested aggregation.

    Parameters
    ----------
    grouped_data : pandas groupby object
        Data grouped by the categorical key.
    agg_method : str
        Either 'mean' or 'median'.

    Returns
    -------
    pandas object
        One aggregated value per group.

    Raises
    ------
    NotImplementedError
        For any aggregation name other than 'mean' or 'median'.
    """
    if agg_method == 'mean':
        return grouped_data.mean()
    if agg_method == 'median':
        return grouped_data.median()
    raise NotImplementedError()
def get_error(grouped_data, error_method):
    """Compute per-group error estimates for error bars.

    Parameters
    ----------
    grouped_data : pandas groupby object
        Data grouped by the categorical key.
    error_method : str or None
        'se'   -> standard error of the mean (std / sqrt(n))
        'ci95' -> half-width of the 95% normal confidence interval
        'std'  -> standard deviation
        'iqr'  -> DataFrame with 'lower'/'upper' distances from the median
        Any falsy value (None, '') disables error computation.
        Matching is case-insensitive.

    Returns
    -------
    pandas object or None
        One error value (or lower/upper pair for 'iqr') per group, or
        None when error_method is falsy.

    Raises
    ------
    NotImplementedError
        For an unrecognised (truthy) error_method.
    """
    # `None` and '' both mean "no error bars"; a single truthiness test
    # replaces the redundant `is None or not ...` check.
    if not error_method:
        return None
    method = error_method.lower()  # hoisted: was re-computed per branch
    if method == 'se':
        return grouped_data.apply(lambda x: x.std()/(x.count()**.5))
    if method == 'ci95':
        # 1.96 = z-score of the two-sided 95% normal confidence interval.
        return grouped_data.apply(lambda x: 1.96*x.std()/(x.count()**.5))
    if method == 'std':
        return grouped_data.std()
    if method == 'iqr':
        median = grouped_data.median()
        return pd.DataFrame({'lower': median - grouped_data.quantile(.25),
                             'upper': grouped_data.quantile(.75) - median})
    raise NotImplementedError(
        "error_method must be 'se', 'ci95', 'std', 'iqr' or falsy, got %r"
        % (error_method,))
class CatPlot:
    """Base class for categorical plots (bar / line variants).

    Data is grouped by the categorical key ``agg`` along the x-axis and can
    additionally be split by

    * ``cluster_var`` -- side-by-side groups at each x position,
    * ``stack_var``   -- groups drawn on top of each other (stacked),
    * ``axes_var``    -- one matplotlib axis per group (faceting).

    Aggregation uses ``agg_method`` ('mean'/'median', see ``get_agg_data``)
    and error bars use ``error_method`` ('se'/'ci95'/'std'/'iqr' or falsy,
    see ``get_error``). Optional swarm/violin overlays show the raw values.

    Subclasses implement :meth:`_plot`, which draws the aggregated values
    of one (cluster, stack) combination onto the axis. All drawing happens
    immediately inside ``__init__``.

    NOTE(review): the mutable dict defaults (``cluster_params={}`` etc.)
    are only read via ``.get`` here, so the shared-default pitfall is
    harmless -- but callers should not mutate them.
    """
    def __init__(self, data, agg, cluster_var=None, axes_var=None, stack_var=None,
                 agg_method='mean', error_method='se', x_offset=None, ax=None, clusters=None, cluster_params={},
                 stack_params={}, swarm=False, swarm_params={}, swarm_cluster_params={}, swarm_stack_params={},
                 swarm_hist_params={}, violin=False):
        # Store the plot configuration on the instance.
        self.agg = agg
        self.agg_method = agg_method
        self.error_method = error_method
        self.axes_var = axes_var
        self.cluster_var = cluster_var
        self.stack_var = stack_var
        self.cluster_params = cluster_params
        self.stack_params = stack_params
        self.swarm_params = swarm_params
        self.swarm_cluster_params = swarm_cluster_params
        self.swarm_stack_params = swarm_stack_params
        self.swarm_hist_params = swarm_hist_params
        self.swarm = swarm
        self.violin = violin
        # One x tick per category of `agg`.
        self.xticklabels = list(data.groupby(self.agg).groups.keys())
        self.nbars = len(self.xticklabels)
        if self.cluster_var is None:
            # NOTE(review): the trailing comma makes this the 1-tuple
            # (None,), so nclusters == 1 below -- presumably intentional
            # for the un-clustered case; verify before "fixing".
            self.clusters = None,
        else:
            self.clusters = clusters or list(data.groupby(self.cluster_var).groups.keys())
        self.nclusters = len(self.clusters)
        # Horizontal spacing between clusters at each x position.
        self.x_offset = 1/(self.nclusters+1) if x_offset is None else x_offset
        # Centre each tick under its cluster group.
        self.xtick_coords = np.arange(self.nbars) + (self.nclusters/2) * self.x_offset
        self.xtick_coords -= (self.x_offset/2)
        if self.axes_var is None:
            self._plot_ax(data,ax)
        else:
            # Facet: one axis per group of `axes_var`. `ax` may be a dict
            # (group key -> axis) or a sequence in group-key order.
            axes_grouped_data = data.groupby(self.axes_var)
            if isinstance(ax,dict):
                axes = ax
            else:
                indexes = list(axes_grouped_data.groups.keys())
                axes = dict(zip(indexes,ax))
            for idx, ax_data in axes_grouped_data:
                self._plot_ax(ax_data, axes[idx], title=idx)
    def _plot_ax(self, data, ax, title=None):
        """Draw all clusters of `data` on a single axis and label it."""
        if self.cluster_var is None:
            self._plot_cluster(data, ax)
        else:
            grouped_data = data.groupby(self.cluster_var)
            for i, cluster in enumerate(self.clusters):
                # if cluster not in grouped_data.groups:
                #     continue
                # Skip clusters absent from this axis' data (EAFP).
                try:
                    cluster_data = grouped_data.get_group(cluster)
                except KeyError:
                    continue
                self._plot_cluster(cluster_data, ax, cluster_name=cluster, offset=i)
        ax.set_xticks(self.xtick_coords)
        ax.set_xticklabels(self.xticklabels)
        ax.set_title(title)
    def _plot_cluster(self, data, ax, cluster_name=None, offset=0):
        """Draw one cluster, iterating its stacks (if any) bottom-up."""
        bottoms = None
        if self.stack_var is None:
            self._plot_stack(data, ax, cluster_name=cluster_name, offset=offset, bottoms=bottoms)
        else:
            for stack, stack_data in data.groupby(self.stack_var):
                # Each stack starts where the previous one ended.
                bottoms = self._plot_stack(stack_data, ax, cluster_name=cluster_name, offset=offset, stack_name=stack, bottoms=bottoms)
    def _plot_stack(self, data, ax, cluster_name=None, offset=0, stack_name=None, bottoms=None):
        """Aggregate one (cluster, stack) slice, draw it, and return the
        new stack baseline (``bottoms + y``)."""
        # Legend label from whichever of cluster/stack names are present.
        if cluster_name is None and stack_name is None:
            label = None
        elif cluster_name is None:
            label = stack_name
        elif stack_name is None:
            label = cluster_name
        else:
            label = cluster_name, stack_name
        # Shift this cluster's positions sideways by its offset index.
        x = np.arange(self.nbars) + offset*self.x_offset
        y = get_agg_data(data.groupby(self.agg), self.agg_method)
        yerr = get_error(data.groupby(self.agg), self.error_method)
        if bottoms is None:
            bottoms = np.zeros(len(x))
        # Align with the global tick order; missing categories become NaN.
        y = y.reindex(self.xticklabels).values
        if yerr is not None:
            yerr = yerr.reindex(self.xticklabels).values.T
        self._plot(ax=ax,x=x,y=y,yerr=yerr,label=label,bottoms=bottoms,cluster_name=cluster_name,stack_name=stack_name)
        if self.swarm:
            self._swarmplot(ax=ax,data=data,x=x,label=label,bottoms=bottoms,cluster_name=cluster_name,stack_name=stack_name)
        if self.violin:
            self._violinplot(ax=ax,data=data,x=x,label=label,bottoms=bottoms,cluster_name=cluster_name,stack_name=stack_name)
        return bottoms + y
    def _violinplot(self,ax,data,x,label,bottoms,cluster_name,stack_name):
        """Overlay translucent violins of the raw values per category."""
        # NOTE: the comprehension variable `data` shadows the argument.
        grouped_data = {key: data for key, data in data.groupby(self.agg)}
        violin_data = []
        xcoords = []
        for xcoord, xlabel, bottom in zip(x, self.xticklabels, bottoms):
            if xlabel in grouped_data:
                # Shift the raw values up by the stack baseline.
                violin_data.append(grouped_data[xlabel]+bottom)
                xcoords.append(xcoord)
        parts = ax.violinplot(violin_data,xcoords,widths=self.x_offset,showextrema=False)
        for name, part in parts.items():
            if name == 'bodies':
                for pc in part:
                    pc.set_alpha(0.15)
            else:
                # NOTE(review): this branch reuses `pc` from the loop above
                # instead of `part` -- looks like a bug, but it appears
                # unreachable with showextrema=False (only 'bodies' is
                # returned); verify before relying on other violin parts.
                pc.set_alpha(0.15)
    def _swarmplot(self,ax,data,x,label,bottoms,cluster_name,stack_name):
        """Overlay a simple histogram-based swarm of the raw values."""
        # NOTE(review): unlike _violinplot, missing categories are not
        # skipped here, so pairing group order with `x` assumes every
        # category is present in this slice -- verify with sparse data.
        for (_, group_data), xcoord in zip(data.groupby(self.agg),x):
            y = group_data.values
            y.sort()
            # Spread points horizontally within one cluster slot.
            xmin,xmax = xcoord-(0.5*self.x_offset),xcoord+(0.5*self.x_offset)
            counts, edges = np.histogram(y,**self.swarm_hist_params)
            # Evenly space the points of each histogram bin in [xmin, xmax].
            x_ = np.concatenate([np.linspace(xmin,xmax,count+2)[1:-1] for count in counts])
            # NOTE(review): bin_centers is computed but never used.
            bin_centers = np.mean([edges[1:],edges[:-1]],axis=0)
            ax.scatter(x_,y,label=label,**self.swarm_params,**self.swarm_cluster_params.get(cluster_name,{}), **self.swarm_stack_params.get(stack_name,{}))
    def _plot(self,ax,x,y,yerr,label,bottoms,cluster_name,stack_name):
        """Drawing hook implemented by subclasses (Bar, Line, ...)."""
        return NotImplemented
class Bar(CatPlot):
    """Categorical bar plot; accepts ``bar_width`` as an alias for
    ``x_offset`` (the generic cluster spacing of :class:`CatPlot`)."""
    def __init__(self, **kwargs):
        # Translate the user-facing `bar_width` into the generic `x_offset`.
        try:
            kwargs['x_offset'] = kwargs.pop('bar_width')
        except KeyError:
            pass
        super().__init__(**kwargs)
    def _plot(self, ax, x, y, yerr, label, bottoms, cluster_name, stack_name):
        """Draw one (cluster, stack) slice as bars on the stack baseline."""
        cluster_kw = self.cluster_params.get(cluster_name, {})
        stack_kw = self.stack_params.get(stack_name, {})
        ax.bar(x, y, yerr=yerr, label=label, width=self.x_offset,
               bottom=bottoms, **cluster_kw, **stack_kw)
class Line(CatPlot):
    """Categorical line plot: aggregated values drawn with ``ax.errorbar``."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def _plot(self, ax, x, y, yerr, label, bottoms, cluster_name, stack_name):
        """Draw one (cluster, stack) slice as an error-bar line, offset by
        the running stack baseline."""
        cluster_kw = self.cluster_params.get(cluster_name, {})
        stack_kw = self.stack_params.get(stack_name, {})
        ax.errorbar(x, y + bottoms, yerr=yerr, label=label,
                    **cluster_kw, **stack_kw)
class ViolinPlot(CatPlot):
    """Violin-plot variant of CatPlot.

    NOTE(review): this subclass appears unfinished -- ``ax.violinplot()``
    is called without a dataset below and would raise a TypeError at draw
    time. A working violin overlay exists in ``CatPlot._violinplot``
    (enabled with ``violin=True``).
    """
    def __init__(self,**kwargs):
        super().__init__(**kwargs)
    def _plot(self,ax,x,y,yerr,label,bottoms,cluster_name,stack_name):
        # NOTE(review): missing arguments -- see class docstring.
        ax.violinplot()
|
{"hexsha": "51ce506613eb4bac07f969ddfa842006bd5097ce", "size": 7660, "ext": "py", "lang": "Python", "max_stars_repo_path": "simianpy/plotting/catplot.py", "max_stars_repo_name": "jselvan/simianpy", "max_stars_repo_head_hexsha": "5b2b162789e11bc89ca2179358ab682269e7df15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simianpy/plotting/catplot.py", "max_issues_repo_name": "jselvan/simianpy", "max_issues_repo_head_hexsha": "5b2b162789e11bc89ca2179358ab682269e7df15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simianpy/plotting/catplot.py", "max_forks_repo_name": "jselvan/simianpy", "max_forks_repo_head_hexsha": "5b2b162789e11bc89ca2179358ab682269e7df15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5555555556, "max_line_length": 165, "alphanum_fraction": 0.6287206266, "include": true, "reason": "import numpy", "num_tokens": 1823}
|
import numpy as np
import torch
import torch.nn as nn
from utils.REDutils import fspecial_gauss
class Downsampler(nn.Module):
    """
    Fixed-kernel (non-learned) downsampling / blur layer.

    http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf

    The kernel is generated once by ``get_kernel`` and copied into a strided
    ``nn.Conv2d`` (or ``nn.ConvTranspose2d``) whose weights couple each
    channel only to itself.

    Parameters
    ----------
    n_planes : int
        Number of channels; the same kernel is applied to each one.
    factor : int
        Downsampling factor (stride of the convolution).
    kernel_type : str
        Preset name ('lanczos2', 'lanczos3', 'gauss12', 'gauss1sq2',
        'uniform_blur', 'gauss_blur') or base type ('lanczos', 'gauss',
        'box'); presets fix kernel_width / sigma / support below.
    phase : float
        0 or 0.5 (half-pixel shifted sampling grid, see ``get_kernel``).
    kernel_width, support, sigma :
        Forwarded to ``get_kernel``; overridden by the presets.
    preserve_size : bool
        If True, pad the input in ``forward`` so the spatial size is
        preserved up to the stride.
    pad_type : str
        'circular' or 'reflection'; the blur presets force 'circular'.
    transpose_conv : bool
        Use a transposed convolution (upsampling) instead of a convolution.
    """
    def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None,
                 preserve_size=False, pad_type='reflection', transpose_conv=False):
        super(Downsampler, self).__init__()
        assert phase in [0, 0.5], 'phase should be 0 or 0.5'
        # Translate the preset names into base kernel type + parameters.
        if kernel_type == 'lanczos2':
            support = 2
            kernel_width = 4 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'lanczos3':
            support = 3
            kernel_width = 6 * factor + 1
            kernel_type_ = 'lanczos'
        elif kernel_type == 'gauss12':
            kernel_width = 7
            sigma = 1 / 2
            kernel_type_ = 'gauss'
        elif kernel_type == 'gauss1sq2':
            kernel_width = 9
            sigma = 1. / np.sqrt(2)
            kernel_type_ = 'gauss'
        elif kernel_type == 'uniform_blur':
            kernel_width = 9
            kernel_type_ = 'uniform'
            # Blur presets use periodic (circular) boundary handling.
            pad_type = 'circular'
        elif kernel_type == 'gauss_blur':
            kernel_width = 25
            sigma = 1.6
            kernel_type_ = 'gauss'
            pad_type = 'circular'
        elif kernel_type in {'lanczos', 'gauss', 'box'}:
            kernel_type_ = kernel_type
        else:
            assert False, 'wrong name kernel'
        # note that `kernel width` will be different to actual size for phase = 1/2
        self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
        if transpose_conv:
            # Padding that keeps the transposed convolution centred for
            # both odd and even kernel sizes.
            if self.kernel.shape[0] % 2 == 1:
                pad = int((self.kernel.shape[0] - 1) // 2.)
            else:
                pad = int((self.kernel.shape[0] - factor) // 2.)
            downsampler = nn.ConvTranspose2d(n_planes, n_planes, kernel_size=self.kernel.shape,
                                             stride=factor, padding=pad)
        else:
            downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
        # Load the fixed kernel: zero bias, diagonal per-channel weights
        # (no mixing across channels).
        downsampler.weight.data[:] = 0
        downsampler.bias.data[:] = 0
        kernel_torch = torch.from_numpy(self.kernel)
        for i in range(n_planes):
            downsampler.weight.data[i, i] = kernel_torch
        # NOTE(review): the weights are initialised but not frozen
        # (requires_grad stays True) -- presumably the caller keeps them
        # out of the optimiser; verify.
        self.downsampler_ = downsampler
        if preserve_size:
            # Pick the padding callable used by forward().
            if pad_type == 'circular':
                self.padding = lambda torch_in: pad_circular(torch_in, kernel_width // 2)
            elif pad_type == 'reflection':
                # NOTE(review): despite the name, this uses ReplicationPad2d
                # (edge replication), not ReflectionPad2d.
                if self.kernel.shape[0] % 2 == 1:
                    pad = int((self.kernel.shape[0] - 1) // 2.)
                else:
                    pad = int((self.kernel.shape[0] - factor) // 2.)
                self.padding = nn.ReplicationPad2d(pad)
            else:
                assert False, "pad_type have only circular or reflection options"
        self.preserve_size = preserve_size
    def forward(self, input):
        """Apply optional padding, then the fixed strided convolution."""
        if self.preserve_size:
            x = self.padding(input)
        else:
            x = input
        # Keep a reference to the (padded) input -- presumably for
        # debugging/inspection; verify before removing.
        self.x = x
        return self.downsampler_(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
    """Build a normalised 2-D resampling / blur kernel.

    Parameters
    ----------
    factor : int
        Downsampling factor; scales the lanczos tap distances.
    kernel_type : str
        'lanczos', 'gauss', 'box' or 'uniform'. ('blur' passes the assert
        below but has no implementation and falls through to the final
        error branch.)
    phase : float
        0 or 0.5; for phase 0.5 (and non-box kernels) the kernel is one
        sample smaller and its taps are offset by half a pixel.
    kernel_width : int
        Nominal kernel size in samples.
    support : int, optional
        Lanczos window support (required for 'lanczos').
    sigma : float, optional
        Gaussian standard deviation (required for 'gauss').

    Returns
    -------
    numpy.ndarray
        2-D kernel whose entries sum to 1.
    """
    assert kernel_type in ['lanczos', 'gauss', 'box', 'uniform', 'blur']
    # For phase 1/2 the effective kernel is one sample smaller (except box).
    if phase == 0.5 and kernel_type != 'box':
        kernel = np.zeros([kernel_width - 1, kernel_width - 1])
    else:
        kernel = np.zeros([kernel_width, kernel_width])
    if kernel_type == 'box':
        assert phase == 0.5, 'Box filter is always half-phased'
        kernel[:] = 1. / (kernel_width * kernel_width)
    elif kernel_type == 'gauss':
        assert sigma, 'sigma is not specified'
        assert phase != 0.5, 'phase 1/2 for gauss not implemented'
        # fspecial_gauss already returns a normalised kernel.
        return fspecial_gauss(kernel_width, sigma)
    elif kernel_type == 'uniform':
        kernel = np.ones([kernel_width, kernel_width])
    elif kernel_type == 'lanczos':
        assert support, 'support is not specified'
        center = (kernel_width + 1) / 2.
        for i in range(1, kernel.shape[0] + 1):
            for j in range(1, kernel.shape[1] + 1):
                # Tap distance from the kernel centre, in units of the
                # downsampling factor (shifted half a pixel for phase 1/2).
                if phase == 0.5:
                    di = abs(i + 0.5 - center) / factor
                    dj = abs(j + 0.5 - center) / factor
                else:
                    di = abs(i - center) / factor
                    dj = abs(j - center) / factor
                # Separable lanczos window:
                # sinc(d) * sinc(d / support) per axis; val == 1 at d == 0.
                val = 1
                if di != 0:
                    val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
                    val = val / (np.pi * np.pi * di * di)
                if dj != 0:
                    val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
                    val = val / (np.pi * np.pi * dj * dj)
                kernel[i - 1][j - 1] = val
    else:
        assert False, 'wrong method name'
    # Normalise so the filter preserves the mean intensity.
    kernel /= kernel.sum()
    return kernel
def pad_circular(x, pad):
    """Periodically pad the last two axes of a [batch, ch, h, w] tensor.

    Rows and columns wrap around as on a torus; the output has shape
    [batch, ch, h + 2*pad, w + 2*pad].

    :param x: pytorch tensor of shape: [batch, ch, h, w]
    :param pad: uint
    :return: circularly padded tensor
    """
    # Append the first `pad` rows at the bottom, then the first `pad`
    # columns at the right.
    x = torch.cat((x, x[:, :, :pad]), dim=2)
    x = torch.cat((x, x[:, :, :, :pad]), dim=3)
    # Prepend the slice sitting `pad` before the (already padded) end --
    # exactly the original trailing rows/columns.
    x = torch.cat((x[:, :, -2 * pad:-pad], x), dim=2)
    x = torch.cat((x[:, :, :, -2 * pad:-pad], x), dim=3)
    return x
|
{"hexsha": "03b5463029f0871332b0e9656a00261cba31cb66", "size": 5524, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/downsampler.py", "max_stars_repo_name": "gistvision/DIPsureWithSTE", "max_stars_repo_head_hexsha": "853faac97a451e6430b47f4d4da54c6d08a7ee50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2021-09-06T06:56:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T01:12:23.000Z", "max_issues_repo_path": "models/downsampler.py", "max_issues_repo_name": "scott-mao/DIP-denosing", "max_issues_repo_head_hexsha": "853faac97a451e6430b47f4d4da54c6d08a7ee50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-19T12:14:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-22T14:03:57.000Z", "max_forks_repo_path": "models/downsampler.py", "max_forks_repo_name": "scott-mao/DIP-denosing", "max_forks_repo_head_hexsha": "853faac97a451e6430b47f4d4da54c6d08a7ee50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-10-18T13:35:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T09:34:55.000Z", "avg_line_length": 34.7421383648, "max_line_length": 112, "alphanum_fraction": 0.5322230268, "include": true, "reason": "import numpy", "num_tokens": 1431}
|
"""
    points_to_field(x, wp)

Turn a 2×n point set `x` (row 1 = first coordinates, row 2 = second
coordinates; one column per point) into an N×N complex field, where
N = length(wp.ws). By the reshape/broadcast below, entry (j, k) is
∑ₚ cis(-2π · (a_p·ws[k] + b_p·ws[j])), i.e. a Fourier-type sum over the
points on the frequency grid `wp.ws`.
"""
function points_to_field(x::AbstractArray{T}, wp::WaveletParams) where T <: AbstractFloat
    ws = wp.ws
    n = size(x, 2)      # number of points
    N = length(ws)      # number of frequencies per axis
    a = x[1, :]         # first coordinate of every point
    b = x[2, :]         # second coordinate of every point
    aw = a * ws'        # n×N outer product: coordinate × frequency
    bw = b * ws'
    # n×N×N grid of phases: c[p, j, k] = aw[p, k] + bw[p, j]
    c = reshape(aw, n, 1, N) .+ reshape(bw, n, N, 1)
    # Sum the unit phasors over the points (dims = 1).
    m = sum(t -> cis(-T(2π) * t), c, dims=1)
    reshape(m, N, N)
end
"""
    phase_harmonics(z, k)

Return the k-th phase harmonic of `z` (elementwise for arrays):
|z| * exp(i*k*arg(z)), computed as z^k / |z|^(k-1). Negative `k` is
handled by conjugating first, `k == 0` yields |z| (converted back to
the input type), and `k == 1` returns a copy of `z`.
"""
function phase_harmonics(z::T, k::Int) where T
    if k < 0
        # Bugfix: this recursive call previously used the undefined
        # name `ph`, causing an UndefVarError for negative k.
        phase_harmonics(conj.(z), -k)
    elseif k == 0
        T(@. abs(z))
    elseif k == 1
        copy(z)
    elseif k == 2
        @. z * z / abs(z)
    elseif k == 3
        # abs2(z) == |z|^2 avoids the square root.
        @. z ^ 3 / abs2(z)
    elseif k % 2 == 0
        @. z ^ k / abs(z) ^ (k - 1)
    else
        # Odd k: |z|^(k-1) == abs2(z)^((k-1)/2) == abs2(z)^(k ÷ 2).
        @. z ^ k / abs2(z) ^ (k ÷ 2)
    end
end
# Stack the phase harmonics of `w_jl` for all orders k = 0, …, K-1
# (vertical concatenation, lowest order first).
all_phase_harmonics(w_jl, K) = reduce(vcat, [phase_harmonics.(w_jl, k) for k in 0:K - 1])
"""
    wavelet_phase_harmonics(μ, wp, vs)

Centred wavelet phase-harmonic coefficients of the point set `μ`.
The field of `μ` is filtered with each of `wp.Ψs` by multiplication in
the Fourier domain (so `wp.Ψs` are presumably Fourier-domain wavelet
filters -- verify against WaveletParams), all phase harmonics of orders
0..K-1 are taken, and the stored means `vs = (v_field, v_harmonics)`
are subtracted. Returns `(m, w_jlk)`: the centred plain field and the
centred harmonics.
"""
function wavelet_phase_harmonics(μ, wp, vs)
    M = points_to_field(μ, wp)
    # Convolution with every wavelet via Fourier-domain multiplication.
    w_jl = map(Ψ -> ifft(M .* Ψ), wp.Ψs)
    # Centre each harmonic by its stored mean vs[2].
    w_jlk = map((w, v) -> w .- v, all_phase_harmonics(w_jl, wp.K), vs[2])
    m = ifft(M) .- vs[1]
    m, w_jlk
end
"""
    v_λ_k_all(μ, wp)

Means of the plain field and of every wavelet phase harmonic of `μ`,
computed with zero centring. These are the `vs` to be passed to later
calls of `wavelet_phase_harmonics` / `K_all` / `W_all`.
"""
function v_λ_k_all(μ, wp::WaveletParams{TF}) where TF <: AbstractFloat
    m, w_jlk = wavelet_phase_harmonics(μ, wp, (zero(TF), zeros(TF, wp.J * wp.L * wp.K)))
    mean(m), mean.(w_jlk)
end
# Empirical cross-correlation: mean of the elementwise product with the
# complex conjugate of the second argument (plain product for real data).
cross_correlation(x, y::AbstractArray{<:Complex}) = mean(x .* conj.(y))
cross_correlation(x, y::AbstractArray{<:Real}) = mean(x .* y)
# Cross-correlations for a subset of coefficient pairs:
# `ix` is a vector of (i, j) index pairs into `w1`/`w2`.
cc_subset(w1, w2, ix) = map(ix) do (i, j)
    cross_correlation(w1[i], w2[j])
end
# Variant for index triples (i, j, shift): the second coefficient is
# circularly shifted in space before correlating.
cc_subset(w1, w2, ix::Vector{Tuple{Int, Int, Tuple{Int, Int}}}) = map(ix) do (i, j, shift)
    cross_correlation(w1[i], circshift(w2[j], shift))
end
"""
    K_all(μ, wp, vs)

All correlation statistics of the centred wavelet phase harmonics of `μ`:
same-position pairs (`wp.ix`), spatially shifted pairs (`wp.ix_shift`)
and correlations with the plain field (`wp.ix_0`), concatenated.
"""
function K_all(μ, wp, vs)
    m, w = wavelet_phase_harmonics(μ, wp, vs)
    vcat(cc_subset(w, w, wp.ix),
         cc_subset(w, w, wp.ix_shift),
         cc_subset([m], w, wp.ix_0))
end
"""
    K_all(x, y, wp, vs_x, vs_y)

Cross statistics between two point sets `x` and `y`: same index subsets
as the single-sample method, correlating the harmonics of `x` against
those of `y` (and the plain field of `x` against the harmonics of `y`).
"""
function K_all(x, y, wp, vs_x, vs_y)
    mx, wx = wavelet_phase_harmonics(x, wp, vs_x)
    _, wy = wavelet_phase_harmonics(y, wp, vs_y)
    vcat(cc_subset(wx, wy, wp.ix),
         cc_subset(wx, wy, wp.ix_shift),
         cc_subset([mx], wy, wp.ix_0))
end
# Dispersion analogue of `cc_subset`: the standard deviation of the
# elementwise product x .* conj(y) for each selected pair (i, j).
ww_subset(w1, w2, ix) = map(ix) do (i, j)
    std(w1[i] .* conj.(w2[j]))
end
# Variant for index triples (i, j, shift): the second coefficient is
# circularly shifted before forming the product.
ww_subset(w1, w2, ix::Vector{Tuple{Int, Int, Tuple{Int, Int}}}) = map(ix) do (i, j, shift)
    std(w1[i] .* conj.(circshift(w2[j], shift)))
end
"""
    W_all(μ, wp, vs)

Standard deviations of the pairwise harmonic products of `μ`, over the
same index subsets as `K_all` (same-position, shifted, and plain-field
pairs), concatenated into one vector.
"""
function W_all(μ, wp, vs)
    m, w = wavelet_phase_harmonics(μ, wp, vs)
    vcat(ww_subset(w, w, wp.ix),
         ww_subset(w, w, wp.ix_shift),
         ww_subset([m], w, wp.ix_0))
end
"""
    W_all(x, y, wp, vs_x, vs_y)

Cross variant of `W_all`: standard deviations of products between the
harmonics of `x` and those of `y` over the usual index subsets.
"""
function W_all(x, y, wp, vs_x, vs_y)
    mx, wx = wavelet_phase_harmonics(x, wp, vs_x)
    _, wy = wavelet_phase_harmonics(y, wp, vs_y)
    vcat(ww_subset(wx, wy, wp.ix),
         ww_subset(wx, wy, wp.ix_shift),
         ww_subset([mx], wy, wp.ix_0))
end
|
{"hexsha": "2cf47a27e790a69b67b8aee8f1b67cf0e185e723", "size": 2886, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wavelet_functions.jl", "max_stars_repo_name": "LexaLutyi/PointProcessWavelet.jl", "max_stars_repo_head_hexsha": "3b33cd386bdb9d3f41bc255eac0e1fb98ad91d72", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wavelet_functions.jl", "max_issues_repo_name": "LexaLutyi/PointProcessWavelet.jl", "max_issues_repo_head_hexsha": "3b33cd386bdb9d3f41bc255eac0e1fb98ad91d72", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wavelet_functions.jl", "max_forks_repo_name": "LexaLutyi/PointProcessWavelet.jl", "max_forks_repo_head_hexsha": "3b33cd386bdb9d3f41bc255eac0e1fb98ad91d72", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0305343511, "max_line_length": 90, "alphanum_fraction": 0.5505890506, "num_tokens": 1153}
|
#!/usr/bin/env python
# coding=utf-8
"""
Script to enrich input txt data file with number of apartments and total
occupants per building, based on year of construction and available net
floor area
(Zensusdatenbank Zensus 2011 der Statistischen Ämter des Bundes und der Länder
Represented by
Bayerisches Landesamt für Statistik
St.-Martin-Straße 47, 81541 München
Tel. +49 89 2119-0.)
This function contains different methods to determine the number of apartments
of a building.
A number of occupants is then assigned to each apartment by probabilities
depending on the net_floor_area calculated from statistical data.
(determine_occ())
Important function: np.random.choice(a,p) custom descrete randomfunction,
returns single items from given list with custom probability p for each item
Implemented 14.10.2016 by jsc-swi
"""
from __future__ import division
import os
import numpy as np
import csv
import pycity_calc.cities.scripts.city_generator.city_generator as citgen
# calculate occupancy probabilities
def calc_occ_probability(net_floor_class, list_occ_prob):
    """
    Calculate the occupant-count distribution for a net floor area class.

    Parameters
    ----------
    net_floor_class: int
        Net floor area category:
        0: A < 40 m^2, 1: 40 <= A < 60, 2: 60 <= A < 80, ...,
        8: 180 <= A < 200, 9: A >= 200.
    list_occ_prob: list
        Absolute occupancy counts; rows: net_floor_class, columns:
        number of occupants (Zensus 2011).
        ROWS AND COLUMNS HAVE TO BE OF THE EXACT FORMAT AS IN FILE
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'

    Returns
    -------
    p: list
        Probability for each number of occupants (sums to 1).
    """
    # Row of absolute counts for the requested class (entries may be str).
    counts = [float(entry) for entry in list_occ_prob[net_floor_class]]
    total = sum(counts)
    # Normalise the counts into a discrete probability distribution.
    return [count / total for count in counts]
# calculate number of apartment probabilities
def calc_num_ap_probability(BuildingTypes, build_year_class, list_ap_prob,
                            custom_p=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
                                      0.1, 0.1]):
    """
    Calculate the probability distribution of apartment counts (1..10).

    Parameters
    ----------
    BuildingTypes: str
        Method used to determine the distribution:
        SFH: Single family house (always 1 apartment)
        DFH: Double family house (always 2 apartments)
        MFH: Multiple family house (1-4 apartments, equal weight)
        STAT: Statistic distribution from list_ap_prob
        CUSTOM: custom probabilities (list of 10 items, sum == 1)
    build_year_class: int
        Construction year class, only used for STAT:
        0: before 1950, 1: 1950-1969, 2: 1970-1989, 3: after 1990.
    list_ap_prob: list
        Absolute apartment counts; rows: build year class, columns:
        number of apartments (Zensus 2011). Format as in
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'.
    custom_p: list, optional
        Probabilities for CUSTOM; 10 items, sum == 1
        (default: [0.1] * 10).

    Returns
    -------
    p: list
        Probability for each number of apartments (1..10), summing to 1.
    """
    assert BuildingTypes in ('SFH', 'DFH', 'MFH', 'STAT', 'CUSTOM'), \
        "Unknown BuildTypes: SFH, DFH, MFH, STAT, CUSTOM"
    if BuildingTypes == "STAT":
        assert build_year_class in (0, 1, 2, 3), 'invalid buildyear class'
        # TODO: calculate correct probabilities for apartnumbers 3 to 13,
        # BuildingTypes=STAT is not ready yet!
        counts = list(map(float, list_ap_prob[build_year_class]))
        total = sum(counts)
        return [count / total for count in counts]
    elif BuildingTypes == "SFH":
        return [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    elif BuildingTypes == "DFH":
        return [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    elif BuildingTypes == "MFH":
        # Bugfix: the previous values ([0.2] * 4) summed to 0.8 and were
        # therefore not a valid probability distribution.
        # TODO determine reasonable values
        return [0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0, 0, 0]
    elif BuildingTypes == "CUSTOM":
        # Bugfix: the original `assert len(custom_p)` only rejected an
        # empty list, contradicting its own "must be 10" message.
        assert len(custom_p) == 10, "len(custom_p) must be 10"
        assert abs(sum(custom_p) - 1.0) < 1e-6, "sum(custom_p) must be 1"
        return custom_p
# determine and store occupancy of apartment
def determine_occ(ap_net_floor_area, list_occ_prob):
    """
    Draw the number of occupants for a single apartment.

    The net floor area is mapped to one of ten 20 m^2 classes
    (0: A < 40 m^2, 1: 40 <= A < 60, ..., 8: 180 <= A < 200,
    9: A >= 200) and the occupant count is sampled from that class'
    empirical distribution (Zensus 2011).

    Parameters
    ----------
    ap_net_floor_area: float
        Net floor area of the apartment in m^2.
    list_occ_prob: list
        Occupancy counts; rows: net_floor_class, columns: number of
        occupants (1..5). Format as in
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'.

    Returns
    -------
    occupants: numpy.ndarray (length 1)
        Sampled number of occupants; length-1 array as returned by
        np.random.choice (kept for backward compatibility).
    """
    # Replaces the former 10-branch if/elif chain (which also left
    # `occupants` unbound on a fall-through) with a direct computation:
    # classes span 20 m^2 each, class 0 covers everything below 40 m^2
    # and class 9 is open-ended above 200 m^2.
    net_floor_class = min(max(int(ap_net_floor_area // 20) - 1, 0), 9)
    occupants = np.random.choice([1, 2, 3, 4, 5], 1,
                                 p=calc_occ_probability(net_floor_class,
                                                        list_occ_prob))
    return occupants
# determine number of apartments and occupancy from building
def determine_ap_num(district_data, BuildingTypes, custom_p, list_occ_prob,
                     list_ap_prob):
    """
    Determine the number of apartments and total occupancy per building.

    For every residential building (building_type == 0) an apartment
    count is drawn from the distribution selected via ``BuildingTypes``
    (see ``calc_num_ap_probability``); the building's net floor area is
    split evenly among the apartments and each apartment's occupancy is
    drawn via ``determine_occ``. Results are written in place: column 11
    (number of apartments, index 10) and column 12 (total occupants,
    index 11) of ``district_data``.

    Parameters
    ----------
    district_data: ndarray
        Numpy 2d-array with city district data; relevant columns
        (0-based): 0 = id, 3 = building type, 4 = net floor area in m2,
        5 = year of construction, 10 = number of apartments,
        11 = total number of occupants (full layout documented in
        the city_generator annotations).
    BuildingTypes: str
        'SFH', 'DFH', 'MFH', 'STAT' or 'CUSTOM'.
    custom_p: list
        Apartment-count probabilities for 'CUSTOM' (10 items, sum == 1).
    list_occ_prob: list
        Occupancy statistics (rows: net floor class, columns: occupants);
        format as in
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'.
    list_ap_prob: list
        Apartment statistics (rows: build year class, columns: number of
        apartments); same file format.

    Returns
    -------
    district_data: ndarray
        The input array, modified in place.
    """
    # Partition residential buildings into construction year classes:
    # 0: before 1950, 1: 1950-1969, 2: 1970-1989, 3: 1990 and later.
    nodes_per_year_class = {0: [], 1: [], 2: [], 3: []}
    for i in range(len(district_data)):
        buildyear = district_data[i][5]
        build_type = district_data[i][3]
        assert buildyear is not None, ("No year of construction at node",
                                       district_data[i][0])
        if build_type != 0:
            continue  # only residential buildings get apartments assigned
        if buildyear < 1950:
            nodes_per_year_class[0].append(i)
        elif buildyear <= 1969:
            nodes_per_year_class[1].append(i)
        elif buildyear <= 1989:
            nodes_per_year_class[2].append(i)
        else:
            nodes_per_year_class[3].append(i)
    list_nb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]  # possible apartment counts
    for b_y_class, node_indices in nodes_per_year_class.items():
        if not node_indices:
            continue
        # The apartment-count distribution only depends on the year class,
        # so compute it once per class instead of once per building.
        prob = calc_num_ap_probability(BuildingTypes=BuildingTypes,
                                       build_year_class=b_y_class,
                                       list_ap_prob=list_ap_prob,
                                       custom_p=custom_p)
        # Bugfix: probabilities must be passed to np.random.choice via the
        # keyword ``p``. Previously they were passed positionally as the
        # ``replace`` flag, so the distribution was silently ignored and
        # apartment counts were drawn uniformly.
        prob = np.asarray(prob, dtype=float)
        prob = prob / prob.sum()  # guard against non-normalised input
        for i in node_indices:
            num_apartments = int(np.random.choice(list_nb, p=prob))
            # Split the building's net floor area evenly among apartments.
            ap_net_floor_area = district_data[i][4] / num_apartments
            occupants_total = 0
            for _ in range(num_apartments):
                occupants = determine_occ(ap_net_floor_area, list_occ_prob)
                # determine_occ returns a length-1 ndarray; unwrap to int
                # (avoids deprecated size-1-array -> scalar conversions).
                occupants_total += int(np.ravel(occupants)[0])
            district_data[i][11] = occupants_total
            district_data[i][10] = num_apartments
    return district_data
def save_dist_data_to_file(dist_data, path):
    """
    Save district data array to path. Replaces all None with np.nan.

    The replacement happens in place on ``dist_data`` before writing.

    Parameters
    ----------
    dist_data : ndarray
        Numpy 2d-array with city district data (each column represents
        a different parameter, see annotations)
    path : str
        Path to save file to

    Annotations
    -----------
    dist_data structure
    Columns:
    1:  id (int)
    2:  x in m (float)
    3:  y in m (float)
    4:  building_type (int, e.g. 0 for residential building)
    5:  net floor area in m2 (float)
    6:  Year of construction (int, optional)
    7:  Year of modernization (int, optional)
    8:  Annual (final) thermal energy demand in kWh (float, optional)
    9:  Annual electrical energy demand in kWh (float, optional)
    10: Usable pv roof area in m2 (float, optional)
    11: Number of apartments (int, optional)
    12: Total number of occupants (int, optional)
    13: Number of floors above the ground (int, optional)
    14: Average Height of floors (float, optional)
    15: If building has a central AHU or not (boolean, optional)
    16: Residential layout (int, optional, e.g. 0 for compact)
    17: Neighbour Buildings (int, optional)
    18: Type of attic (int, optional, e.g. 0 for flat roof)
    19: Type of cellar (int, optional, e.g. 1 for non heated cellar)
    20: Dormer (int, optional, 0: no dormer/ 1: dormer)
    21: Construction Type(heavy/light, optional)
    22: Method_3_nb (for usage of measured, weekly non-res. el. profile
        (optional)
    23: Method_4_nb (for usage of measured, annual non-res. el. profile
        (optional)
    """
    # Define header (tab-separated column names matching the columns above)
    header = 'id\tX\tY\tbuilding_type\ttab_ease_building_net_floor_area' \
             '\ttab_ease_building_build_year\ttab_ease_building_mod_year' \
             '\tAnnual thermal e demand in kWh' \
             '\tAnnual electr. E demand in kWh\tUsable pv roof area in m2' \
             '\tNumber of apartments\tTotal number of occupants' \
             '\tNumber of floors\tHeight of floors\twith ahu' \
             '\tresidential layout\tneighbour buildings\tattic\tcellar' \
             '\tdormer\tconstruction type\tmethod_3_type\tmethod_4_type'
    # Replace all None with np.nan to prevent saving errors.
    # BUG FIX: this loop previously read and wrote the module-level
    # ``district_data`` instead of the ``dist_data`` argument, so it only
    # worked by accident when called from the __main__ block below.
    for i in range(len(dist_data)):
        for j in range(len(dist_data[0])):
            if dist_data[i][j] is None:
                dist_data[i][j] = np.nan
    # Save to path
    np.savetxt(path, dist_data, delimiter='\t', header=header, fmt='%1.0f')
    print('Saved district data to path ' + str(path))
def get_list_occ_prob_from_csv():
    """
    Load statistical occupancy probabilities from a .csv file.

    The file holds the number of apartments with a specific net floor
    area (rows) and number of persons (columns).

    Returns
    -------
    list_occ_prob : list
        List (of lists) for calculation of occupancy probabilities.
        Rows: net_floor_class (see Annotations); columns: number of
        occupants. Database according to Zensus 2011.
        ROWS AND COLUMNS HAVE TO BE OF THE EXACT FORMAT AS IN FILE
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'

    Annotations
    -----------
    net_floor_class : int
        0:A<40m^2, 1:40m^2<=A<60m^2, 2:60m^2<=A<80m^2,....,
        8:180m^2<=A<200m^2, 9:A>200m^2
    """
    # Resolve <src>/data/BuildingData relative to this module's location
    # (three directory levels up from this file).
    module_dir = os.path.dirname(os.path.abspath(__file__))
    src_dir = os.path.dirname(os.path.dirname(os.path.dirname(module_dir)))
    csv_path = os.path.join(src_dir, 'data', 'BuildingData',
                            'Personen_Wohnungsflaeche.csv')
    with open(csv_path) as handle:
        return [row for row in csv.reader(handle, delimiter=';')]
def get_list_ap_prob_from_csv():
    """
    Load statistical apartment probabilities from a .csv file.

    The file holds the number of buildings with year of construction
    (rows) and number of apartments per building (columns).

    Returns
    -------
    list_ap_prob : list
        Apartment probabilities. Rows: buildyearclass (see Annotation);
        columns: number of apartments. Database according to Zensus 2011.
        ROWS AND COLUMNS HAVE TO BE OF THE EXACT FORMAT AS IN FILE
        'data/BuildingData/Personen_Wohnungsflaeche_commented.csv'

    Annotation
    ----------
    build_year_class : int
        year of build, 0: before 1950, 1: 1950 to 1969,
        2: 1970 to 1989, 3: after 1990
    """
    # Resolve <src>/data/BuildingData relative to this module's location
    # (three directory levels up from this file).
    module_dir = os.path.dirname(os.path.abspath(__file__))
    src_dir = os.path.dirname(os.path.dirname(os.path.dirname(module_dir)))
    csv_path = os.path.join(src_dir, 'data', 'BuildingData',
                            'Gebaeude_Wohnung_und_Baujahr.csv')
    with open(csv_path) as handle:
        return [row for row in csv.reader(handle, delimiter=';')]
if __name__ == '__main__':
    # Example run: load a district data file, enrich it with apartment and
    # occupant numbers, and save the result.
    # Input filename
    filename = 'test2.txt'
    # Output filename
    file_out = 'test_enrich2.txt'
    # Path definitions: input/ and output/ folders next to this module
    this_path = os.path.dirname(os.path.abspath(__file__))
    file_path = os.path.join(this_path, 'input', filename)
    out_path = os.path.join(this_path, 'output', file_out)
    # Load city district data set via the citgen helper
    district_data = citgen.get_district_data_from_txt(path=file_path)
    print('Total district data set:')
    print(district_data)
    print()
    # Column 11 holds the total number of occupants (see save function docs)
    print('Number of occupants per building before data enrichment:')
    print(district_data[:, [11]])
    print()
    # Load occupancy probability csv file to list
    list_occ_prob = get_list_occ_prob_from_csv()
    # Load apartment probability csv file to list
    list_ap_prob = get_list_ap_prob_from_csv()
    # Add occupants to district data.
    # Building types:
    # #########################################
    # 'str', SFH: Single family house
    # DFH: Double family house
    # MFH: Multiple family house
    # STAT: Statistic distribution of houses
    # CUSTOM: customized probabilities, list of 10 items required, sum(list) = 1
    BuildingTypes = "CUSTOM"
    # List of apartment number probabilities (index i = probability of
    # i+1 apartments; must sum to 1 for the CUSTOM mode above)
    custom_p = [0.4, 0.2, 0.2, 0.2, 0, 0, 0, 0, 0, 0]
    district_data = determine_ap_num(district_data, BuildingTypes, custom_p,
                                     list_occ_prob, list_ap_prob)
    print('Number of occupants per building after data enrichment:')
    print(district_data[:, [11]])
    # Save enriched data set to out_path
    save_dist_data_to_file(dist_data=district_data, path=out_path)
|
{"hexsha": "adbb3d0f4062378a95fd54e98ed872993b465d37", "size": 21209, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycity_calc/toolbox/data_enrichment/occupants/old/Calc_Buildingoccupancy.py", "max_stars_repo_name": "RWTH-EBC/pyCity_calc", "max_stars_repo_head_hexsha": "99fd0dab7f9a9030fd84ba4715753364662927ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-06-22T14:14:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T11:47:01.000Z", "max_issues_repo_path": "pycity_calc/toolbox/data_enrichment/occupants/old/Calc_Buildingoccupancy.py", "max_issues_repo_name": "RWTH-EBC/pyCity_calc", "max_issues_repo_head_hexsha": "99fd0dab7f9a9030fd84ba4715753364662927ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-08-28T19:42:28.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-28T19:43:44.000Z", "max_forks_repo_path": "pycity_calc/toolbox/data_enrichment/occupants/old/Calc_Buildingoccupancy.py", "max_forks_repo_name": "RWTH-EBC/pyCity_calc", "max_forks_repo_head_hexsha": "99fd0dab7f9a9030fd84ba4715753364662927ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3526220615, "max_line_length": 192, "alphanum_fraction": 0.6178509123, "include": true, "reason": "import numpy", "num_tokens": 5331}
|
"""Module containing image segmentation functions.
Example usage:
>>> import numpy as np
>>> from jicbioimage.core.image import Image
>>> ar = np.array([[1, 1, 0, 0, 0],
... [1, 1, 0, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 2, 2, 2],
... [0, 0, 2, 2, 2]], dtype=np.uint8)
...
>>> im = Image.from_array(ar)
>>> connected_components(im) # doctest: +NORMALIZE_WHITESPACE
SegmentedImage([[3, 3, 1, 1, 1],
[3, 3, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 2, 2, 2],
[1, 1, 2, 2, 2]])
>>> connected_components(im, background=0) # doctest: +NORMALIZE_WHITESPACE
SegmentedImage([[2, 2, 0, 0, 0],
[2, 2, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1]])
>>> segmentation = connected_components(im, background=0)
>>> segmentation.history
['Created image from array', 'Applied connected_components transform']
"""
import numpy as np
import scipy.ndimage as nd
import skimage.measure
import skimage.morphology
from jicbioimage.core.image import Image
from jicbioimage.core.transform import transformation
from jicbioimage.core.util.array import pretty_color_array, unique_color_array
__version__ = "0.4.0"
class Region(np.ndarray):
    """Class representing a region of interest in an image.

    The :class:`jicbioimage.core.region.Region` class is a subclass of
    numpy.ndarray.

    However, note that it will compress any data given to it to boolean.

    >>> import numpy as np
    >>> ar = np.array([-1, 0, 1, 2])
    >>> Region(ar)
    Region([ True, False, True, True], dtype=bool)

    To select an particular element use the
    :func:`jicbioimage.core.region.Region.select_from_array` class method.

    >>> Region.select_from_array(ar, identifier=2)
    Region([False, False, False, True], dtype=bool)
    """

    def __new__(cls, input_array):
        # View the input as a Region and collapse it to boolean.
        obj = np.asarray(input_array).view(cls)
        return obj.astype(bool)

    @classmethod
    def select_from_array(cls, array, identifier):
        """Return a region from a numpy array.

        :param array: :class:`numpy.ndarray`
        :param identifier: value representing the region to select in the array
        :returns: :class:`jicbioimage.core.region.Region`
        """
        base_array = np.zeros(array.shape)
        base_array[np.where(array == identifier)] = 1
        return cls(base_array)

    @property
    def inner(self):
        """Region formed by taking non-border elements.

        :returns: :class:`jicbioimage.core.region.Region`
        """
        # FIX: use the top-level scipy.ndimage function; the
        # scipy.ndimage.morphology namespace is deprecated.
        inner_array = nd.binary_erosion(self)
        return Region(inner_array)

    @property
    def border(self):
        """Region formed by taking border elements.

        :returns: :class:`jicbioimage.core.region.Region`
        """
        # FIX: ``self - self.inner`` relied on boolean subtraction, which
        # raises TypeError on modern numpy. Since inner is a subset of self,
        # logical XOR is the exact equivalent.
        border_array = np.logical_xor(self, self.inner)
        return Region(border_array)

    @property
    def convex_hull(self):
        """Region representing the convex hull.

        :returns: :class:`jicbioimage.core.region.Region`
        """
        hull_array = skimage.morphology.convex_hull_image(self)
        return Region(hull_array)

    @property
    def area(self):
        """Number of non-zero elements.

        :returns: int
        """
        return np.count_nonzero(self)

    @property
    def index_arrays(self):
        """All nonzero elements as a pair of arrays."""
        return np.where(self)

    @property
    def points(self):
        """Region as a list of points."""
        return list(zip(*self.index_arrays))

    @property
    def perimeter(self):
        """Return the perimiter.

        :returns: int
        """
        return self.border.area

    def dilate(self, iterations=1):
        """Return a dilated region.

        :param iterations: number of iterations to use in dilation
        :returns: :class:`jicbioimage.core.region.Region`
        """
        dilated_array = nd.binary_dilation(self, iterations=iterations)
        return Region(dilated_array)

    @property
    def centroid(self):
        """Return centroid as (y, x) tuple."""
        return tuple([np.mean(ia) for ia in self.index_arrays])
class SegmentedImage(Image):
    """Image whose pixel values encode the result of a segmentation.

    Each unique pixel value represents a different region of the
    segmentation. 0 represents background and positive integers represent
    the different regions.
    """

    @property
    def identifiers(self):
        """Return a set of unique identifiers in the segmented image."""
        return set(np.unique(self)) - {0}

    @property
    def number_of_segments(self):
        """Return the number of segments present in the segmented image."""
        return len(self.identifiers)

    def region_by_identifier(self, identifier):
        """Return region of interest corresponding to the supplied identifier.

        :param identifier: integer corresponding to the segment of interest
        :returns: `jicbioimage.core.region.Region`
        """
        # Validation order matters for the error reported: negative values
        # first, then non-integers, then the reserved background value 0.
        if identifier < 0:
            raise ValueError("Identifier must be a positive integer.")
        if not np.equal(np.mod(identifier, 1), 0):
            raise ValueError("Identifier must be a positive integer.")
        if identifier == 0:
            raise ValueError("0 represents the background.")
        return Region.select_from_array(self, identifier)

    @property
    def background(self):
        """Return the segmented image background.

        In other words the region with pixel values 0.

        :returns: `jicbioimage.core.region.Region`
        """
        return Region.select_from_array(self, 0)

    @property
    def pretty_color_image(self):
        """Return segmentation as a pretty color image.

        :returns: `jicbioimage.core.image.Image`
        """
        return Image.from_array(pretty_color_array(self))

    @property
    def unique_color_image(self):
        """Return segmentation as a unique color image.

        :returns: `jicbioimage.core.image.Image`
        """
        return Image.from_array(unique_color_array(self))

    def png(self, width=None):
        """Return png string of image.

        :param width: integer specifying the desired width
        :returns: png as a string
        """
        return self.pretty_color_image.png(width)

    def remove_region(self, identifier):
        """Remove region from the segmentation.

        :param identifier: region identifier
        """
        doomed = self.region_by_identifier(identifier)
        self[doomed] = 0

    def merge_regions(self, id1, id2):
        """Merge two regions into one.

        The merged region will take on the id1 identifier.

        :param id1: region 1 identifier
        :param id2: region 2 identifier
        """
        absorbed = self.region_by_identifier(id2)
        self[absorbed] = id1
@transformation
def connected_components(image, connectivity=2, background=None):
    """Return :class:`jicbioimage.core.image.SegmentedImage`.

    :param image: input :class:`jicbioimage.core.image.Image`
    :param connectivity: maximum number of orthagonal hops to consider a
                         pixel/voxel as a neighbor
    :param background: consider all pixels with this value (int) as background
    :returns: :class:`jicbioimage.core.image.SegmentedImage`
    """
    # Work around skimage.measure.label behaviour in version 0.12 and higher:
    # it treats all 0 pixels as background even if the "background" argument
    # is set to None. Relabel 0 pixels to max+1 so they form a component.
    # NOTE(review): this writes into ``image`` in place -- confirm that the
    # @transformation decorator hands this function a copy of the caller's
    # image.
    if background is None:
        image[np.where(image == 0)] = np.max(image) + 1
    ar = skimage.measure.label(image, connectivity=connectivity,
                               background=background)
    # The :class:`jicbioimage.core.image.SegmentedImage` assumes that zero is
    # background. So we need to change the identifier of any pixels that are
    # marked as zero if there is no background in the input image.
    if background is None:
        ar[np.where(ar == 0)] = np.max(ar) + 1
    else:
        if np.min(ar) == -1:
            # Work around skimage.measure.label behaviour pre version 0.12.
            # Pre version 0.12 the background in skimage was labeled -1 and the
            # first component was labelled with 0.
            # The jicbioimage.core.image.SegmentedImage assumes that the
            # background is labelled 0, so shift label 0 out of the way and
            # then map -1 onto 0.
            ar[np.where(ar == 0)] = np.max(ar) + 1
            ar[np.where(ar == -1)] = 0
    segmentation = SegmentedImage.from_array(ar)
    return segmentation
@transformation
def watershed_with_seeds(image, seeds, mask=None):
    """Return :class:`jicbioimage.core.image.SegmentedImage`.

    Runs a seeded watershed on the negated image, so bright areas of the
    input act as basins that grow out from the seeds.

    :param image: input :class:`jicbioimage.core.image.Image`
    :param seeds: numpy.ndarray of same shape as image,
                  each seed needs to be a unique integer
    :param mask: bool numpy.ndarray of same shape as image,
                 only regions that are marked as True will be labelled
    :returns: :class:`jicbioimage.core.image.SegmentedImage`
    """
    # NOTE(review): skimage.morphology.watershed was moved to
    # skimage.segmentation.watershed in scikit-image 0.19 -- confirm the
    # pinned scikit-image version still exposes it here.
    ar = skimage.morphology.watershed(-image, seeds, mask=mask)
    segmentation = SegmentedImage.from_array(ar)
    return segmentation
|
{"hexsha": "856830e710c00794423a433b7c7abec563d9f7d5", "size": 9392, "ext": "py", "lang": "Python", "max_stars_repo_path": "jicbioimage/segment/__init__.py", "max_stars_repo_name": "JIC-CSB/jicbioimage.segment", "max_stars_repo_head_hexsha": "289e5ab834913326a097e57bea458ea0737efb0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jicbioimage/segment/__init__.py", "max_issues_repo_name": "JIC-CSB/jicbioimage.segment", "max_issues_repo_head_hexsha": "289e5ab834913326a097e57bea458ea0737efb0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jicbioimage/segment/__init__.py", "max_forks_repo_name": "JIC-CSB/jicbioimage.segment", "max_forks_repo_head_hexsha": "289e5ab834913326a097e57bea458ea0737efb0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3066666667, "max_line_length": 79, "alphanum_fraction": 0.6275553663, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2213}
|
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
import matplotlib.pyplot as plt
pio.templates.default = "simple_white"
def load_data(filename: str) -> pd.DataFrame:
    """
    Load city daily temperature dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to the city temperature dataset

    Returns
    -------
    Design matrix and response vector (Temp)
    """
    frame = pd.read_csv(filename, parse_dates=['Date'])
    frame = frame.dropna()
    frame = frame.drop_duplicates()
    # Derive a day-of-year feature from the parsed Date column
    frame['DayOfYear'] = frame['Date'].dt.dayofyear
    # Discard invalid samples (temperatures far below anything plausible
    # for the recorded cities)
    return frame[frame['Temp'] > -50]
if __name__ == '__main__':
    # Fixed seed so the train/test split and any sampling are reproducible
    np.random.seed(0)
    # Question 1 - Load and preprocessing of city temperature dataset
    df = load_data("../datasets/City_Temperature.csv")
    # Question 2 - Exploring data for specific country
    df_israel = df[df['Country'] == 'Israel']
    fig, ax = plt.subplots()
    grouped = df_israel.groupby('Year')
    # One point cloud per year, drawn as markers only (no connecting lines)
    for name, group in grouped:
        ax.plot(group.DayOfYear, group.Temp, marker='o', linestyle='', ms=1, label=name)
    plt.title("Temperature by Day of Year, color coded by year")
    plt.xlabel("Day of Year")
    plt.ylabel("Temperature")
    ax.legend(loc=2, prop={'size': 5})
    plt.show()
    # Plot 2: Month to standard deviation of temp
    grouped = df_israel.groupby('Month').agg('std')
    plt.bar(x=grouped.index, height=grouped.Temp)
    plt.title("Standard deviation of daily Temperature by Month")
    plt.xlabel("Month")
    plt.ylabel("std (temp)")
    plt.show()
    # Question 3 - Exploring differences between countries:
    # per-country monthly mean temperature with std error bars
    fig, ax = plt.subplots()
    for country in df['Country'].unique():
        df_country = df[df['Country'] == country]
        std_group = df_country.groupby(['Country', 'Month']).agg('std')
        avg_group = df_country.groupby(['Country', 'Month']).agg('mean')
        ax.errorbar(np.arange(1, 13),
                    avg_group.Temp,
                    std_group.Temp,
                    capsize=4,
                    elinewidth=1,
                    markeredgewidth=1, label=country)
    plt.title("Avg temperature by Month & Country, with Error Bars")
    plt.xlabel("Month")
    plt.ylabel("Average Temperature")
    plt.legend()
    plt.show()
    # Question 4 - Fitting model for different values of `k`
    # df_israel = df_israel.sample(frac=1)  # shuffle all rows randomly
    X = df_israel['DayOfYear']
    y = df_israel['Temp']
    train_X, train_y, test_X, test_y = split_train_test(X, y, 0.75)
    results = []
    # Fit polynomial models of degree 1..10 and record test MSE per degree
    for k in range(1, 11):
        polyfit = PolynomialFitting(k)
        polyfit.fit(train_X.to_numpy(), train_y.to_numpy())
        error = round(polyfit.loss(test_X.to_numpy(), test_y.to_numpy()), 2)
        results.append(error)
        print(f"k = {k}, MSE = {error}")
    # Bar positions 0..9 relabelled with the actual degrees 1..10
    plt.bar(np.arange(10), results)
    plt.xticks(np.arange(10), np.arange(1, 11))
    plt.title("Prediction Loss as a function of poly degree k")
    plt.xlabel("k value")
    plt.ylabel("MSE")
    plt.show()
    # Question 5 - Evaluating fitted model on different countries:
    # train a degree-5 model on all Israel data, evaluate on each other country
    polyfit = PolynomialFitting(5)
    polyfit.fit(df_israel['DayOfYear'].to_numpy(), df_israel['Temp'].to_numpy())
    results = []
    countries = list(df['Country'].unique())
    countries.remove('Israel')
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c']
    for country in countries:
        df_country = df[df['Country'] == country]
        X = df_country['DayOfYear'].to_numpy()
        y = df_country['Temp'].to_numpy()
        results.append(polyfit.loss(X, y))
    fig, ax = plt.subplots()
    bars = ax.bar(countries, results, color=colors)
    ax.bar_label(bars)
    plt.ylabel("MSE")
    plt.title("MSE by country from data learned on Israel")
    plt.show()
|
{"hexsha": "83df1eebd6025e9d86dee8c7bf5b2b9d7585ae2c", "size": 4020, "ext": "py", "lang": "Python", "max_stars_repo_path": "exercises/city_temperature_prediction.py", "max_stars_repo_name": "wolfo1/IML.HUJI", "max_stars_repo_head_hexsha": "0b32e552774d0be747547ab8b3eedbcd19cc11e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exercises/city_temperature_prediction.py", "max_issues_repo_name": "wolfo1/IML.HUJI", "max_issues_repo_head_hexsha": "0b32e552774d0be747547ab8b3eedbcd19cc11e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exercises/city_temperature_prediction.py", "max_forks_repo_name": "wolfo1/IML.HUJI", "max_forks_repo_head_hexsha": "0b32e552774d0be747547ab8b3eedbcd19cc11e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.880733945, "max_line_length": 88, "alphanum_fraction": 0.6410447761, "include": true, "reason": "import numpy", "num_tokens": 1016}
|
# In-place RHS for the fully nonlinear (NL) system.
# ``u`` and ``du`` are complex coefficient arrays indexed as
# ``u[n+ny, m+1]`` for zonal index m in 0:nx-1 and meridional index n in
# -(ny-1):ny-1; for m == 0 only n >= 1 entries are used (see the
# ``m == 0 ? 1 : -(ny-1)`` lower bounds below).
# Parameters p = (nx, ny, A, B, Cp, Cm): A is the constant forcing vector,
# B the linear coefficient array, and Cp/Cm the quadratic coupling
# coefficients for the m1+m2 ("++") and m1-m2 ("+-") interactions.
function nl_eqs!(du,u,p,t)
    nx::Int,ny::Int,A::Array{ComplexF64,1},B::Array{ComplexF64,2},Cp::Array{Float64,4},Cm::Array{Float64,4} = p
    du .= 0.0 + 0.0im
    # @views du[ny:end,1] = A[ny:end]
    # constant terms (forcing acts only on the m = 0 column)
    @inbounds for n=1:1:ny-1
        du[n+ny,1] += A[n+ny]
    end
    # linear terms
    @inbounds for m = 0:nx-1
        nmin = m == 0 ? 1 : -(ny-1)
        @inbounds for n=nmin:ny-1
            du[n+ny,m+1] += B[n+ny,m+1]*u[n+ny,m+1]
        end
    end
    # ++ interactions: target mode (m, n) = (m1+m2, n1+n2); the inner
    # bounds keep the resulting m and n inside the stored range
    @inbounds for m1=1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:min(m1,nx-1-m1)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,-(ny-1)-n1):min(ny-1,ny-1-n1)
                    m::Int = m1 + m2
                    n::Int = n1 + n2
                    du[n+ny,m+1] += Cp[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*u[n2+ny,m2+1]
                end
            end
        end
    end
    # +- interactions: target mode (m, n) = (m1-m2, n1-n2), using the
    # conjugate of the second factor
    @inbounds for m1=1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:m1
                n2min = m2 == 0 ? 1 : -(ny-1)
                # when m2 == m1 restrict n2 < n1 so the target stays n >= 1
                n2max = m2 == m1 ? n1 - 1 : ny-1
                @inbounds for n2=max(n2min,n1-(ny-1)):min(n2max,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    du[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*conj(u[n2+ny,m2+1])
                end
            end
        end
    end
    nothing
end
# In-place RHS for the generalized quasilinear (GQL) system.
# Same indexing as nl_eqs! but with cutoff Λ splitting zonal modes into
# low (L: m <= Λ) and high (H: m > Λ); only the interaction classes kept
# in the sections below (L+L=L, L-L=L, H-H=L, H+L=H, H-L=H) contribute.
function gql_eqs!(du,u,p,t)
    nx::Int,ny::Int,Λ::Int,A::Array{ComplexF64,1},B::Array{ComplexF64,2},Cp::Array{Float64,4},Cm::Array{Float64,4} = p
    du .= 0.0 + 0.0im
    # constant terms (forcing acts only on the m = 0 column)
    @inbounds for n=1:ny-1
        du[n+ny,1] += A[n+ny]
    end
    # linear terms (applied to all modes, low and high)
    @inbounds for m = 0:nx-1
        nmin = m == 0 ? 1 : -(ny-1)
        @inbounds for n=nmin:ny-1
            du[n+ny,m+1] += B[n+ny,m+1]*u[n+ny,m+1]
        end
    end
    # L + L = L
    @inbounds for m1=1:Λ
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:min(m1,Λ-m1)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,-(ny-1)-n1):min(ny-1,ny-1-n1)
                    m::Int = m1 + m2
                    n::Int = n1 + n2
                    du[n+ny,m+1] += Cp[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*u[n2+ny,m2+1]
                end
            end
        end
    end
    # L - L = L
    # NOTE(review): this section has no @inbounds, unlike all the others --
    # confirm whether that is intentional.
    for m1=1:Λ
        for n1=-(ny-1):ny-1
            for m2=0:m1
                n2min = m2 == 0 ? 1 : -(ny-1)
                n2max = m2 == m1 ? n1 - 1 : ny-1
                for n2=max(n2min,n1-(ny-1)):min(n2max,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    du[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*conj(u[n2+ny,m2+1])
                end
            end
        end
    end
    # H - H = L
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=max(Λ+1,m1-Λ):m1
                n2max = m2 == m1 ? n1 - 1 : ny-1
                @inbounds for n2=max(-(ny-1),n1-(ny-1)):min(n2max,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    du[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*conj(u[n2+ny,m2+1])
                end
            end
        end
    end
    # H + L = H
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:min(nx-1-m1,Λ)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,-(ny-1)-n1):min(ny-1,ny-1-n1)
                    m::Int = m1 + m2
                    n::Int = n1 + n2
                    du[n+ny,m+1] += Cp[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*u[n2+ny,m2+1]
                end
            end
        end
    end
    # H - L = H (m2 range keeps the target m = m1 - m2 above the cutoff)
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:1:min(Λ,m1 - Λ - 1)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,n1-(ny-1)):1:min(ny-1,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    du[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*u[n1+ny,m1+1]*conj(u[n2+ny,m2+1])
                end
            end
        end
    end
    nothing
end
# In-place RHS for the GCE2 system (second-order cumulant expansion above
# the cutoff Λ). ``u`` is a partitioned state: u.x[1] holds the low-mode
# (m <= Λ) coefficients with the same indexing as nl_eqs!/gql_eqs!, and
# u.x[2] holds the high-mode second cumulant, a rank-4 array indexed as
# [n1+ny, m1-Λ, n2+ny, m2-Λ] containing H2*conj(H1) (see note below).
# Extra parameters dx, dy, temp are preallocated scratch arrays reused
# across calls to avoid per-call allocation.
function gce2_eqs!(du,u,p,t)
    nx::Int,ny::Int,Λ::Int,A::Array{ComplexF64,1},B::Array{ComplexF64,2},Cp::Array{Float64,4},Cm::Array{Float64,4},dx::Array{ComplexF64,2},dy::Array{ComplexF64,4},temp::Array{ComplexF64,4} = p
    # low mode equations
    # du.x[1] .= 0.0 + 0.0im
    dx .= 0.0 + 0.0im
    # constant terms (forcing acts only on the m = 0 column)
    @inbounds for n=1:ny-1
        dx[n+ny,1] += A[n+ny]
    end
    # linear terms: L
    @inbounds for m = 0:Λ
        nmin = m == 0 ? 1 : -(ny-1)
        @inbounds for n=nmin:ny-1
            dx[n+ny,m+1] += B[n+ny,m+1]*u.x[1][n+ny,m+1]
        end
    end
    # L + L = L
    @inbounds for m1=1:Λ
        n1min = m1 == 0 ? 1 : -(ny-1)
        @inbounds for n1=n1min:ny-1
            @inbounds for m2=0:1:min(m1,Λ-m1)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,-(ny-1)-n1):min(ny-1,ny-1-n1)
                    m::Int = m1 + m2
                    n::Int = n1 + n2
                    dx[n+ny,m+1] += Cp[n2+ny,m2+1,n1+ny,m1+1]*u.x[1][n1+ny,m1+1]*u.x[1][n2+ny,m2+1]
                end
            end
        end
    end
    # L - L = L
    @inbounds for m1=1:Λ
        n1min = m1 == 0 ? 1 : -(ny-1)
        @inbounds for n1=n1min:ny-1
            @inbounds for m2=0:m1
                n2min = m2 == 0 ? 1 : -(ny-1)
                n2max = m2 == m1 ? n1 - 1 : ny-1
                @inbounds for n2=max(n2min,n1-(ny-1)):min(n2max,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    dx[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*u.x[1][n1+ny,m1+1]*conj(u.x[1][n2+ny,m2+1])
                end
            end
        end
    end
    # H - H = L: feedback of the second cumulant onto the low modes
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=max(Λ+1,m1-Λ):m1
                n2max = m2 == m1 ? n1 - 1 : ny-1
                @inbounds for n2=max(-(ny-1),n1-(ny-1)):min(n2max,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    # note: u.x[2] contains H2*conj(H1) so H-H is conj(H2)*H1
                    dx[n+ny,m+1] += Cm[n2+ny,m2+1,n1+ny,m1+1]*conj(u.x[2][n2+ny,m2-Λ,n1+ny,m1-Λ])
                end
            end
        end
    end
    du.x[1] .= dx
    # field bilinear equations
    dy .= 0.0 + 0.0im
    # temp = fill!(similar(du.x[2]),0)
    temp .= 0.0 + 0.0im
    # H + L = H: accumulate the low-mode factors acting on each high mode
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:1:min(nx-1-m1,Λ)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,-(ny-1)-n1):min(ny-1,ny-1-n1)
                    m::Int = m1 + m2
                    n::Int = n1 + n2
                    temp[n1+ny,m1-Λ,n+ny,m-Λ] += Cp[n2+ny,m2+1,n1+ny,m1+1]*u.x[1][n2+ny,m2+1]
                end
            end
        end
    end
    # H - L = H (conjugated low-mode factor)
    @inbounds for m1=Λ+1:nx-1
        @inbounds for n1=-(ny-1):ny-1
            @inbounds for m2=0:1:min(Λ,m1 - Λ - 1)
                n2min = m2 == 0 ? 1 : -(ny-1)
                @inbounds for n2=max(n2min,n1-(ny-1)):min(ny-1,n1+ny-1)
                    m::Int = m1 - m2
                    n::Int = n1 - n2
                    temp[n1+ny,m1-Λ,n+ny,m-Λ] += Cm[n2+ny,m2+1,n1+ny,m1+1]*conj(u.x[1][n2+ny,m2+1])
                end
            end
        end
    end
    # H'*H: contract the accumulated factors with the second cumulant
    @inbounds for m3=Λ+1:nx-1
        @inbounds for n3=-(ny-1):ny-1
            @inbounds for m=Λ+1:nx-1
                @inbounds for n=-(ny-1):ny-1
                    dy[n+ny,m-Λ,n3+ny,m3-Λ] += B[n+ny,m+1]*u.x[2][n+ny,m-Λ,n3+ny,m3-Λ]
                    accumulator::ComplexF64 = 0.0 + 0.0im
                    # from H+L
                    @inbounds for m1=max(Λ+1,m-Λ):min(nx-1,m)
                        n2min = m1 == m ? 1 : -(ny-1)
                        @inbounds for n1=max(-(ny-1),n-(ny-1)):min(n-n2min,ny-1)
                            accumulator += temp[n1+ny,m1-Λ,n+ny,m-Λ]*u.x[2][n1+ny,m1-Λ,n3+ny,m3-Λ]
                        end
                    end
                    # from H-L
                    @inbounds for m1=max(Λ+1,m):min(nx-1,m+Λ)
                        n2max = m1 == m ? -1 : ny-1
                        @inbounds for n1=max(-(ny-1),n-n2max):min(n+ny-1,ny-1)
                            accumulator += temp[n1+ny,m1-Λ,n+ny,m-Λ]*u.x[2][n1+ny,m1-Λ,n3+ny,m3-Λ]
                        end
                    end
                    dy[n+ny,m-Λ,n3+ny,m3-Λ] += accumulator
                end
            end
        end
    end
    # Symmetrize: du.x[2] = dy + dy' (conjugate with index pairs swapped),
    # matching the Hermitian structure of the second cumulant.
    # permutedims!(temp,du.x[2],[3,4,1,2])
    @inbounds for m3=Λ+1:nx-1
        @inbounds for n3=-(ny-1):ny-1
            @inbounds for m=Λ+1:nx-1
                @inbounds for n=-(ny-1):ny-1
                    du.x[2][n+ny,m-Λ,n3+ny,m3-Λ] = dy[n+ny,m-Λ,n3+ny,m3-Λ] + conj(dy[n3+ny,m3-Λ,n+ny,m-Λ])
                end
            end
        end
    end
    nothing
end
|
{"hexsha": "a76d565c51d892e2d2845c1e035e2e1376408876", "size": 9234, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/equations.jl", "max_stars_repo_name": "artagnon/ZonalFlow.jl", "max_stars_repo_head_hexsha": "e89d832e5c8ebf32db9195d9c1cd5b68d21c1c69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/equations.jl", "max_issues_repo_name": "artagnon/ZonalFlow.jl", "max_issues_repo_head_hexsha": "e89d832e5c8ebf32db9195d9c1cd5b68d21c1c69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/equations.jl", "max_forks_repo_name": "artagnon/ZonalFlow.jl", "max_forks_repo_head_hexsha": "e89d832e5c8ebf32db9195d9c1cd5b68d21c1c69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7652173913, "max_line_length": 192, "alphanum_fraction": 0.4049166125, "num_tokens": 3771}
|
import numpy as np
import pandas as pd
import ipywidgets as W
import plotly.express as px
from tqdm import tqdm
from IPython.display import display
from .io import ms_file_to_df
class ManualRetentionTimeOptimizer():
    """Interactive widget for inspecting and adjusting peak retention times.

    Builds a dropdown over ``mint.peaklist.peak_label`` plus inputs to set
    new rt_min/rt_max values for a peak, or remove a peak from the peaklist.

    Parameters
    ----------
    mint : object
        Mint-like object exposing ``ms_files`` (iterable of file names)
        and ``peaklist`` (DataFrame with peak_label, mz_mean, mz_width,
        rt_min and rt_max columns).
    """

    def __init__(self, mint):
        # Concatenate all MS files into one long-format dataframe; each
        # row keeps track of its source file for per-file plotting.
        self.df = pd.concat(
            [ms_file_to_df(fn).assign(ms_file=fn) for fn in tqdm(mint.ms_files)])
        self.out = W.Output()
        self.mint = mint
        self.w_rt_min = W.FloatText(
            value=0,
            description='RT min:',
            disabled=False
        )
        self.w_rt_max = W.FloatText(
            value=13,
            description='RT max:',
            disabled=False,
        )
        self.set_rt_button = W.Button(description='Set new RT')
        self.delete_button = W.Button(description='Remove from peaklist')
        self.menu = W.Dropdown(options=mint.peaklist.peak_label, value=None)

        def update(*args):
            # Re-plot whenever a new peak label is selected.
            peak_label = self.menu.value
            self.plot(peak_label)

        def update_rt(button):
            # Write the widget values back into the selected peaklist row.
            rt_min, rt_max = self.w_rt_min.value, self.w_rt_max.value
            peak_label = self.menu.value
            selected = self.mint.peaklist.peak_label == peak_label
            self.mint.peaklist.loc[selected, 'rt_min'] = rt_min
            self.mint.peaklist.loc[selected, 'rt_max'] = rt_max
            self.plot(peak_label)

        def remove_peak(button):
            # Drop the selected peak and refresh the dropdown options.
            # CONSISTENCY FIX: go through self.mint like the other
            # callbacks instead of the captured ``mint`` argument.
            peak_label = self.menu.value
            self.mint.peaklist = self.mint.peaklist[
                self.mint.peaklist.peak_label != peak_label]
            new_options = self.mint.peaklist.peak_label
            self.menu.options = new_options

        self.menu.observe(update, names='value')
        self.set_rt_button.on_click(update_rt)
        self.delete_button.on_click(remove_peak)
        self.layout = W.VBox([self.menu,
                              self.w_rt_min,
                              self.w_rt_max,
                              self.set_rt_button,
                              self.out,
                              self.delete_button],
                             )

    def plot(self, peak_label):
        """Plot intensity traces for ``peak_label`` in the output widget.

        Selects all scans within the peak's m/z window, draws one trace per
        MS file zoomed to the peak's rt window, and copies rt_min/rt_max
        into the editing widgets.
        """
        peak_data = self.mint.peaklist[
            self.mint.peaklist.peak_label == peak_label].T.iloc[:, 0]
        mz_mean, mz_width, rt_min, rt_max = peak_data[
            ['mz_mean', 'mz_width', 'rt_min', 'rt_max']]
        # mz_width is applied as a fraction of mz_mean scaled by 1e-6
        # (i.e. treated as ppm)
        dmz = mz_mean * 1e-6 * mz_width
        selection = self.df[np.abs(self.df['m/z array'] - mz_mean) <= dmz]
        fig = px.line(data_frame=selection, x='retentionTime',
                      y='intensity array', color='ms_file', title=peak_label)
        fig.update_layout(showlegend=False)
        fig.update_layout(
            hovermode='closest',
            xaxis=dict(
                range=[rt_min, rt_max]
            ))
        self.out.clear_output()
        with self.out:
            display(fig)
        self.w_rt_min.value, self.w_rt_max.value = rt_min, rt_max

    def show(self):
        """Return the assembled widget layout for display."""
        return self.layout
|
{"hexsha": "81f13b4d484d442d39720b4cb9da6606ee026454", "size": 3051, "ext": "py", "lang": "Python", "max_stars_repo_path": "ms_mint/peak_optimization/ManualRetentionTimeOptimizer.py", "max_stars_repo_name": "luis-ponce/ms-mint", "max_stars_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ms_mint/peak_optimization/ManualRetentionTimeOptimizer.py", "max_issues_repo_name": "luis-ponce/ms-mint", "max_issues_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ms_mint/peak_optimization/ManualRetentionTimeOptimizer.py", "max_forks_repo_name": "luis-ponce/ms-mint", "max_forks_repo_head_hexsha": "cefd0d455c6658bf8c737160bd7253bb147c9c14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1630434783, "max_line_length": 99, "alphanum_fraction": 0.5568666011, "include": true, "reason": "import numpy", "num_tokens": 685}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest, catch_warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.modeling import models
from ..core import SegmentationImage
from ..deblend import deblend_sources
from ..detect import detect_sources
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import skimage
HAS_SKIMAGE = True
except ImportError:
HAS_SKIMAGE = False
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.skipif('not HAS_SKIMAGE')
class TestDeblendSources(object):
    """Tests for deblend_sources() on synthetic blends of 2D Gaussians."""

    def setup_class(self):
        # g1 and g2 overlap strongly and are detected as a single segment;
        # g3 adds a third, fainter source for the three-source tests.
        g1 = models.Gaussian2D(100, 50, 50, 5, 5)
        g2 = models.Gaussian2D(100, 35, 50, 5, 5)
        g3 = models.Gaussian2D(30, 70, 50, 5, 5)
        y, x = np.mgrid[0:100, 0:100]
        self.data = g1(x, y) + g2(x, y)
        self.data3 = self.data + g3(x, y)
        self.threshold = 10
        self.npixels = 5
        self.segm = detect_sources(self.data, self.threshold, self.npixels)
        self.segm3 = detect_sources(self.data3, self.threshold, self.npixels)

    @pytest.mark.parametrize('mode', ['exponential', 'linear'])
    def test_deblend_sources(self, mode):
        # Two identical sources must deblend into two labels with equal
        # pixel counts and equal summed flux over the same footprint.
        result = deblend_sources(self.data, self.segm, self.npixels,
                                 mode=mode)
        assert result.nlabels == 2
        assert result.nlabels == len(result.slices)
        mask1 = (result.data == 1)
        mask2 = (result.data == 2)
        assert_allclose(len(result.data[mask1]), len(result.data[mask2]))
        assert_allclose(np.sum(self.data[mask1]), np.sum(self.data[mask2]))
        # Deblending must not change which pixels belong to a source.
        assert_allclose(np.nonzero(self.segm), np.nonzero(result))

    @pytest.mark.parametrize('mode', ['exponential', 'linear'])
    def test_deblend_sources_norelabel(self, mode):
        # With relabel=False the labels are not compacted, so the slices
        # list keeps a placeholder entry for the original (split) label.
        result = deblend_sources(self.data, self.segm, self.npixels,
                                 mode=mode, relabel=False)
        assert result.nlabels == 2
        assert len(result.slices) <= result.max
        assert len(result.slices) == 3    # label 1 is None
        assert_allclose(np.nonzero(self.segm), np.nonzero(result))

    @pytest.mark.parametrize('mode', ['exponential', 'linear'])
    def test_deblend_three_sources(self, mode):
        result = deblend_sources(self.data3, self.segm3, self.npixels,
                                 mode=mode)
        assert result.nlabels == 3
        assert_allclose(np.nonzero(self.segm3), np.nonzero(result))

    def test_deblend_sources_segm_array(self):
        # A plain ndarray segmentation image is also accepted.
        result = deblend_sources(self.data, self.segm.data, self.npixels)
        assert result.nlabels == 2

    def test_segment_img_badshape(self):
        # Segmentation image shape must match the data shape.
        segm_wrong = np.zeros((2, 2))
        with pytest.raises(ValueError):
            deblend_sources(self.data, segm_wrong, self.npixels)

    def test_invalid_nlevels(self):
        with pytest.raises(ValueError):
            deblend_sources(self.data, self.segm, self.npixels, nlevels=0)

    def test_invalid_contrast(self):
        with pytest.raises(ValueError):
            deblend_sources(self.data, self.segm, self.npixels, contrast=-1)

    def test_invalid_mode(self):
        with pytest.raises(ValueError):
            deblend_sources(self.data, self.segm, self.npixels,
                            mode='invalid')

    def test_invalid_connectivity(self):
        with pytest.raises(ValueError):
            deblend_sources(self.data, self.segm, self.npixels,
                            connectivity='invalid')

    def test_constant_source(self):
        # A perfectly flat source has no peaks to separate, so the
        # segmentation image must come back unchanged.
        data = self.data.copy()
        data[data.nonzero()] = 1.
        result = deblend_sources(data, self.segm, self.npixels)
        assert_allclose(result, self.segm)

    def test_source_with_negval(self):
        # Negative data values should trigger an AstropyUserWarning.
        data = self.data.copy()
        data -= 20
        with catch_warnings(AstropyUserWarning) as warning_lines:
            deblend_sources(data, self.segm, self.npixels)
            assert ('contains negative values' in
                    str(warning_lines[0].message))

    def test_source_zero_min(self):
        # Shifting the data so its minimum over the segment is zero must
        # not change the deblending result.
        data = self.data.copy()
        data -= data[self.segm.data > 0].min()
        result1 = deblend_sources(self.data, self.segm, self.npixels)
        result2 = deblend_sources(data, self.segm, self.npixels)
        assert_allclose(result1, result2)

    def test_connectivity(self):
        """Regression test for #341."""
        # A diagonal line is a single source with 8-connectivity, but is
        # disconnected with 4-connectivity, which must raise.
        data = np.zeros((3, 3))
        data[0, 0] = 2
        data[1, 1] = 2
        data[2, 2] = 1
        segm = np.zeros_like(data)
        segm[data.nonzero()] = 1
        segm = SegmentationImage(segm)
        data = data * 100.
        segm_deblend = deblend_sources(data, segm, npixels=1, connectivity=8)
        assert segm_deblend.nlabels == 1
        with pytest.raises(ValueError):
            deblend_sources(data, segm, npixels=1, connectivity=4)
|
{"hexsha": "962be479942f5c7f88b97d02a213cf140fc9d9bb", "size": 5051, "ext": "py", "lang": "Python", "max_stars_repo_path": "photutils/segmentation/tests/test_deblend.py", "max_stars_repo_name": "barentsen/photutils", "max_stars_repo_head_hexsha": "57cbe18c8c1b8b08c93daa3d5c8dd74c10c3daae", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "photutils/segmentation/tests/test_deblend.py", "max_issues_repo_name": "barentsen/photutils", "max_issues_repo_head_hexsha": "57cbe18c8c1b8b08c93daa3d5c8dd74c10c3daae", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "photutils/segmentation/tests/test_deblend.py", "max_forks_repo_name": "barentsen/photutils", "max_forks_repo_head_hexsha": "57cbe18c8c1b8b08c93daa3d5c8dd74c10c3daae", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.977443609, "max_line_length": 77, "alphanum_fraction": 0.6384874282, "include": true, "reason": "import numpy,from numpy,import scipy,from astropy", "num_tokens": 1246}
|
# ---
# title: 373. Find K Pairs with Smallest Sums
# id: problem373
# author: Indigo
# date: 2021-06-14
# difficulty: Medium
# categories: Heap
# link: <https://leetcode.com/problems/find-k-pairs-with-smallest-sums/description/>
# hidden: true
# ---
#
# You are given two integer arrays **nums1** and **nums2** sorted in ascending
# order and an integer **k**.
#
# Define a pair **(u,v)** which consists of one element from the first array and
# one element from the second array.
#
# Find the k pairs **(u 1,v1),(u2,v2) ...(uk,vk)** with the smallest sums.
#
# **Example 1:**
#
#
#
# Input: nums1 = [1,7,11], nums2 = [2,4,6], k = 3
# Output: [[1,2],[1,4],[1,6]]
# Explanation: The first 3 pairs are returned from the sequence:
# [1,2],[1,4],[1,6],[7,2],[7,4],[11,2],[7,6],[11,4],[11,6]
#
# **Example 2:**
#
#
#
# Input: nums1 = [1,1,2], nums2 = [1,2,3], k = 2
# Output: [1,1],[1,1]
# Explanation: The first 2 pairs are returned from the sequence:
# [1,1],[1,1],[1,2],[2,1],[1,2],[2,2],[1,3],[1,3],[2,3]
#
# **Example 3:**
#
#
#
# Input: nums1 = [1,2], nums2 = [3], k = 3
# Output: [1,3],[2,3]
# Explanation: All possible pairs are returned from the sequence: [1,3],[2,3]
#
#
#
## @lc code=start
using LeetCode
using DataStructures
function k_smallest_pairs(nums1::Vector{Int}, nums2::Vector{Int}, k::Int)
    # Max-heap keyed on the negated pair sum, capped at k entries, so it
    # always holds the k smallest-sum pairs seen so far.
    heap = Tuple{Int, Int}[]
    ord = Base.Order.By(pair -> -pair[1] - pair[2])
    for a in nums1
        for b in nums2
            heappush!(heap, (a, b), ord)
            length(heap) > k && heappop!(heap, ord)
        end
    end
    # Return the survivors in ascending order of their sums.
    sort!(heap; by = pair -> pair[1] + pair[2])
end
## @lc code=end
|
{"hexsha": "a86e8477552286a77009617fabdfdec3dad79a6c", "size": 1675, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/problems/373.find-k-pairs-with-smallest-sums.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "src/problems/373.find-k-pairs-with-smallest-sums.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "src/problems/373.find-k-pairs-with-smallest-sums.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 27.4590163934, "max_line_length": 84, "alphanum_fraction": 0.5486567164, "num_tokens": 622}
|
"""
Module to provide disaggregation functionality.
"""
from pathlib import Path
from datetime import datetime, timedelta
import os.path
import numpy as np
from netCDF4 import Dataset
from core import err_handler
test_enabled = True
def disaggregate_factory(ConfigOptions):
    """Select the disaggregation routine for the configured supplemental precip source."""
    supp = ConfigOptions.supp_precip_forcings
    # Source 11 is the only supplemental product that currently needs
    # temporal disaggregation; everything else is a no-op.
    if len(supp) == 1 and supp[0] == 11:
        return ext_ana_disaggregate
    # Add new cases here
    # elif condition:
    return no_op_disaggregate
def no_op_disaggregate(input_forcings, supplemental_precip, config_options, mpi_config):
    """No-op disaggregation routine: leaves all inputs untouched."""
    return None
def ext_ana_disaggregate(input_forcings, supplemental_precip, config_options, mpi_config):
    """
    Function for disaggregating 6hr SuppPcp data to 1hr Input data
    :param input_forcings: hourly AnA input forcings (RAINRATE is netcdf var index 3)
    :param supplemental_precip: 6-hour Stage IV supplemental precipitation
    :param config_options: run-wide configuration and logging options
    :param mpi_config: MPI wrapper; rank 0 performs all file I/O
    :return:
    """
    # Check to make sure we have valid grids.
    if input_forcings.regridded_forcings2 is None or supplemental_precip.regridded_precip2 is None:
        if mpi_config.rank == 0:
            config_options.statusMsg = "Bypassing ext_ana_disaggregation routine due to missing input or supp pcp data"
            err_handler.log_warning(config_options, mpi_config)
        return

    # Only the Stage IV ExtAnA product is supported by this routine.
    if supplemental_precip.ext_ana != "STAGE4":
        if mpi_config.rank == 0:
            config_options.statusMsg = f"Bypassing ext_ana_disaggregation routine due to supplemental_precip.ext_ana = {supplemental_precip.ext_ana}"
            err_handler.log_warning(config_options, mpi_config)
        return

    read_hours = 0
    found_target_hh = False
    ana_data = []
    if mpi_config.rank == 0:
        # Hour-of-day of the target AnA file; the Stage IV file name encodes
        # its 6-hour window as ..._<beg_hh>_<end_hh>_<yyyymmdd>.
        target_hh = Path(input_forcings.file_in2).stem[-4:-2]
        _, _, _, beg_hh, end_hh, yyyymmdd = Path(supplemental_precip.file_in2).stem.split('_')
        date_iter = datetime.strptime(f"{yyyymmdd}{beg_hh}", '%Y%m%d%H')
        end_date = date_iter + timedelta(hours=6)
        # Advance the date_iter by 1 hour since the beginning of the Stage IV data
        # in date range is excluded, the end is included: (begin_date, end_date]
        date_iter += timedelta(hours=1)
        while date_iter <= end_date:
            tmp_file = f"{input_forcings.inDir}/{date_iter.strftime('%Y%m%d%H')}/{date_iter.strftime('%Y%m%d%H')}00.LDASIN_DOMAIN1"
            if os.path.exists(tmp_file):
                config_options.statusMsg = f"Reading {input_forcings.netcdf_var_names[3]} from {tmp_file} for disaggregation"
                err_handler.log_msg(config_options, mpi_config)
                with Dataset(tmp_file, 'r') as ds:
                    try:
                        # Read in rainrate, masking the global no-data value to NaN.
                        data = ds.variables[input_forcings.netcdf_var_names[3]][0, :, :]
                        data[data == config_options.globalNdv] = np.nan
                        ana_data.append(data)
                        read_hours += 1
                        if date_iter.hour == int(target_hh):
                            found_target_hh = True
                    except (ValueError, KeyError, AttributeError) as err:
                        # Report the file actually being read (previously this
                        # message wrongly named input_forcings.file_in2).
                        config_options.errMsg = f"Unable to extract: RAINRATE from: {tmp_file} ({str(err)})"
                        err_handler.log_critical(config_options, mpi_config)
            else:
                config_options.statusMsg = f"Input file missing {tmp_file}"
                err_handler.log_warning(config_options, mpi_config)
            date_iter += timedelta(hours=1)

    found_target_hh = mpi_config.broadcast_parameter(found_target_hh, config_options, param_type=bool)
    err_handler.check_program_status(config_options, mpi_config)
    if not found_target_hh:
        if mpi_config.rank == 0:
            config_options.statusMsg = f"Could not find AnA target_hh = {target_hh} for disaggregation. Setting output values to {config_options.globalNdv}."
            err_handler.log_warning(config_options, mpi_config)
        supplemental_precip.regridded_precip2[:, :] = config_options.globalNdv
        return

    read_hours = mpi_config.broadcast_parameter(read_hours, config_options, param_type=int)
    err_handler.check_program_status(config_options, mpi_config)
    if read_hours != 6:
        if mpi_config.rank == 0:
            config_options.statusMsg = f"Could not find all 6 AnA files for disaggregation. Only found {read_hours} hours. Setting output values to {config_options.globalNdv}."
            err_handler.log_warning(config_options, mpi_config)
        supplemental_precip.regridded_precip2[:, :] = config_options.globalNdv
        return

    ana_sum = np.array([], dtype=np.float32)
    target_data = np.array([], dtype=np.float32)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented replacement for dtype use.
    ana_all_zeros = np.array([], dtype=bool)
    ana_no_zeros = np.array([], dtype=bool)
    target_data_no_zeros = np.array([], dtype=bool)
    if mpi_config.rank == 0:
        config_options.statusMsg = f"Performing hourly disaggregation of {supplemental_precip.file_in2}"
        err_handler.log_msg(config_options, mpi_config)

        ana_sum = sum(ana_data)
        # Position of the target hour within the six hourly grids.
        # NOTE(review): assumes beg_hh aligns with a 6-hour boundary so the
        # modulo wraps correctly - confirm against the Stage IV file naming.
        target_data = ana_data[(int(target_hh) - 1) % 6]

        ana_zeros = [(a == 0).astype(int) for a in ana_data]
        target_data_zeros = (target_data == 0)
        target_data_no_zeros = ~target_data_zeros
        ana_zeros_sum = sum(ana_zeros)
        ana_all_zeros = (ana_zeros_sum == 6)
        ana_no_zeros = (ana_zeros_sum == 0)
    err_handler.check_program_status(config_options, mpi_config)

    ana_sum = mpi_config.scatter_array(input_forcings, ana_sum, config_options)
    err_handler.check_program_status(config_options, mpi_config)
    target_data = mpi_config.scatter_array(input_forcings, target_data, config_options)
    err_handler.check_program_status(config_options, mpi_config)
    ana_all_zeros = mpi_config.scatter_array(input_forcings, ana_all_zeros, config_options)
    err_handler.check_program_status(config_options, mpi_config)
    ana_no_zeros = mpi_config.scatter_array(input_forcings, ana_no_zeros, config_options)
    err_handler.check_program_status(config_options, mpi_config)
    target_data_no_zeros = mpi_config.scatter_array(input_forcings, target_data_no_zeros, config_options)
    err_handler.check_program_status(config_options, mpi_config)

    if mpi_config.comm.Get_size() == 1 and test_enabled:
        # Single-process debug output: the raw 6-hour totals and the
        # disaggregation factors applied below.
        test_file = f"{config_options.scratch_dir}/stage_4_A_PCP_GDS5_SFC_acc6h_{yyyymmdd}_{beg_hh}_{end_hh}.txt"
        np.savetxt(test_file, supplemental_precip.regridded_precip2)
        test_file = f"{config_options.scratch_dir}/disaggregation_factors_{target_hh}_{yyyymmdd}{beg_hh}_{end_date.strftime('%Y%m%d%H')}.txt"
        np.savetxt(test_file, np.nan_to_num(np.select([ana_all_zeros,
                                                       (ana_no_zeros | target_data_no_zeros)],
                                                      [1/6.0*np.ones(supplemental_precip.regridded_precip2[:, :].shape),
                                                       target_data/ana_sum],
                                                      0), nan=config_options.globalNdv))

    # supplemental_precip.regridded_precip2[(0.0 < supplemental_precip.regridded_precip2) & (supplemental_precip.regridded_precip2 < 0.00003)] = 0.0
    # Distribute the 6-hour accumulation: uniformly (1/6) where all six hourly
    # grids are zero, otherwise proportionally to the target hour's share of
    # the 6-hour AnA sum; any other case becomes 0.
    supplemental_precip.regridded_precip2[:, :] = np.select([ana_all_zeros,
                                                             (ana_no_zeros | target_data_no_zeros)],
                                                            [1/6.0*supplemental_precip.regridded_precip2[:, :],
                                                             supplemental_precip.regridded_precip2[:, :] * target_data/ana_sum],
                                                            0)
    np.nan_to_num(supplemental_precip.regridded_precip2[:, :], copy=False, nan=config_options.globalNdv)
    if mpi_config.comm.Get_size() == 1 and test_enabled:
        test_file = f"{config_options.scratch_dir}/stage_4_A_PCP_GDS5_SFC_acc6_disaggregation_{target_hh}_{yyyymmdd}{beg_hh}_{end_date.strftime('%Y%m%d%H')}.txt"
        np.savetxt(test_file, supplemental_precip.regridded_precip2)
|
{"hexsha": "59018a011906775d1013ea968f45798a336b5686", "size": 8536, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/disaggregateMod.py", "max_stars_repo_name": "champham/WrfHydroForcing", "max_stars_repo_head_hexsha": "90f1cbcc233eb007818ae159be81814e5754f233", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/disaggregateMod.py", "max_issues_repo_name": "champham/WrfHydroForcing", "max_issues_repo_head_hexsha": "90f1cbcc233eb007818ae159be81814e5754f233", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/disaggregateMod.py", "max_forks_repo_name": "champham/WrfHydroForcing", "max_forks_repo_head_hexsha": "90f1cbcc233eb007818ae159be81814e5754f233", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.7333333333, "max_line_length": 176, "alphanum_fraction": 0.6715089035, "include": true, "reason": "import numpy", "num_tokens": 2013}
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits import mplot3d
def visualize_position(experiment_name):
    """Plot the recorded 3D position trace of one experiment.

    Reads ``Experiment_Output/<experiment_name>/positions.txt``, where each
    line is ``timestamp,x,y,z``, and shows the trajectory as a 3D line with
    a scatter overlay colored by height.
    """
    output_folder = "Experiment_Output/" + experiment_name + "/"
    T, X, Y, Z = [], [], [], []
    first_ts = None
    # Context manager guarantees the file is closed (the original handle
    # was never closed).
    with open(output_folder + "positions.txt", "r") as f:
        for line in f:
            fields = line.split(',')
            if first_ts is None:
                # Timestamps are stored relative to the first sample.
                first_ts = int(fields[0])
            T.append(int(fields[0]) - first_ts)  # collected but not plotted yet
            X.append(float(fields[1]))
            Y.append(float(fields[2]))
            Z.append(float(fields[3]))

    fig = plt.figure()
    ax = plt.axes(projection='3d')
    plt.title(experiment_name)
    plt.tight_layout()
    ax.plot3D(X, Y, Z, 'gray')
    ax.scatter3D(X, Y, Z, c=Z, cmap='Greens')
    ax.set_xlabel('X Axis')
    ax.set_ylabel('Y Axis')
    ax.set_zlabel('Z Axis')
    plt.show()
|
{"hexsha": "4bc80d67f9f171d07bf8adb36642dec3809ac0c6", "size": 934, "ext": "py", "lang": "Python", "max_stars_repo_path": "VisualizePositions.py", "max_stars_repo_name": "SunBangjie/smartphone_pairing", "max_stars_repo_head_hexsha": "633f80961be1a213e82077d2e5fd08f0cdf2453b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VisualizePositions.py", "max_issues_repo_name": "SunBangjie/smartphone_pairing", "max_issues_repo_head_hexsha": "633f80961be1a213e82077d2e5fd08f0cdf2453b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VisualizePositions.py", "max_forks_repo_name": "SunBangjie/smartphone_pairing", "max_forks_repo_head_hexsha": "633f80961be1a213e82077d2e5fd08f0cdf2453b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4705882353, "max_line_length": 64, "alphanum_fraction": 0.6049250535, "include": true, "reason": "import numpy", "num_tokens": 248}
|
using Revise
using Dice
using Dice: num_flips, num_nodes, ifelse
# Number of nodes SBK needs to model a distribution on n bits
function sbk_num_nodes(n)
    # Closed form: 2^n * (n - 1) + 3 BDD nodes for an n-bit distribution.
    (n - 1) * 2^n + 3
end
function generate_code_sbk(p::Vector{Float64})
    # Build a Dice program encoding a categorical distribution over
    # 0:(length(p)-1) as a chain of biased coin flips (the SBK encoding):
    # flip i selects outcome i-1 with the renormalized probability
    # p[i] / sum(p[i:end]); the final index is the unconditional fallback.
    @dice begin
        function helper(i)
            if i == length(p)
                DistInt(i - 1)
            else
                ifelse(flip(p[i] / sum(p[i:length(p)])),
                    DistInt(dicecontext(), i - 1),
                    helper(i + 1))
            end
        end
        helper(1)
    end
end
function test_sbk_num_nodes(p::Vector{Float64})
    # Compile the SBK encoding of `p` and check both the inferred
    # distribution and the predicted BDD node count.
    prog = generate_code_sbk(p)
    compiled = compile(prog)
    @assert infer(prog, :bdd) ≈ p # Verify correctness
    node_count = num_nodes(compiled)
    bit_count = round(Int, log2(length(p)))
    @assert node_count == sbk_num_nodes(bit_count) # Verify num bits
    println("SBK for a $(bit_count)-bit distribution: $(node_count) bdd nodes, $(num_flips(compiled)) flips")
end
dist_2 = [0.5, 0.5]
dist_4 = [0.1, 0.2, 0.3, 0.4]
dist_8 = [0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.44]
dist_16 = [0.005, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.001, 0.11, 0.12, 0.13, 0.14, 0.022, 0.032]
# Exercise distributions of 1, 2, 3 and 4 bits.
foreach(test_sbk_num_nodes, (dist_2, dist_4, dist_8, dist_16))
# To investigate: the below discrete() function, which only meaningfully changes
# the order of flip introduction, takes fewer nodes (but still O(n2^n)).
#== vvvvvvvvvvvvvvv diff
sbk_num_nodes(n) = 2^n * (n - 1) + 3 - 2^(n - 1) + 1
function generate_code_sbk(p::Vector{Float64})
@dice begin
function discrete(p::Vector{Float64})
mb = length(p)
v = Vector(undef, mb)
sum = 1
for i=1:mb
v[i] = p[i]/sum
sum = sum - p[i]
end
ans = DistInt(dicecontext(), mb-1)
for i=mb-1:-1:1
ans = if flip(v[i]) DistInt(dicecontext(), i-1) else ans end
end
return ans
end
discrete(p)
end
end
==#
|
{"hexsha": "adcd0f32d4b209882bb518c1d8f4a822b3fc61fe", "size": 2057, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/sbk_num_nodes.jl", "max_stars_repo_name": "rtjoa/Dice.jl", "max_stars_repo_head_hexsha": "839b906edbe6a1b51c723211533b3145700406b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/sbk_num_nodes.jl", "max_issues_repo_name": "rtjoa/Dice.jl", "max_issues_repo_head_hexsha": "839b906edbe6a1b51c723211533b3145700406b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/sbk_num_nodes.jl", "max_forks_repo_name": "rtjoa/Dice.jl", "max_forks_repo_head_hexsha": "839b906edbe6a1b51c723211533b3145700406b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9718309859, "max_line_length": 110, "alphanum_fraction": 0.5464268352, "num_tokens": 692}
|
from functools import cached_property
from sympy import symbols
from engine.functions import OrbitalFrame
class SymbolicOrbit:
    """Symbolic Keplerian orbital elements of a secondary body around a primary.

    Each classical element is a sympy symbol subscripted with the secondary
    body's name.
    """

    def __init__(self, primary_body, secondary_body):
        self.primary_body = primary_body
        self.secondary_body = secondary_body
        tag = secondary_body.name
        self.eccentricity = symbols(f"e_{tag}")
        self.semimajor_axis = symbols(f"a_{tag}")
        self.true_anomaly_at_epoch = symbols(f"ν_{tag}")
        self.inclination = symbols(f"i_{tag}")
        self.longitude_ascending_node = symbols(f"Ω_{tag}")
        self.argument_of_periapsis = symbols(f"ω_{tag}")

    @cached_property
    def orbital_frame(self):
        """Orbital reference frame oriented by Ω, i, ω relative to the primary's equatorial frame."""
        # NOTE(review): assumes OrbitalFrame applies the rotations in this
        # argument order - confirm against engine.functions.OrbitalFrame.
        frame_name = f"O_{self.secondary_body.name}"
        return OrbitalFrame(
            frame_name,
            self.primary_body.equatorial_frame,
            self.longitude_ascending_node,
            self.inclination,
            self.argument_of_periapsis,
        )
|
{"hexsha": "a466d509d59f9b39d6e218e6bb3e2594cf73d257", "size": 983, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/engine/symbolic_orbit.py", "max_stars_repo_name": "RomainEndelin/keplerian_orbits", "max_stars_repo_head_hexsha": "3380e5d9a1006e73580cf3a86cb10845196c405d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/engine/symbolic_orbit.py", "max_issues_repo_name": "RomainEndelin/keplerian_orbits", "max_issues_repo_head_hexsha": "3380e5d9a1006e73580cf3a86cb10845196c405d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/engine/symbolic_orbit.py", "max_forks_repo_name": "RomainEndelin/keplerian_orbits", "max_forks_repo_head_hexsha": "3380e5d9a1006e73580cf3a86cb10845196c405d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8965517241, "max_line_length": 75, "alphanum_fraction": 0.6968463886, "include": true, "reason": "from sympy", "num_tokens": 212}
|
import sys
import os
import platform
import logging
import shutil
import time
import glob
import numpy as np
from Bio import SeqIO, pairwise2
from Bio.PDB import *
sys.path.append('../../')
from config import *
sys.path.append(scripts_dir)
from my_log import *
from classes import *
def prepare_executables():
    """Make the bundled DSSR and MotifScanX binaries executable."""
    if output_env == 'local':
        os.chdir(dssr_dir)
        os.system('chmod +x x3dna-dssr')
        os.chdir(root_dir)
    # MotifScanX ships a separate binary for macOS.
    executable = 'align_ga.mac' if platform.system() == 'Darwin' else 'align_ga'
    os.chdir(os.path.join(motifscanx_dir, 'bin'))
    os.system('chmod +x ' + executable)
    os.chdir(root_dir)
def wait_for_certain_time_according_to_wait_factor(n):
    """Sleep for n * wait_factor seconds, capped at max_wait_time."""
    time.sleep(min(n * wait_factor, max_wait_time))
    # logger.info('waiting')
def is_all_files_generated(file_list):
    """Return True when every path in file_list exists as a regular file."""
    return all(os.path.isfile(path) for path in file_list)
def wait_for_certain_files_to_be_generated(file_list, early_terminate=True):
    """Poll until every file in file_list exists.

    Returns True once all files are present; False when the configured
    timeout elapses (wait_time if early_terminate, else max_wait_time).
    """
    elapsed = 0.0
    phase_wait = 10 * wait_factor
    while not is_all_files_generated(file_list):
        # Comparisons against True/False are kept verbatim from the
        # original: a non-bool truthy early_terminate never times out.
        if early_terminate == True and elapsed > wait_time:
            return False
        elif early_terminate == False and elapsed > max_wait_time:
            return False
        logger.info('Waiting for files to be generated.')
        time.sleep(phase_wait)
        elapsed += phase_wait
        # logger.info('waiting')
    return True
def create_directory(dir_to_create):
    """Create dir_to_create (and any parents) unless the path already exists."""
    if os.path.exists(dir_to_create):
        return
    os.makedirs(dir_to_create)
def delete_directory(dir_to_delete):
    """Recursively remove dir_to_delete if it exists; no-op otherwise."""
    if not os.path.exists(dir_to_delete):
        return
    shutil.rmtree(dir_to_delete)
def remove_all_from_dir(mypath, exception_dirs=[]):
    """Delete subdirectories under mypath, keeping those named in exception_dirs.

    Files directly under mypath are left untouched (see the commented-out
    block below).

    NOTE(review): os.walk recurses top-down, so the contents *inside* a kept
    exception directory are still visited on deeper passes and removed
    unless they are themselves listed in exception_dirs - confirm this is
    intended.  Also note exception_dirs uses a mutable default; it is never
    mutated here, but callers should not rely on that.
    """
    for root, dirs, files in os.walk(mypath):
        # for file in files: #files in all dirs
        #     if file not in exception_files:
        #         os.remove(os.path.join(root, file))
        for dir in dirs:
            if dir not in exception_dirs:
                shutil.rmtree(os.path.join(root, dir))
def zscore(x, m, std):
    """Return the z-score of x given mean m and standard deviation std (0 when std is 0)."""
    return 0 if std == 0 else (x - m) / std
def isClose(a, b, precision):
    """Return True when a and b differ by at most precision."""
    return abs(a - b) <= precision
def csv_to_list(lines):
    """Split each CSV-style line on commas, stripping whitespace from every field."""
    return [[field.strip() for field in line.strip().split(',')]
            for line in lines]
def base_abbreviation(fn):
"""get the abbreviation of the modified residues from the 3DNA baselist"""
ret = {}
# fn = os.path.join(os.path.dirname(os.path.abspath( __file__ )), fn)
fp = open(fn)
for line in fp.readlines():
line = line.rstrip()
if line.startswith("#") or len(line) == 0:
continue
else:
three_letter = line[:3].strip()
one_letter = line[8]
if one_letter == "T" or one_letter == "t":
one_letter = "U"
ret[three_letter] = one_letter.upper()
fp.close()
return ret
def amino_acid_collection(fn):
"""get the list of the amino acids from the file"""
ret = {}
# fn = os.path.join(os.path.dirname(os.path.abspath( __file__ )), fn)
fp = open(fn)
for line in fp.readlines():
line = line.rstrip()
if line.startswith("#") or len(line) == 0:
continue
else:
three_letter = line[:3].strip().upper()
one_letter = line[4]
ret[three_letter] = one_letter.upper()
fp.close()
return ret
def get_aln_mapping(aln_seq1, aln_seq2):
"""
:return ret1: dict[i]->j i in seq1; j in seq2
:return ret2: dict[j]->i j in seq2; i in seq1
"""
if len(aln_seq1) != len(aln_seq2):
return None
i = j = 0
ret1 = {}
ret2 = {}
for k in range(len(aln_seq1)):
if aln_seq1[k] == "-" and aln_seq2[k] != "-":
j += 1
elif aln_seq2[k] == "-" and aln_seq1[k] != "-":
i += 1
elif aln_seq1[k] != "-" and aln_seq2[k] != "-":
ret1[i] = j
ret2[j] = i
i += 1
j += 1
return ret1, ret2
# def download_single_pdbx_or_fasta_file(fname, file_ext):
# if file_ext == 'cif':
# urlretrieve(pdbx_url + fname, os.path.join(pdbx_dir, fname))
# fp = open(os.path.join(pdbx_dir, fname))
# lines = fp.readlines()
# fp.close()
# if lines[0].startswith('data_' + fname.strip().split('.')[0]):
# status = True
# elif file_ext == 'fasta':
# urlretrieve(fasta_url + fname.strip().split('.')[0], os.path.join(fasta_dir, fname))
# fp = open(os.path.join(fasta_dir, fname))
# lines = fp.readlines()
# fp.close()
# if lines[0].startswith('>' + fname.strip().split('.')[0]):
# status = True
# else:
# logger.error('Invalid file_type provided to download.')
# if status == False:
# logger.error(fname + ' download unsuccessful.')
def getDSSRseqnum(input, with_res_name=False):
    """Split a DSSR residue identifier into (resname,) seqnum and insertion code.

    Handles 'NAME/NUM' identifiers, two-letter DNA names (DA/DT/DU/DG/DC),
    three-letter modified-residue names, and plain one-letter names; a
    trailing '^X' on the number carries the insertion code.
    """
    icode = ''
    if '/' in input:
        resname, seqnum = input.strip().split('/')[:2]
    elif len(input) > 2 and input[:2] in ['DA', 'DT', 'DU', 'DG', 'DC']:
        resname, seqnum = input[:2], input[2:]
    elif len(input) > 3 and input[2].isalpha():
        # Three-letter (modified) residue name.
        resname, seqnum = input[:3], input[3:]
    else:
        resname, seqnum = input[0], input[1:]
    if '^' in seqnum:
        seqnum, icode = seqnum.strip().split('^')[:2]
    if with_res_name:
        return resname, seqnum, icode
    return seqnum, icode
def get_loop_type(loop):
    """Classify a loop string 'chain:seg1_seg2_...' by segment count.

    One segment -> 'HL' (hairpin), two -> 'IL' (internal), more -> 'ML'
    (multiloop); logs and returns '' otherwise.
    """
    _, regions = loop.strip().split(':')
    segment_count = len(regions.strip().split('_'))
    if segment_count == 1:
        return 'HL'
    if segment_count == 2:
        return 'IL'
    if segment_count > 2:
        return 'ML'
    logger.error('Invalid loop')
    return ''
def strToNode(loop_str):
    """Build a Node from a loop string of the form 'chain:seg1_seg2_...'."""
    chain, region = loop_str.split(':')
    return Node(chain, region.split('_'))
def get_local_alignment_index(loop_index, alignment_region):
    """Convert local alignment positions into absolute loop residue indices.

    loop_index looks like 'chain:3-5_8-9'; its segments are flattened into
    the full list of residue numbers.  alignment_region ('s-e,s-e,...')
    selects positions into that flattened list.  An empty region returns
    loop_index unchanged.
    """
    if len(alignment_region.strip()) == 0:
        return loop_index
    chain = loop_index.split(':')[0]
    # Flatten the loop's segments into the absolute residue-number list.
    absolute = []
    for segment in loop_index.split(':')[1].strip().split('_'):
        start, end = segment.split('-')
        absolute.extend(range(int(start), int(end) + 1))
    # Re-express each aligned local range in absolute coordinates.
    converted_segments = []
    for local_range in alignment_region.split(','):
        s, e = local_range.split('-')
        converted_segments.append(str(absolute[int(s)]) + '-' + str(absolute[int(e)]))
    return chain + ':' + '_'.join(converted_segments)
def parse_scanx_alignment_block(lines, line_index):
    """Parse one MotifScanX alignment block starting at line_index.

    Returns (r1, r2, cr1, cr2, aln1, aln2, score) where cr1/cr2 are the
    two loop indices converted to absolute residue coordinates.
    """
    header = lines[line_index].split('::')[1]
    r1 = header.split(' and ')[0].strip().strip(':')
    r2 = header.split(' and ')[1].strip().strip(':')
    score_text = lines[line_index + 1].split(':')[1].strip()
    if score_text == '':
        # score = -50.
        logger.error('ERROR: No alignment score found for: ' + r1 + ' and ' + r2)
        sys.exit()
    score = float(score_text)
    cr1 = get_local_alignment_index(r1, lines[line_index + 3].split(':')[1].strip())
    cr2 = get_local_alignment_index(r2, lines[line_index + 4].split(':')[1].strip())
    aln1 = lines[line_index + 6].strip()
    aln2 = lines[line_index + 7].strip()
    return r1, r2, cr1, cr2, aln1, aln2, score
def parse_scanx_alignment_block_raw(lines, line_index):
    """Parse one raw MotifScanX alignment block, including matched-interaction lists.

    Returns (r1, r2, cr1, cr2, aln1, aln2, score, matching_bp_info,
    matching_stk_info, elapsed_time, is_copied, i), where i is the index of
    the 'Total Elapsed Time' footer line.
    """
    header = lines[line_index].split('::')[1]
    r1 = header.split(' and ')[0].strip().strip(':')
    r2 = header.split(' and ')[1].strip().strip(':')
    score_text = lines[line_index + 1].split(':')[1].strip()
    if score_text == '':
        # score = -50.
        logger.error('ERROR: No alignment score found for: ' + r1 + ' and ' + r2)
        sys.exit()
    score = float(score_text)
    cr1 = lines[line_index + 3].split(':')[1].strip()
    cr2 = lines[line_index + 4].split(':')[1].strip()
    aln1 = lines[line_index + 6].strip()
    aln2 = lines[line_index + 7].strip()
    matching_bp_info = []
    matching_stk_info = []
    i = line_index + 10
    # Base-pair matches run until the stacking-interaction header.
    while not lines[i].startswith('# Matched base-stacking interactions: '):
        matching_bp_info.append([piece.strip() for piece in lines[i].strip().split('MATCHES')])
        i += 1
    i += 1
    # Stacking matches run until the elapsed-time footer.
    while not lines[i].startswith('Total Elapsed Time :'):
        matching_stk_info.append([piece.strip() for piece in lines[i].strip().split('MATCHES')])
        i += 1
    is_copied = lines[i].strip().endswith('(copied)')
    elapsed_time = lines[i].strip().split(':')[1].strip().split(' ')[0].strip()
    return r1, r2, cr1, cr2, aln1, aln2, score, matching_bp_info, matching_stk_info, elapsed_time, is_copied, i
def get_loops_in_cluster(clusters):
    """Collect the distinct loop Nodes appearing in any cluster (insertion order)."""
    loops_in_cluster = []
    for cluster_id in clusters:
        for loop_str in clusters[cluster_id]:
            node = strToNode(loop_str)
            if node not in loops_in_cluster:
                loops_in_cluster.append(node)
    return loops_in_cluster
def find_nodes_in_cluster(node, cluster_alignment_data):
    """Return (cluster_id, aligned_node_list) pairs for every cluster whose
    per-node mapping contains *node*."""
    matches = []
    for c_id, node_map in cluster_alignment_data.items():
        if node in node_map:
            matches.append((c_id, node_map[node]))
    return matches
def get_backbone_and_sugar_atoms():
    """Atom-name lists for the RNA backbone and the ribose sugar ring."""
    backbone = ["C3'", "C4'", "C5'", "O3'", "O5'", "P"]
    sugar = ["C1'", "C2'", "C3'", "C4'", "O4'"]
    return backbone, sugar
def get_z_scores(a_list, is_median=False):
    """Pair each value with its z-score.

    The center is the list mean (or median when is_median is True, via
    get_mean) and the spread is the numpy standard deviation.
    Returns a list of (value, z_score) tuples in input order.
    """
    center = get_mean(a_list, is_median)
    spread = np.std(a_list)
    return [(value, zscore(float(value), float(center), float(spread)))
            for value in a_list]
def get_fasta_loop_length(loop_fasta):
    """Total residue count of a loop given as '<pdb_chain>:<regions>'."""
    _, region_str = loop_fasta.strip().split(':')
    return get_loop_length(region_str.strip().split('_'))
def get_loop_length(segments):
    """Total residue count over segments given as "start-end" strings.

    An endpoint may carry an insertion code ("12.A"); only the numeric part
    is used.  (As the original noted: not reliable for raw PDB indexing.)
    """
    def _numeric(endpoint):
        # strip a trailing insertion code, e.g. "12.A" -> 12
        if endpoint[-1].isalpha():
            return int(endpoint.strip().split(".")[0].strip())
        return int(endpoint.strip())

    total = 0
    for segment in segments:
        pcs = segment.split("-")
        total += _numeric(pcs[1]) - _numeric(pcs[0]) + 1
    return total
def get_zscore_rank(zscore):
    """Bucket a z-score into a rank: 1 is best, 100 means not significant."""
    for bound, rank in ((3.0, 1), (1.8, 2), (1.0, 3), (0.5, 4), (0.0, 5)):
        if zscore > bound:
            return rank
    return 100
def get_rmsd_rank(rmsd, align_length, is_length_adjusted_score):
    """Bucket an RMSD into a rank (1 best, 100 worst).

    When is_length_adjusted_score is True the rmsd is first de-normalized by
    sqrt(align_length).
    """
    value = rmsd * math.sqrt(align_length) if is_length_adjusted_score else rmsd
    for bound, rank in ((0.5, 1), (1.0, 2), (2.0, 3), (4.0, 4), (19.0, 5)):
        if value < bound:
            return rank
    return 100
def get_mean(a_list, is_median=False):
    """Central tendency of a_list rounded to one decimal: median when
    is_median is True, arithmetic mean otherwise."""
    center = np.median(a_list) if is_median else np.mean(a_list)
    return round(center, 1)
def print_a_dict_sorted(a_dict, fp=None, separator=": "):
    """Write "key<separator>value" lines in sorted-key order, followed by one
    blank line, to stdout (fp is None) or to the file object *fp*."""
    for key in sorted(a_dict):
        text = str(key) + separator + str(a_dict[key])
        if fp is None:  # was `fp == None`; identity check is the Python idiom
            print(text)
        else:
            fp.write(text + "\n")
    if fp is None:
        print('')
    else:
        fp.write("\n")
def print_a_list(a_list, fp=None):
    """Write one item per line, then one blank line, to stdout (fp is None)
    or to the file object *fp*."""
    for item in a_list:
        if fp is None:  # was `fp == None`; identity check is the Python idiom
            print(item)
        else:
            fp.write(str(item) + "\n")
    if fp is None:
        print('')
    else:
        fp.write("\n")
def get_motif_family_short_code(family_name):
    """Map a motif family name (case-insensitive) to its short code,
    falling back to the name itself when unknown."""
    return known_motif_shortcode.get(family_name.lower(), family_name)
def rotate(l, x):
    """Rotate list *l* right by *x* positions (expects 0 <= x <= len(l))."""
    tail = l[-x:]
    head = l[:-x]
    return tail + head
def get_all_loop_combination(loop):
    """Return every rotation of a multi-segment loop as loop strings.

    '<pdb_chain>:<s1>-<e1>_<s2>-<e2>' yields one string per cyclic rotation
    of its segment list.
    """
    pdb_chain, region_str = loop.strip().split(':')
    segments = []
    for region in region_str.strip().split('_'):
        s, e = region.strip().split('-')
        segments.append((s, e))
    combinations = []
    for shift in range(len(segments)):
        rotated = rotate(segments, shift)
        combinations.append(pdb_chain + ':' + '_'.join('-'.join(seg) for seg in rotated))
    return combinations
def get_separated_index_icode(index):
    """Split a residue index like "123A" into (123, 'A'); a plain numeric
    index gets a single-space icode."""
    if index[-1].isalpha():
        return int(index[:-1]), index[-1]
    return int(index), ' '
def convert_a_cluster_from_FASTA_to_PDB(families):
    """Convert every loop in each family from FASTA indexing to PDB indexing.

    Returns a new dict with the same family ids and converted loop strings.
    """
    converted = {}
    for family_id, loops in families.items():
        converted[family_id] = [convert_a_loop_from_FASTA_to_PDB(loop) for loop in loops]
    return converted
def convert_a_cluster_from_PDB_to_FASTA(families):
    """Convert every loop in each family from PDB indexing to FASTA indexing.

    Returns a new dict with the same family ids and converted loop strings.
    """
    converted = {}
    for family_id, loops in families.items():
        converted[family_id] = [convert_a_loop_from_PDB_to_FASTA(loop) for loop in loops]
    return converted
def convert_a_loop_from_PDB_to_FASTA(loop):
    """Convert one loop string from PDB residue indexing to FASTA indexing.

    *loop* looks like "<pdb>_<chain>:<s1>-<e1>_<s2>-<e2>..." where an index
    may carry an insertion code as "<num>.<icode>".  The per-chain mapping is
    read from '<pdb>_<chain>.rmsx.nch' under the module-level
    pdb_fasta_mapping_dir.
    """
    pdb_chain, segments = loop.strip().split(':')
    pdb_id, chain_id = pdb_chain.strip().split('_')
    mapping_file_name = pdb_chain + '.rmsx.nch'
    converter = PDB_FASTA_Index_Converter(pdb_fasta_mapping_dir, mapping_file_name)
    segments = segments.strip().split('_')
    converted_segments = []
    for segment in segments:
        a, b = segment.strip().split('-')
        icode_a = ''
        icode_b = ''
        # split off insertion codes: "123.A" -> "123", 'A'
        if '.' in a:
            a, icode_a = a.strip().split('.')
        if '.' in b:
            b, icode_b = b.strip().split('.')
        a_pdb = Chainindex(chain_id, int(a), icode_a)
        b_pdb = Chainindex(chain_id, int(b), icode_b)
        a_fasta = converter.convert_PDBindx_To_FASTAindx(a_pdb)
        b_fasta = converter.convert_PDBindx_To_FASTAindx(b_pdb)
        converted_segments.append(str(a_fasta) + '-' + str(b_fasta))
    converted_segments = '_'.join(converted_segments)
    converted_loop = pdb_chain + ':' + converted_segments
    return converted_loop
def convert_a_loop_from_FASTA_to_PDB(loop):
    """Convert one loop string from FASTA indexing to PDB residue indexing.

    Inverse of convert_a_loop_from_PDB_to_FASTA: PDB indices that carry an
    insertion code are rendered as "<seqnum>.<icode>".
    """
    pdb_chain, segments = loop.strip().split(':')
    pdb_id, chain_id = pdb_chain.strip().split('_')
    mapping_file_name = pdb_chain + '.rmsx.nch'
    converter = PDB_FASTA_Index_Converter(pdb_fasta_mapping_dir, mapping_file_name)
    segments = segments.strip().split('_')
    converted_segments = []
    for segment in segments:
        a, b = segment.strip().split('-')
        a_fasta = a
        b_fasta = b
        a_pdb = converter.convert_FASTAindx_To_PDBindx(a_fasta)
        b_pdb = converter.convert_FASTAindx_To_PDBindx(b_fasta)
        # render as "<seqnum>" or "<seqnum>.<icode>" when an icode exists
        if len(a_pdb.icode) == 0:
            a_pdb = str(a_pdb.seqnum)
        else:
            a_pdb = str(a_pdb.seqnum) + '.' + str(a_pdb.icode)
        if len(b_pdb.icode) == 0:
            b_pdb = str(b_pdb.seqnum)
        else:
            b_pdb = str(b_pdb.seqnum) + '.' + str(b_pdb.icode)
        converted_segments.append(a_pdb + '-' + b_pdb)
    converted_segments = '_'.join(converted_segments)
    converted_loop = pdb_chain + ':' + converted_segments
    return converted_loop
def get_loop_cluster_source(loop):
    """Return the recorded cluster source for *loop*, or "N/A".

    Always returns "N/A" when the module-level show_cluster_source flag is
    off.  (The original also computed an unused local `pdb_chain`; removed.)
    """
    if show_cluster_source == True:
        node = strToNode(loop)
        if node in loop_cluster_source:
            return loop_cluster_source[node]
    return "N/A"
def assign_cluster_source(filename, source_str):
    """Record *source_str* as the cluster source for every loop token
    (any comma-separated piece containing ':') found in *filename*.

    Updates the module-level loop_cluster_source dict.  Uses a context
    manager so the file handle is closed (the original leaked it), and
    iterates the file lazily instead of readlines().
    """
    global loop_cluster_source
    with open(filename) as fp:
        for line in fp:
            for piece in line.strip().split(","):
                if ":" in piece:
                    loop_cluster_source[strToNode(piece.strip())] = source_str
def cleanup_output_directories(removable_text_file_list, superimposition_output_dir, representative_dir, progressive_dir, subfamilies_dir, superimposition_details_dir, pymol_session_dir, draw_figures):
    """Delete intermediate text files and temporary image/graph directories
    produced during superimposition.

    NOTE(review): representative_dir, progressive_dir, subfamilies_dir,
    superimposition_details_dir, pymol_session_dir and draw_figures are not
    used in the current body (only referenced in the commented-out variants);
    presumably kept for interface compatibility -- confirm before removing.
    """
    for file in removable_text_file_list:
        if os.path.isfile(file):
            os.remove(file)
    # give the filesystem time to settle before removing whole directories
    wait_for_certain_time_according_to_wait_factor(1)
    delete_directory(os.path.join(superimposition_output_dir, 'best_alignment_graph'))
    delete_directory(os.path.join(superimposition_output_dir, 'initial_loop_images'))
    delete_directory(os.path.join(superimposition_output_dir, 'rotated_loop_images'))
    delete_directory(os.path.join(superimposition_output_dir, 'subfamily'))
    delete_directory(temp_dir)
def create_required_directories(partial_pdbx_dir, alignment_dir, superimposition_output_dir, subfamily_details_dir, summary_dir, superimposition_details_dir, representative_dir, pymol_session_dir, pickles_dir, set_view_manually):
    """Create every output/working directory the pipeline writes into.

    Also relies on module-level path globals (pdbx_dir, fasta_dir, loop_dir,
    pdb_fasta_mapping_dir, annotation_dir) and the save_pymol_session flag.
    Superimposition-related directories are skipped when the user sets the
    views manually.
    """
    create_directory(pdbx_dir)
    create_directory(partial_pdbx_dir)
    create_directory(fasta_dir)
    create_directory(loop_dir)
    create_directory(pdb_fasta_mapping_dir)
    create_directory(alignment_dir)
    create_directory(annotation_dir)
    create_directory(pickles_dir)
    create_directory(subfamily_details_dir)
    if set_view_manually == False:
        create_directory(superimposition_output_dir)
        create_directory(summary_dir)
        create_directory(superimposition_details_dir)
        create_directory(representative_dir)
        if save_pymol_session == True:
            create_directory(pymol_session_dir)
def get_string_equivalent_index(current_cumulative_index):
    """Convert a 0-based index to spreadsheet-style letters:
    0 -> 'a', 25 -> 'z', 26 -> 'aa', 27 -> 'ab', ... (bijective base 26)."""
    letters = []
    remaining = current_cumulative_index + 1
    while remaining:
        remaining, digit = divmod(remaining - 1, 26)
        letters.append(chr(ord('a') + digit))
    return ''.join(reversed(letters))
def get_loop_region_identifier_line(aligned_seq, regions, ext_len_list, delim):
    """Build a marker line flagging the loop-region columns of an alignment row.

    aligned_seq is one row of a pairwise alignment (may contain '-' gaps)
    covering the loop *regions* extended on both sides by ext_len_list.
    Returns (line, loop_character_indices): *line* carries *delim* under every
    alignment column that belongs to a loop region and spaces elsewhere;
    *loop_character_indices* lists those column indices.
    """
    line = ''
    # alignment columns that hold a real residue (gap columns are skipped);
    # the i-th extended sequence index maps to column ch_ind_list[i]
    ch_ind_list = []
    for i in range(len(aligned_seq)):
        if aligned_seq[i] != '-':
            ch_ind_list.append(i)
    loop_indices = reduce(lambda y, z: y+z, map(lambda x: list(range(x[0], x[1]+1)), regions))
    # sequence indices covered by the extended regions, clipped so that
    # consecutive extensions never overlap
    loop_indices_extended = []
    prev_ext_e = -1
    for i, (s, e) in enumerate(regions):
        ext_s = s - ext_len_list[i][0]
        ext_e = e + ext_len_list[i][1]
        if prev_ext_e != -1 and ext_s <= prev_ext_e:
            ext_s = prev_ext_e + 1
        loop_indices_extended += range(ext_s, ext_e+1)
        prev_ext_e = ext_e
    loop_indices = sorted(list(set(loop_indices)))
    loop_indices_extended = sorted(list(set(loop_indices_extended)))
    last_ind = 0
    in_loop_indices = False
    last_i = -1
    loop_character_indices = []
    # walk the extended indices, emitting spaces outside loop regions and
    # delim inside; transitions are detected via in_loop_indices
    for i, index in enumerate(loop_indices_extended):
        if in_loop_indices == False and index in loop_indices:
            in_loop_indices = True
            line += ' ' * (ch_ind_list[i] - last_ind)
            last_ind = ch_ind_list[i]
            last_i = i
        if in_loop_indices == True and index not in loop_indices:
            in_loop_indices = False
            line += delim * (ch_ind_list[i-1] - last_ind + 1)
            last_ind = ch_ind_list[i-1] + 1
            loop_character_indices += ch_ind_list[last_i:i]
            last_i = i
    if in_loop_indices == True:
        # the last loop region runs to the end of the alignment row
        line += delim * (len(aligned_seq) - 1 - last_ind + 1)
        loop_character_indices += ch_ind_list[last_i:len(aligned_seq)]
    return line, loop_character_indices
def update_homolog_data(homolog_set_list, l1, l2):
    """Record that loops *l1* and *l2* are homologs, mutating
    homolog_set_list (a list of disjoint sets) in place.

    Bug fix: the original merge branch rebound the local name
    `homolog_set_list` to a brand-new list, so merging two existing sets was
    invisible to the caller.  All updates now mutate the list in place.
    """
    set_ind_for_l1 = -1
    set_ind_for_l2 = -1
    for i, homolog_set in enumerate(homolog_set_list):
        if l1 in homolog_set:
            set_ind_for_l1 = i
        if l2 in homolog_set:
            set_ind_for_l2 = i
        if set_ind_for_l1 > -1 and set_ind_for_l2 > -1:
            break
    if set_ind_for_l1 > -1 and set_ind_for_l2 > -1:
        if set_ind_for_l1 != set_ind_for_l2:
            # merge the two sets: remove both (higher index first so the
            # lower one stays valid) and append the union
            merged = homolog_set_list[set_ind_for_l1] | homolog_set_list[set_ind_for_l2]
            for ind in sorted((set_ind_for_l1, set_ind_for_l2), reverse=True):
                del homolog_set_list[ind]
            homolog_set_list.append(merged)
    elif set_ind_for_l1 == -1 and set_ind_for_l2 == -1:
        homolog_set_list.append({l1, l2})
    elif set_ind_for_l1 == -1:
        homolog_set_list[set_ind_for_l2].add(l1)
    else:
        homolog_set_list[set_ind_for_l1].add(l2)
def _get_region_extension_lengths(regions, seq_len, extension_length):
    """Per-segment (left, right) extension lengths for a sorted region list,
    capped at extension_length and at half the gap to the neighboring
    segment.  Integer arithmetic throughout (the original used `/ 2`, which
    yields floats in Python 3 and later crashes string slicing)."""
    ext_len_list = []
    for k, (s, e) in enumerate(regions):
        left_range = 0
        if k > 0:
            # midpoint between this segment and the previous one
            left_range = s - (s - (regions[k-1][1] + 1) + 1) // 2
        right_range = seq_len - 1
        if k < len(regions) - 1:
            right_range = e + ((regions[k+1][0] - 1) - e + 1) // 2
        ext_len_list.append((min(extension_length, s - left_range),
                             min(extension_length, right_range - e)))
    return ext_len_list

def generate_sequence_alignment_for_all_pairs(family_id, loop_list, extension_length=50):
    """Pairwise-align extended loop sequences and group homologous loops.

    For every pair of loops, the loop regions (extended on both sides by up
    to extension_length residues without crossing the neighboring segment)
    are cut from the chain FASTA sequence and globally aligned.  A pair is
    recorded as homologous when the loop regions align perfectly
    (percentage2 == 0) and the extended sequences are at least 95% identical
    (percentage1 <= 5).

    Returns a list of disjoint sets of homologous loop strings.

    Fixes vs. the original: reference sequences are stored under
    ref_seq_dict[pdb_id][chain_id] (the original wrote ref_seq_dict[chain_id],
    leaving the per-pdb dict empty and raising KeyError at lookup); integer
    division for extension midpoints; the inner loops no longer shadow the
    outer index `i`.
    """
    homolog_set_list = []
    loop_node_list = map(lambda x: strToNode(x), loop_list)
    pdb_list = map(lambda x: x.strip().split(':')[0].strip().split('_')[0], loop_list)
    pdb_list = sorted(list(set(pdb_list)))
    loop_node_list_str = sorted(map(lambda x: str(x), list(set(loop_node_list))))
    # reference chain sequences, keyed by pdb id then chain id
    ref_seq_dict = {}
    for pdb_id in pdb_list:
        fasta_fn = os.path.join(fasta_dir, pdb_id + '.fasta')
        if os.path.isfile(fasta_fn):
            ref_seq_dict[pdb_id] = {}
            for record in SeqIO.parse(fasta_fn, 'fasta'):
                chain_ids = record.description.strip().split('|')[1].strip().split(' ')[1].strip().split(',')
                for chain_id in chain_ids:
                    ref_seq_dict[pdb_id][chain_id] = str(record.seq)
    for i in range(len(loop_node_list_str)):
        l1 = loop_node_list_str[i]
        pdb_chain1, regions1 = l1.strip().split(':')
        pdb_id1, chain_id1 = pdb_chain1.strip().split('_')
        fasta_seq1 = ref_seq_dict[pdb_id1][chain_id1]
        regions1 = regions1.strip().split('_')
        regions1 = sorted(map(lambda x: tuple(map(lambda y: int(y), x.strip().split('-'))), regions1))
        ext_len_list1 = _get_region_extension_lengths(regions1, len(fasta_seq1), extension_length)
        for j in range(i+1, len(loop_node_list_str)):
            l2 = loop_node_list_str[j]
            pdb_chain2, regions2 = l2.strip().split(':')
            pdb_id2, chain_id2 = pdb_chain2.strip().split('_')
            fasta_seq2 = ref_seq_dict[pdb_id2][chain_id2]
            regions2 = regions2.strip().split('_')
            regions2 = sorted(map(lambda x: tuple(map(lambda y: int(y), x.strip().split('-'))), regions2))
            ext_len_list2 = _get_region_extension_lengths(regions2, len(fasta_seq2), extension_length)
            if len(ext_len_list1) != len(ext_len_list2):
                print('Comparing different type of loops is not feasible. Exitting.')
                sys.exit()
            # elementwise minimum so both loops are extended identically
            ext_len_list = []
            for k in range(len(ext_len_list1)):
                left1, right1 = ext_len_list1[k]
                left2, right2 = ext_len_list2[k]
                ext_len_list.append((min(left1, left2), min(right1, right2)))
            prev_e1 = -1
            prev_e2 = -1
            seq1 = ''
            seq2 = ''
            for k in range(len(ext_len_list)):
                left, right = ext_len_list[k]
                s1 = regions1[k][0] - left
                e1 = regions1[k][1] + right
                s2 = regions2[k][0] - left
                e2 = regions2[k][1] + right
                # clip extensions that overlap the previous segment
                if prev_e1 != -1 and s1 <= prev_e1:
                    s1 = prev_e1 + 1
                if prev_e2 != -1 and s2 <= prev_e2:
                    s2 = prev_e2 + 1
                seq1 += fasta_seq1[s1 : e1 + 1]
                seq2 += fasta_seq2[s2 : e2 + 1]
                prev_e1 = e1
                prev_e2 = e2
            aln = pairwise2.align.globalxx(seq1, seq2)
            aln = sorted(aln, key=lambda x: x[4])
            lines = pairwise2.format_alignment(*aln[0]).strip().split('\n')
            identifier_line1, loop1_character_indices = get_loop_region_identifier_line(lines[0], regions1, ext_len_list, 'v')
            identifier_line2, loop2_character_indices = get_loop_region_identifier_line(lines[2], regions2, ext_len_list, '^')
            # globalxx score == number of matches; percentage1 is the mismatch
            # rate over the whole extended sequences
            score = aln[0][-3]
            min_seq_len = min(len(seq1), len(seq2))
            percentage1 = ((min_seq_len - score) * 100.0) / min_seq_len
            # percentage2 is the mismatch rate over the loop regions only
            loop1_character_indices = set(loop1_character_indices)
            loop2_character_indices = set(loop2_character_indices)
            common_character_indices = loop1_character_indices & loop2_character_indices
            min_character_indices_len = min(len(loop1_character_indices), len(loop2_character_indices))
            percentage2 = 100.0 - (len(common_character_indices) * 100.0) / min_character_indices_len
            # perfect loop-region alignment + >=95% extended-sequence identity
            if percentage2 == 0:
                if percentage1 <= 5:
                    update_homolog_data(homolog_set_list, l1, l2)
    return homolog_set_list
def get_homolog_filtered_families(families):
    """Collapse subfamilies whose loops are all sequence homologs.

    Subfamily ids like 'X-Sub1' are folded back under their base id 'X'.
    A subfamily whose loops form a single homolog set is replaced by one
    arbitrary representative (set.pop()); otherwise all loops are kept.
    """
    logger.info('Filtering out the subfamilies with all homologs ...')
    new_families = {}
    for family_id in sorted(families):
        base_id = family_id.strip().split('-Sub')[0].strip()
        if base_id not in new_families:
            new_families[base_id] = []
        loop_list = families[family_id]
        print('Checking ' + family_id + '(' + str(len(loop_list)) + ' loops)')
        # full sequence match in the loop regions plus >=95% identity over
        # the extended sequences counts as homologous
        homolog_set_list = generate_sequence_alignment_for_all_pairs(family_id, loop_list)
        if len(homolog_set_list) == 1:
            logger.info('Filtering ' + family_id)
            new_families[base_id].append(homolog_set_list[0].pop())
        else:
            new_families[base_id] += families[family_id]
    return new_families
|
{"hexsha": "4357e470ccbfd02f100f37a07e9798f61a22ab9d", "size": 31492, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/scripts/utils.py", "max_stars_repo_name": "ucfcbb/RNAMotifContrast", "max_stars_repo_head_hexsha": "a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-14T15:13:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T08:54:27.000Z", "max_issues_repo_path": "src/scripts/utils.py", "max_issues_repo_name": "ucfcbb/RNAMotifContrast", "max_issues_repo_head_hexsha": "a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/scripts/utils.py", "max_forks_repo_name": "ucfcbb/RNAMotifContrast", "max_forks_repo_head_hexsha": "a5e643a760a9f2f2c7fab76f65617e4f1f66eeb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1865921788, "max_line_length": 271, "alphanum_fraction": 0.6037723866, "include": true, "reason": "import numpy", "num_tokens": 8205}
|
# standard lib
import copy
import random
import itertools
from collections.abc import Sequence
# third-party lib
import numpy as np
# local lib
from .base_model_sampler import MODEL_SAMPLERS, BaseModelSampler
@MODEL_SAMPLERS.register_module('range')
class RangeModelSampler(BaseModelSampler):
    """ Range model sampler.

    Samples (or exhaustively traverses) values on the integer grid
    ``range(start, end + 1, step)`` — either one scalar dimension, or one
    value per element when ``start``/``end``/``step`` are sequences.

    Args:
        key (str): name of the model-space dimension being sampled.
        start (int | list): start values of items.
        end (int | list): end values of items (inclusive).
        step (int | list): step of items.
        ascending (bool): the latter elements should always be
            larger-or-equal if True (multi-dimensional case only).
        depth_uniform (bool): bucket all combinations by their total depth
            and sample a bucket uniformly first; only allowed when 'depth'
            is part of ``key``.
        mode (str): sampling mode, forwarded to ``BaseModelSampler``.
    """

    def __init__(self, key, start, end, step, ascending=False, depth_uniform=False, mode='sample'):
        super(RangeModelSampler, self).__init__(mode)
        if 'depth' not in key:
            assert depth_uniform is False, "depth_uniform can't be used here"
        # infer ndim: sequence arguments mean one range per element
        one_dim = True
        if isinstance(start, Sequence):
            one_dim = False
            assert isinstance(end, Sequence)
            assert isinstance(step, Sequence)
            assert len(start) == len(end) == len(step), f'`start, end, step`' \
                f'should share the same length, got {len(start)}, {len(end)}, {len(step)}'
        self.ndim = 1 if one_dim else 2
        self.key = key
        self.start = start
        self.end = end
        self.step = step
        self.ascending = ascending
        self.depth_uniform = depth_uniform
        if self.depth_uniform:
            self.depth_cands = self.enumeration()
        # traverse length must match what traverse() actually yields; the
        # original used (end - start + 1) // step, which undercounts by one
        # whenever step > 1 (len(range(s, e + 1, st)) is the correct count)
        if self.ndim == 2:
            if self.ascending:
                n = sum(1 for c in itertools.product(*self._axis_candidates())
                        if self._is_non_descending(c))
            else:
                n = 1
                for cands in self._axis_candidates():
                    n *= len(cands)
            self._traverse_length = n
        else:
            self._traverse_length = len(range(self.start, self.end + 1, self.step))

    def _axis_candidates(self):
        """Per-axis candidate value lists: range(start, end + 1, step) each."""
        return [list(range(s, e + 1, st))
                for s, e, st in zip(self.start, self.end, self.step)]

    @staticmethod
    def _is_non_descending(values):
        """True when every element is >= its predecessor (first one >= 0)."""
        prev = 0
        for v in values:
            if v < prev:
                return False
            prev = v
        return True

    def _sample_len(self):
        return 1

    def _traverse_len(self):
        return self._traverse_length

    def sample(self):
        """Draw one random point from the grid.

        Returns {key: value} (1-d) or {key: [values]} (2-d).
        """
        if self.depth_uniform:
            # NOTE(review): a depth bin can be empty, in which case
            # random.choice raises IndexError — same as the original behavior.
            return {self.key: random.choice(random.choice(self.depth_cands))}
        if self.ndim == 1:
            cands = list(range(self.start, self.end + 1, self.step))
            return {self.key: random.choice(cands)}
        pivot = 0
        values = []
        for start, end, step in zip(self.start, self.end, self.step):
            # when ascending, force each pick to be >= the previous one
            start = max(start, pivot)
            cands = list(range(start, end + 1, step))
            v = random.choice(cands)
            if self.ascending:
                pivot = v
            values.append(v)
        return {self.key: values}

    def traverse(self):
        """Yield every grid point; non-descending combinations only when
        ascending is set."""
        if self.ndim == 1:
            for v in range(self.start, self.end + 1, self.step):
                yield {self.key: v}
        else:
            for values in itertools.product(*self._axis_candidates()):
                if self.ascending and not self._is_non_descending(values):
                    continue
                yield {self.key: values}

    def enumeration(self):
        """Bucket every grid combination by total depth.

        Returns a list of bins; bin t holds the combinations whose element
        sum equals min_depth + t.
        """
        min_depth = np.sum(self.start)
        max_depth = np.sum(self.end)
        bin_num = (max_depth - min_depth) + 1
        depth_cands = [[] for _ in range(bin_num)]
        for each_candidate in itertools.product(*self._axis_candidates()):
            temp_length = np.sum(each_candidate)
            depth_cands[temp_length - min_depth].append(each_candidate)
        return depth_cands

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += f'key={self.key}, '
        format_string += f'start={self.start}, '
        format_string += f'end={self.end}, '
        format_string += f'step={self.step}, '
        format_string += f'mode={self._mode}, '
        format_string += ')'
        return format_string
@MODEL_SAMPLERS.register_module('candidate')
class CandidateModelSampler(BaseModelSampler):
    """ Candidate model sampler.

    Samples from an explicit candidate list: a flat list (ndim 1), or a
    per-position list of candidate lists (ndim 2; bare scalars are promoted
    to one-element lists).

    Args:
        key (str): name of model space dimension
        candidates (list | list[list]): candidate values, or per-position
            candidate lists.
        mode (str): sampling mode, forwarded to ``BaseModelSampler``.
    """

    def __init__(self, key, candidates, mode='sample'):
        assert isinstance(candidates, Sequence)
        super(CandidateModelSampler, self).__init__(mode)
        self.key = key
        # infer ndim: any nested sequence makes this a per-position sampler
        one_dim = True
        for c in candidates:
            if isinstance(c, Sequence):
                one_dim = False
        self.ndim = 1 if one_dim else 2
        if self.ndim == 2:
            # promote bare scalars to singleton candidate lists
            rectified_cands = []
            for c in candidates:
                if not isinstance(c, Sequence):
                    rectified_cands.append([c])
                else:
                    rectified_cands.append(c)
            self.candidates = rectified_cands
            self._traverse_length = np.prod([len(c) for c in rectified_cands])
        else:
            self.candidates = candidates
            self._traverse_length = len(candidates)

    def _sample_len(self):
        return 1

    def _traverse_len(self):
        return self._traverse_length

    def sample(self):
        """Draw one random candidate (ndim 1) or one per position (ndim 2)."""
        if self.ndim == 1:
            return {self.key: random.choice(self.candidates)}
        elif self.ndim == 2:
            values = []
            for cs in self.candidates:
                values.append(random.choice(cs))
            return {self.key: values}
        else:
            raise NotImplementedError

    def traverse(self):
        """Yield every candidate (ndim 1) or every combination (ndim 2)."""
        if self.ndim == 1:
            for v in self.candidates:
                yield {self.key: v}
        elif self.ndim == 2:
            # fix: the original `product(*candidates)` raised NameError —
            # `product` was never imported and `candidates` is undefined here
            for v in itertools.product(*self.candidates):
                yield {self.key: v}
        else:
            raise NotImplementedError

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += f'key={self.key}, '
        format_string += f'candidates={self.candidates}, '
        format_string += f'mode={self._mode}, '
        format_string += ')'
        return format_string
@MODEL_SAMPLERS.register_module('flops')
class FlopsModelSampler(BaseModelSampler):
    """ Flops model sampler.

    Reads a json-lines file of (input_size, architecture, flops) records,
    keeps the records matching ``input_size``, sorts them by flops and splits
    them into ``bin_num`` bins; sampling picks a bin uniformly and then an
    architecture uniformly within it.

    Args:
        key (str): the json file which records the input_size, architecture
            and flops.
        input_size (str | int): input size to filter on; an int N is
            normalized to "3,N,N".
        bin_num (int): number of flops bins.
        mode (str): sampling mode, forwarded to ``BaseModelSampler``.

    Examples:
        dict(
            type='flops',
            key="/data/GAIA-seg/hubs/flops/flops.json",
            input_size="3,512,1024",
            bin_num=5
        )
    """
    def __init__(self, key, input_size, bin_num=5, mode='sample'):
        super(FlopsModelSampler, self).__init__(mode)
        self.key = key
        self.input_size = input_size
        if isinstance(self.input_size, int):
            self.input_size = f"3,{self.input_size},{self.input_size}"
        self.bin_num = bin_num
        self.ndim = 1
        self.candidates = self.get_candidates()
        self._traverse_length = 0
        for each_candidate in self.candidates:
            self._traverse_length += len(each_candidate)

    def _sample_len(self):
        return 1

    def _traverse_len(self):
        return self._traverse_length

    def sample(self):
        """Pick a flops bin uniformly, then an architecture dict within it."""
        return random.choice(random.choice(self.candidates))[1]

    def traverse(self):
        """Yield every architecture dict, in ascending-flops order."""
        for each_candidates in self.candidates:
            for candidate in each_candidates:
                yield candidate[1]

    def get_candidates(self):
        """Parse the flops file into bin_num flops-sorted bins of
        (flops, arch_dict) pairs."""
        candidates = []
        # close the handle deterministically (the original leaked it)
        with open(self.key, 'r') as flops_json_file:
            for each_row in flops_json_file:
                # WARNING: eval() executes arbitrary code from the file; only
                # use trusted flops files (json.loads would be safer if each
                # row is valid JSON — confirm the file format).
                info_dict = eval(each_row.strip())
                temp_input_shape = info_dict["data"]["input_shape"]
                if isinstance(temp_input_shape, int):
                    temp_input_shape = f"3,{temp_input_shape},{temp_input_shape}"
                if temp_input_shape != self.input_size:
                    continue
                temp_candidate = []
                temp_candidate.append(info_dict["overhead"]["flops"])
                temp_dict = {}
                # NOTE(review): unfold_dict is not defined or imported in this
                # module — presumably flattens the nested dict; confirm origin.
                for k, v in unfold_dict(info_dict).items():
                    if 'arch' in k:
                        temp_dict[k] = v
                temp_candidate.append(temp_dict)
                candidates.append(temp_candidate)
        candidates = sorted(candidates, key=lambda x: x[0])
        assert len(candidates) > 0, "This flops json file doesn't contain this input size"
        # integer ceil(len / bin_num); the original called math.ceil although
        # `math` is never imported in this module (NameError at runtime)
        n = -(-len(candidates) // self.bin_num)
        candidates = [candidates[i:i + n] for i in range(0, len(candidates), n)]
        return candidates

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += f'key={self.key}, '
        format_string += f'candidates={self.candidates}, '
        format_string += f'mode={self._mode}, '
        format_string += ')'
        return format_string
|
{"hexsha": "1b90de3c5846d8cc0cffed2ebf8cca531e7212b5", "size": 10166, "ext": "py", "lang": "Python", "max_stars_repo_path": "gaiavision/model_space/model_samplers/random_model_sampler.py", "max_stars_repo_name": "NickChang97/GAIA-cv", "max_stars_repo_head_hexsha": "b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gaiavision/model_space/model_samplers/random_model_sampler.py", "max_issues_repo_name": "NickChang97/GAIA-cv", "max_issues_repo_head_hexsha": "b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gaiavision/model_space/model_samplers/random_model_sampler.py", "max_forks_repo_name": "NickChang97/GAIA-cv", "max_forks_repo_head_hexsha": "b691af89813ffa6a1d1e1719c6dd0ec4c253d2bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5782312925, "max_line_length": 99, "alphanum_fraction": 0.5440684635, "include": true, "reason": "import numpy", "num_tokens": 2184}
|
// Copyright 2014 Quartz Technologies, Ltd. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Quartz Technologies Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef v8bridge_native_endpoint_hpp
#define v8bridge_native_endpoint_hpp
#include <v8bridge/detail/prefix.hpp>
#include <iostream>
#include <boost/type_traits/is_same.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/mpl/at.hpp>
namespace v8
{
namespace bridge
{
using namespace v8;
        /**
         * A native-endpoint abstraction class.
         *
         * Base class for objects that expose native functionality to V8;
         * it stores the v8::Isolate (isolation scope) the endpoint was
         * created with.
         */
        class V8_DECL NativeEndpoint
        {
        public:
            /* The isolate this endpoint is bound to. */
            inline Isolate *getIsolationScope() { return this->m_isolationScope; }
        protected:
            /* Construction is restricted to subclasses. */
            NativeEndpoint(Isolate *isolationScope) : m_isolationScope(isolationScope) { }

            Isolate *m_isolationScope;

            /* Compile-time predicate: true when the second element of the
             * TSignature MPL sequence is `const v8::FunctionCallbackInfo<Value>&`,
             * i.e. the bound function takes the raw callback info directly. */
            template <class TSignature>
            struct resolve_directly_passed_args : boost::is_same<
                const v8::FunctionCallbackInfo<Value> &,
                typename boost::mpl::at_c<TSignature, 1>::type > {};
        };
}
}
#endif /* defined(__V8Bridge__NativeEndpoint__) */
|
{"hexsha": "942fbb10178826ae17f433cfa710cd4f76a571c4", "size": 2601, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "v8bridge/native/native_endpoint.hpp", "max_stars_repo_name": "QuartzTechnologies/v8bridge", "max_stars_repo_head_hexsha": "5e2f2d6b93adae25295b88c0c4e0eb4f93e22057", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 17.0, "max_stars_repo_stars_event_min_datetime": "2015-01-16T15:39:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T15:41:40.000Z", "max_issues_repo_path": "v8bridge/native/native_endpoint.hpp", "max_issues_repo_name": "QuartzTechnologies/v8bridge", "max_issues_repo_head_hexsha": "5e2f2d6b93adae25295b88c0c4e0eb4f93e22057", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-09-18T13:23:33.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-18T13:23:33.000Z", "max_forks_repo_path": "v8bridge/native/native_endpoint.hpp", "max_forks_repo_name": "QuartzTechnologies/v8bridge", "max_forks_repo_head_hexsha": "5e2f2d6b93adae25295b88c0c4e0eb4f93e22057", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2016-04-05T09:55:05.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-02T15:27:40.000Z", "avg_line_length": 41.2857142857, "max_line_length": 90, "alphanum_fraction": 0.708958093, "num_tokens": 535}
|
#!/usr/bin/env python
# coding: utf-8
'''
Code for the arid6 dataset.
You should change
X = np.load('./X_17296_new.npy')
y = np.load('./y_17296_new.npy')
to your own dataset paths,
then run: python mgrForest_arid5.py
'''
import numpy as np
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from itertools import product
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.model_selection import cross_val_predict as cvp
import random
from functools import reduce
class MultiGrainedScaner():
    """Multi-grained scanner (gcForest style) for feature generation.

    Slides fixed-size windows over every raw sample, trains one forest per
    parameter dict on the windowed sub-samples, and concatenates the forests'
    class-probability outputs into one new feature vector per sample.

    Parameters
    ----------
    base_estimator : estimator instance; its class is used to build the forests
                     (its ``predict`` is expected to return probabilities,
                     see ProbRandomForestClassifier below)
    params_list    : list of keyword-argument dicts, one forest per dict
    sliding_ratio  : fraction of each axis used as the window size
    k_fold         : > 1 -> out-of-sample predictions via k-fold CV,
                     otherwise out-of-bag estimates are used
    """
    def __init__(self, base_estimator, params_list, sliding_ratio = 0.25, k_fold = 2):
        if k_fold > 1: # use cross-validation for out-of-sample predictions
            self.params_list = params_list
        else: # use out-of-bag estimates; force oob_score on every forest
            self.params_list = [params.update({'oob_score':True}) or params for params in params_list]
        self.sliding_ratio = sliding_ratio
        self.k_fold = k_fold
        self.base_estimator = base_estimator
        klass = self.base_estimator.__class__
        self.estimators = [klass(**params) for params in self.params_list]
    # generate scanned samples; X must be non-empty and X[0] at most 3-D
    def _sample_slicer(self,X,y):
        """Cut each sample into stride-3 sliding windows.

        Returns ``(newX, newy, scan_round_total)`` where ``newX`` stacks the
        flattened windows of every sample, ``newy`` repeats each label once
        per window, and ``scan_round_total`` is the window count per sample.
        """
        data_shape = X[0].shape
        stride = 3
        window_shape = [max(int(data_size * self.sliding_ratio),1) for data_size in data_shape]
        # Only the first (up to) two axes are scanned; a third axis, if any,
        # is kept whole inside every window.  Bug fix: the axis count was
        # hard-coded to 2, which raised IndexError for 1-D samples.
        scan_round_axis = [int((data_shape[i]-window_shape[i])/stride+1)
                           for i in range(min(len(data_shape), 2))]
        scan_round_total = reduce(lambda acc,x: acc*x,scan_round_axis)
        if len(data_shape) == 1:
            # Bug fix: use the same stride-based windows as the 2-D/3-D
            # branches; the old non-overlapping slicing disagreed with
            # scan_round_axis (and was unreachable due to the crash above).
            newX = np.array([x[beg*stride:beg*stride+window_shape[0]]
                             for x in X
                             for beg in range(scan_round_axis[0])])
        elif len(data_shape) == 2: # ravel flattens each 2-D window to 1-D
            newX = np.array([x[beg0*stride:beg0*stride+window_shape[0],beg1*stride:beg1*stride+window_shape[1]].ravel()
                             for x in X
                             for beg0 in range(scan_round_axis[0])
                             for beg1 in range(scan_round_axis[1])])
        elif len(data_shape) == 3:
            # Windows cover the first two axes; the third axis is taken whole.
            newX = np.array([x[beg0 * stride:beg0 * stride + window_shape[0],beg1 * stride:beg1*stride + window_shape[1]].ravel()
                             for x in X
                             for beg0 in range(scan_round_axis[0])
                             for beg1 in range(scan_round_axis[1])])
        newy = y.repeat(scan_round_total)
        return newX,newy,scan_round_total
    # generate the new sample vectors used by the cascade
    def scan_fit(self,X,y):
        """Fit every forest on the windowed samples and return the
        concatenated per-sample probability vectors, one block of
        ``scan_round_total * n_classes`` columns per forest."""
        self.n_classes = len(np.unique(y))
        newX,newy,scan_round_total = self._sample_slicer(X,y)
        sample_vector_list = []
        for estimator in self.estimators:
            estimator.fit(newX, newy)
            if self.k_fold > 1: # out-of-sample probabilities via CV
                predict_ = cvp(estimator, newX, newy, cv=self.k_fold, n_jobs = -1)
            else: # out-of-bag probabilities
                predict_ = estimator.oob_decision_function_
            # fill a uniform default wherever OOB/CV produced NaN
            inds = np.where(np.isnan(predict_))
            predict_[inds] = 1./self.n_classes
            sample_vector = predict_.reshape((len(X),scan_round_total*self.n_classes))
            sample_vector_list.append(sample_vector)
        return np.hstack(sample_vector_list)
    def scan_predict(self,X):
        """Transform unseen samples with the already-fitted forests."""
        newX,newy,scan_round_total = self._sample_slicer(X,np.zeros(len(X)))
        sample_vector_list = []
        for estimator in self.estimators:
            predict_ = estimator.predict(newX)
            sample_vector = predict_.reshape((len(X),scan_round_total*self.n_classes))
            sample_vector_list.append(sample_vector)
        return np.hstack(sample_vector_list)
# cascade_params_list = [cascade_forest_params1,cascade_forest_params2]*2
# def calc_accuracy(pre,y):
# return float(sum(pre==y))/len(y)
class ProbRandomForestClassifier(RandomForestClassifier):
    """Random forest whose ``predict`` yields class probabilities.

    ``predict`` is overridden so that callers which invoke ``predict``
    (e.g. ``cross_val_predict`` inside MultiGrainedScaner) receive the
    output of ``predict_proba`` instead of hard class labels.
    """

    def predict(self, X):
        # Delegate to the parent's probability estimator rather than
        # returning argmax class labels.
        return super().predict_proba(X)
class RFLayer_RAND(object):
    """One cascade layer: a random forest chosen among k-fold candidates.

    ``fit`` trains up to ``k`` candidate forests on different train/estimation
    splits and keeps the one with the best hold-out accuracy.
    ``push_thru_data`` appends per-tree class probabilities to (a prefix of)
    the input features, producing the input for the next layer.
    """
    def __init__(self, n_estimators, classifier=True , md=None, mss=10):
        # n_estimators: trees per forest; md: max_depth; mss: min_samples_split
        self.n_estimators = n_estimators
        self.max_depth = md
        self.min_samples_split = mss
        self.classifier = classifier
    def fit(self, X_train, y_train, kfold=5, k=1, n_jobs=-1): # kfold = 5 yields 80/20 split, k will be the number of times we run validation
        if kfold > 1:
            kf = KFold(kfold, shuffle=True)
        else:
            raise ValueError('Need to pass kfold something greater than 1 so can do cross validation')
        models = []
        best_score = 0
        best_ind = 0
        count = 0
        # split training data into training and estimating sets via quasi kfold validation routine
        for tr_ind, est_ind in kf.split(X_train, y_train):
            # instantiate the layer of decision trees
            models.append(RandomForestClassifier(self.n_estimators, criterion='gini', max_depth=self.max_depth,
                                                 min_samples_split=self.min_samples_split,min_samples_leaf = 1,
                                                 max_features = 'sqrt',
                                                 n_jobs=n_jobs))
            # NOTE: dead commented-out code (randomizing half the tree
            # splitters) removed here.
            # get the split of the training data
            X_tr, y_tr = X_train[tr_ind,:], y_train[tr_ind]
            # train the layer on this split
            models[count].fit(X_tr, y_tr)
            X_tr, y_tr = 0, 0 # drop references to free memory
            # check accuracy on the estimation set
            X_est, y_est = X_train[est_ind,:], y_train[est_ind]
            y_pred = models[count].predict(X_est)
            acc_score = accuracy_score(y_pred, y_est)
            X_est, y_est = 0, 0 # memory
            y_pred = 0 # memory
            if acc_score > best_score: # with k > 1 we compare to see which is best layer trained
                best_score = acc_score
                best_ind = count
            count += 1
            if count >= k: # only the first k folds are used as candidates
                break
        # save the best layer
        self.L = models[best_ind]
        self.n_classes = self.L.n_classes_
        self.val_score = best_score
    def predict(self, X_test):
        # Predict with the best forest selected by fit().
        return self.L.predict(X_test)
    def push_thru_data(self, X):
        """Return [per-tree predict_proba blocks | first
        n_estimators*n_classes input columns] for the saved forest."""
        n_samples, dim_data = X.shape
        X_push = np.empty((n_samples, self.n_estimators*self.n_classes))
        # push the data X through this layer
        # NOTE(review): when ``classifier`` is False nothing is ever written
        # into X_push (and ``i`` never advances), so the np.empty garbage
        # would be concatenated below -- only classifier=True is exercised
        # in this file; confirm before using the regression path.
        i = 0
        for tree in self.L.estimators_:
            if self.classifier:
                X_push[:,i*self.n_classes:(i+1)*self.n_classes] = tree.predict_proba(X)
                i += 1
        X_a = np.concatenate((X_push,X[:,:self.n_estimators*self.n_classes]),axis = 1)
        return X_a
# In[ ]:
def main():
    """Train a multi-grained residual forest on the arid dataset.

    Loads X/y from .npy files, builds multi-grained scanned features,
    grows cascade layers until the relative validation gain drops below
    ``min_gain`` (or perfect accuracy is reached), then evaluates on the
    held-out tail of the data and saves predictions to ./pred.npy.
    """
    X = np.load('./X_17296_new.npy')
    y = np.load('./y_17296_new.npy')
    # Optional pixel normalization:
    # X = X.astype('float32') / 255.
    aa=15000  # train/test split point
    X_train = X[:aa]
    y_train = y[:aa]
    X_test = X[aa:]
    y_test = y[aa:]
    X = 0  # release references to free memory
    y = 0
    # Two forest flavours per scanner: random forest + extra trees.
    scan_forest_params1 = RandomForestClassifier(n_estimators=10,min_samples_split=21,max_features='sqrt',n_jobs=-1).get_params()
    scan_forest_params2 = ExtraTreesClassifier(n_estimators = 10,min_samples_split=21, n_jobs=-1).get_params()
    scan_params_list = [scan_forest_params1,scan_forest_params2]
    # Two granularities: half-size and quarter-size sliding windows.
    Scaner1 = MultiGrainedScaner(ProbRandomForestClassifier(), scan_params_list, sliding_ratio = 1./2)
    Scaner2 = MultiGrainedScaner(ProbRandomForestClassifier(), scan_params_list, sliding_ratio = 1./4)
    # Scaner3 = MultiGrainedScaner(ProbRandomForestClassifier(), scan_params_list, sliding_ratio = 1./16)
    print('start training samples scanning.....')
    import time
    st = time.time()
    X_train_scan =np.hstack([scaner.scan_fit(X_train, y_train)
                             for scaner in [Scaner1,Scaner2]])
    print(' training samples:',X_train_scan.shape)
    # train the next layers on multigrained scanning data
    print( 'RF Layer training:')
    # parameters for the building of the next layers
    n = 500# num trees in each layer
    min_gain = 0.01  # minimum relative accuracy gain to keep stacking
    verbose = True
    max_layers = 6
    md = None   # max_depth
    mss = 21    # min_samples_split
    n_jobs = -1
    # dictionary where layers of decision trees will be stored
    Layers = {}
    prev_score = -1.0 # instantiate prev_score
    # build the layers
    for i in range(max_layers):
        print (X_train_scan.shape)
        RFL = RFLayer_RAND(n, md=md, mss=mss)
        RFL.fit(X_train_scan, y_train, 3, 1, n_jobs)
        Layers[i] = RFL
        # if verbose, print out the estimation accuracy for this layer
        if verbose:
            print ('Layer ' + str(i+1))
            print ('acc: ' + str(RFL.val_score))
        # check to see if we have improved enough going one more layer
        rel_gain = (RFL.val_score - prev_score)/float(abs(prev_score))
        if rel_gain < min_gain or RFL.val_score == 1.0 :
            print ('Converged! Stopping building layers')
            print()  # bug fix: bare ``print`` was a no-op expression in Python 3
            break
        prev_score = RFL.val_score
        # if moving on to another level, push the data through
        X_train_scan = RFL.push_thru_data(X_train_scan)
        print ('Going to another layer')
        print()  # bug fix: bare ``print`` was a no-op expression in Python 3
    et = time.time()
    print('training time:',et - st)
    print ('Loading in testing data')
    st = time.time()  # redundant second ``import time`` removed
    X_a_scan = np.hstack([scaner.scan_predict(X_test)
                          for scaner in [Scaner1,Scaner2]])
    print ('Load over')
    # push test data thru FTDRF layers; the last layer predicts instead
    for i in range(len(Layers.keys())-1):
        X_a_scan = Layers[i].push_thru_data(X_a_scan)
    last = len(Layers.keys())-1
    y_pred = Layers[last].predict(X_a_scan)
    np.save('./pred.npy',y_pred)
    et = time.time()
    print('testing time:',et - st)
    print ('Statistics:')
    print ('The accuracy was:')
    print (accuracy_score(y_pred, y_test))
    print ('Params:')
    print ('num_tres in each layer = ' + str(n))
    print ('md =' + str(md))
    print ('mss = ' + str(mss))
if __name__ == "__main__":
    main()
|
{"hexsha": "73d99b98027c6e87baf5caadad7e727248ee6ced", "size": 10464, "ext": "py", "lang": "Python", "max_stars_repo_path": "mgrForest_arid5.py", "max_stars_repo_name": "qianmingduowan/A-Multi-dimensional-Multi-grained-Residual-Forest", "max_stars_repo_head_hexsha": "e38f2fd3d6b30853df816e9478c02163cb4023f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-29T13:48:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-29T13:48:58.000Z", "max_issues_repo_path": "mgrForest_arid5.py", "max_issues_repo_name": "qianmingduowan/A-Multi-dimensional-Multi-grained-Residual-Forest", "max_issues_repo_head_hexsha": "e38f2fd3d6b30853df816e9478c02163cb4023f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mgrForest_arid5.py", "max_forks_repo_name": "qianmingduowan/A-Multi-dimensional-Multi-grained-Residual-Forest", "max_forks_repo_head_hexsha": "e38f2fd3d6b30853df816e9478c02163cb4023f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-26T02:51:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-26T02:51:47.000Z", "avg_line_length": 38.1897810219, "max_line_length": 141, "alphanum_fraction": 0.6122897554, "include": true, "reason": "import numpy", "num_tokens": 2569}
|
import torch
from torch.utils.data import DataLoader
import data
import models
import configargparse
from tensorboardX import SummaryWriter
import os
from output import OutputWriter
import numpy as np
import random
try:
    from tqdm import tqdm
except ImportError:
    # Fallback when tqdm is not installed: a no-op shim that simply hands
    # back the iterable and ignores all progress-bar options.
    def tqdm(sequence, *args, **kwargs):
        return sequence
if __name__ == "__main__":
    # Supported training tasks, also the default per-epoch execution order:
    # POS tagging, VUA metaphor detection, SNLI entailment.
    tasks = [
        "pos", "vua", "snli"
    ]
    # configargparse lets every flag also be supplied via a YAML config file.
    parser = configargparse.ArgumentParser(config_file_parser_class=configargparse.YAMLConfigFileParser)
    parser.add_argument(
        "--config", "-C", type=str, is_config_file=True, required=False,
        help="The config specifying arguments to run with."
    )
    parser.add_argument(
        "--output", "-o", type=str, required=True,
        help="The directory to store all the output."
    )
    parser.add_argument(
        "--learning-rate", "-l", type=float, required=False, default=0.01,
        help="The learning rate to use during training."
    )
    parser.add_argument(
        "--no-cuda", action="store_true", required=False, default=False,
        help="Disable the use of cuda during training."
    )
    parser.add_argument(
        "--tasks", "-t", type=str, nargs="+", default=tasks, choices=tasks, required=False,
        help="The tasks to perform during training and the order in which they are performed in an epoch."
    )
    parser.add_argument(
        "--epochs", "-e", type=int, default=20, required=False,
        help="The amount of epochs to train for."
    )
    parser.add_argument(
        "--num_workers", "-n", type=int, default=0, required=False,
        help="The number of workers to use for data loading."
    )
    parser.add_argument(
        "--epsilon", type=float, default=1.0, required=False,
        help="The base epsilon used for the learning rate schedule."
    )
    parser.add_argument(
        "--rho", type=float, default=0.3, required=False,
        help="The rho parameter used for the learning rate decay."
    )
    parser.add_argument(
        "--delta-classifier", type=float, default=1e-2, required=False,
        help="The delta parameter used for succesive regularization applied on the classifier layers."
    )
    parser.add_argument(
        "--delta-lstm", type=float, default=1e-3, required=False,
        help="The delta parameter used for succesive regularization applied on the lstm layers."
    )
    parser.add_argument(
        "--embedding-model", type=str, choices=["ELMo3+GloVe", "ELMo2+GloVe", "bert-base-cased", "bert-large-cased"], default="ELMo3+GloVe", required=False,
        help="The embedding model to use to generate the contextual word embeddings."
    )
    parser.add_argument(
        '--seed', type=int, required=True, help="Seed for training"
    )
    args = parser.parse_args()
    # Record of this run's hyper-parameters, saved alongside the model output.
    arguments = {
        "learning-rate": args.learning_rate,
        "tasks": args.tasks,
        "epochs": args.epochs,
        "num-workers": args.num_workers,
        "epsilon": args.epsilon,
        "rho": args.rho,
        "delta-classifier": args.delta_classifier,
        "delta-lstm": args.delta_lstm,
        "embedding-model": args.embedding_model,
        "seed": args.seed,
    }
    # Set random seeds for reproducibility across torch, numpy and python
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.output:
        arguments["output"] = args.output
    if args.no_cuda:
        arguments["no-cuda"] = True
    # Use the GPU unless it is unavailable or explicitly disabled.
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    print("Creating model.")
    model = models.JMTModel(device, pos_classes=17, embedding_model=args.embedding_model)
    model.to(device)
    print("Loading datasets.")
    # Each enabled task gets train/validation/test DataLoaders; the training
    # split is shuffled and uses a smaller batch size than evaluation splits.
    if "pos" in args.tasks:
        pos_dataset = {
            "train": data.VuaPosDataset("train"),
            "validation": data.VuaPosDataset("validation"),
            "test": data.VuaPosDataset("test")
        }
        pos_loaders = {
            name: DataLoader(
                dataset,
                shuffle=(name == "train"),
                batch_size=64 if name == "train" else 128,
                num_workers=args.num_workers,
                collate_fn=data.penn_collate_fn
            ) for name, dataset in pos_dataset.items()
        }
    else:
        pos_loaders = {}
    if "snli" in args.tasks:
        snli_dataset = {
            "train": data.SnliDataset(os.path.join("data", "snli", "snli_1.0_train.jsonl")),
            "validation": data.SnliDataset(os.path.join("data", "snli", "snli_1.0_dev.jsonl")),
            "test": data.SnliDataset(os.path.join("data", "snli", "snli_1.0_test.jsonl"))
        }
        snli_loaders = {
            name: DataLoader(
                dataset,
                shuffle=(name == "train"),
                batch_size=64 if name == "train" else 128,
                num_workers=args.num_workers,
                collate_fn=data.snli_collate_fn
            ) for name, dataset in snli_dataset.items()
        }
    else:
        snli_loaders = {}
    if "vua" in args.tasks:
        vua_dataset = {
            "train": data.VuaSequenceDataset(split="train"),
            "validation": data.VuaSequenceDataset(split="validation"),
            "test": data.VuaSequenceDataset(split="test")
        }
        vua_loaders = {
            name: DataLoader(
                dataset,
                shuffle=(name == "train"),
                batch_size=64 if name == "train" else 128,
                num_workers=args.num_workers,
                collate_fn=data.vua_sequence_collate_fn
            ) for name, dataset in vua_dataset.items()
        }
    else:
        vua_loaders = {}
    # Harmonic learning-rate decay: epsilon / (1 + rho * epoch).
    lr_function = lambda epoch: args.epsilon / (1.0 + args.rho * (epoch))
    # Standard optimizer parameter groups.
    # The per-group "lr" values act as multipliers on the LambdaLR schedule;
    # groups owned by *earlier* tasks get (1 - delta) to implement successive
    # regularization (earlier-task parameters move more slowly when training
    # later tasks).
    pos_optimizer_group =\
    [
        {"params": model.pos_lstm.parameters(), "weight_decay": 1e-6, "lr": 1.0},
        {"params": model.pos_classifier.parameters(), "weight_decay": 1e-5, "lr": 1.0},
    ]
    vua_optimizer_group =\
    [
        {"params": model.pos_lstm.parameters(), "weight_decay": 1e-6, "lr": (1.0 - args.delta_lstm) if "pos" in args.tasks else 1.0},
        {"params": model.pos_classifier.parameters(), "weight_decay": 1e-5, "lr": (1.0 - args.delta_classifier) if "pos" in args.tasks else 1.0},
        {"params": model.metaphor_lstm.parameters(), "weight_decay": 1e-6, "lr": 1.0},
        {"params": model.metaphor_classifier.parameters(), "weight_decay": 1e-5, "lr": 1.0}
    ]
    snli_optimizer_group =\
    [
        {"params": model.metaphor_lstm.parameters(), "weight_decay": 1e-6, "lr": (1.0 - args.delta_lstm) if "pos" in args.tasks or "vua" in args.tasks else 1.0},
        {"params": model.metaphor_classifier.parameters(), "weight_decay": 1e-5, "lr": (1.0 - args.delta_classifier) if "pos" in args.tasks or "vua" in args.tasks else 1.0},
        {"params": model.pos_lstm.parameters(), "weight_decay": 1e-6, "lr": (1.0 - args.delta_lstm) if "vua" in args.tasks else 1.0},
        {"params": model.pos_classifier.parameters(), "weight_decay": 1e-5, "lr": (1.0 - args.delta_classifier) if "vua" in args.tasks else 1.0},
        {"params": model.snli_lstm.parameters(), "weight_decay": 1e-6, "lr": 1.0},
        {"params": model.snli_classifier.parameters(), "weight_decay": 1e-5, "lr": 1.0}
    ]
    # Adding ELMo embedding mixing params to optimizers if applicable
    if arguments['embedding-model'] in ["ELMo3+GloVe","ELMo2+GloVe"]: # train ELMo mixing params
        pos_optimizer_group.append(
            {"params": model.embedding.elmo.parameters(), "weight_decay": 1e-6, "lr": 1.0}
        )
        vua_optimizer_group.append(
            {"params": model.embedding.elmo.parameters(), "weight_decay": 1e-6, "lr": (1.0 - args.delta_lstm) if "pos" in args.tasks else 1.0}
        )
        snli_optimizer_group.append(
            {"params": model.embedding.elmo.parameters(), "weight_decay": 1e-6, "lr": (1.0 - args.delta_lstm) if "pos" in args.tasks or "vua" in args.tasks else 1.0},
        )
    # Construct optimizers.  A global lr is optional here because every
    # parameter group specifies its own "lr".
    pos_optimizer = torch.optim.SGD(pos_optimizer_group)
    vua_optimizer = torch.optim.SGD(vua_optimizer_group,lr=1)
    snli_optimizer = torch.optim.SGD(snli_optimizer_group,)
    # Learning rate schedules
    vua_lr_schedule = torch.optim.lr_scheduler.LambdaLR(
        vua_optimizer, lr_function
    )
    pos_lr_schedule = torch.optim.lr_scheduler.LambdaLR(
        pos_optimizer, lr_function
    )
    snli_lr_schedule = torch.optim.lr_scheduler.LambdaLR(
        snli_optimizer, lr_function
    )
    # Per-task bundle: (model, forward fn, optimizer, scheduler, loaders, loss).
    task_objects = {
        "pos": (model, model.pos_forward, pos_optimizer, pos_lr_schedule, pos_loaders, torch.nn.CrossEntropyLoss()),
        "vua": (model, model.metaphor_forward, vua_optimizer, vua_lr_schedule, vua_loaders, torch.nn.CrossEntropyLoss()),
        "snli": (model, model.snli_forward, snli_optimizer, snli_lr_schedule, snli_loaders, torch.nn.CrossEntropyLoss())
    }
    writer = OutputWriter(args.output, custom_saving=args.embedding_model in ["ELMo2+GloVe","ELMo3+GloVe"])
    writer.save_arguments(arguments)
    for epoch in tqdm(range(args.epochs), "Epoch"):
        for task in tqdm(args.tasks, "Tasks"):
            # Fetch this task's training objects and advance its decay
            # schedule (stepped once per task per epoch, before training).
            model, fn, optimizer, lr_scheduler, loaders, criterion = task_objects[task]
            lr_scheduler.step()
            model.train()
            for i, batch in tqdm(enumerate(loaders["train"]), total=len(loaders["train"])):
                optimizer.zero_grad()
                # Move tensors to the device; plain lists in the batch are
                # passed through unchanged.
                inputs = tuple(b.to(device) if type(b) != list else b for b in batch[:-1])
                targets = batch[-1].to(device)
                output = fn(*inputs)
                if task == "snli":
                    # Sentence-level classification: one label per example.
                    loss = criterion(output, targets)
                    accuracy = torch.sum(torch.argmax(output, dim=1) == targets).item() / targets.size(0)
                else:
                    # Token-level tagging: flatten (batch, seq, classes) and
                    # exclude positions labelled -100 from the accuracy
                    # (matching CrossEntropyLoss's default ignore_index).
                    loss = criterion(output.view(-1, output.size(2)), targets.view(-1))
                    amount = (targets != -100).nonzero().size(0)
                    accuracy = torch.sum((torch.argmax(output, dim=2) == targets) & (targets != -100)).item() / amount
                loss.backward()
                optimizer.step()
                writer.add_scalar(
                    f"{task}/train/loss", loss.item(), global_step=len(loaders["train"]) * epoch + i + 1
                )
                writer.add_scalar(
                    f"{task}/train/accuracy", accuracy, global_step=len(loaders["train"]) * epoch + i + 1
                )
            model.eval()
            if "validation" in loaders:
                with torch.no_grad():
                    accuracies = []
                    losses = []
                    batch_sizes = []
                    for batch in tqdm(loaders["validation"], total=len(loaders["validation"])):
                        inputs = tuple(b.to(device) if type(b) != list else b for b in batch[:-1])
                        targets = batch[-1].to(device)
                        output = fn(*inputs)
                        if task == "snli":
                            loss = criterion(output, targets)
                            accuracy = torch.sum(torch.argmax(output, dim=1) == targets).item() / targets.size(0)
                        else:
                            loss = criterion(output.view(-1, output.size(2)), targets.view(-1))
                            amount = (targets != -100).nonzero().size(0)
                            accuracy = torch.sum((torch.argmax(output, dim=2) == targets) & (targets != -100)).item() / amount
                        losses.append(loss.item())
                        accuracies.append(accuracy)
                        batch_sizes.append(targets.size(0))
                    # Weight per-batch metrics by batch size before averaging.
                    loss = np.average(losses, weights=batch_sizes)
                    accuracy = np.average(accuracies, weights=batch_sizes)
                    writer.add_scalar(
                        f"{task}/validation/loss", loss.item(), global_step=len(loaders["train"]) * (epoch + 1)
                    )
                    writer.add_scalar(
                        f"{task}/validation/accuracy", accuracy, global_step=len(loaders["train"]) * (epoch + 1)
                    )
            # Checkpoint after every task of every epoch.
            writer.save_model(model, os.sep.join(["{}_epoch{:02d}".format(task, epoch+1), str(args.seed)]))
|
{"hexsha": "29e77600817e98018b114b36a024cb73a5c55934", "size": 12442, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_jmt.py", "max_stars_repo_name": "Vansil/SMNLS", "max_stars_repo_head_hexsha": "0d0118927d209ebe8d4ff0b1f73a90e9519edde9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_jmt.py", "max_issues_repo_name": "Vansil/SMNLS", "max_issues_repo_head_hexsha": "0d0118927d209ebe8d4ff0b1f73a90e9519edde9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_jmt.py", "max_forks_repo_name": "Vansil/SMNLS", "max_forks_repo_head_hexsha": "0d0118927d209ebe8d4ff0b1f73a90e9519edde9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.660130719, "max_line_length": 177, "alphanum_fraction": 0.580774795, "include": true, "reason": "import numpy", "num_tokens": 2930}
|
!***********************************************************************
! *
SUBROUTINE MANEIG(IATJPO, IASPAR)
! *
! This module manages the operation of the eigensolvers and the *
! storage of the eigenpairs. There are two principal branches: *
! *
! (1) Matrix of order 1: the trivial case *
! (2) Matrix of order greater than 1: there are two branches *
! (i) Matrices of order less than or equal to IOLPCK: *
! eigenpairs are found using LAPACK SUBROUTINEs *
! (ii) Matrices of order greater than IOLPCK: eigenpairs *
! are found using DVDSON; this involves up to three *
! steps: *
! (a) The matrix is analysed to determine its *
! block structure (only irreducibe matrices *
! are correctly treated by DVDSON) *
! (b) Eigenpairs are extracted for each block *
! (c) The appropriate eigenpairs are selected and *
! stored *
! Different methods of storage and different *
! versions of the matrix-vector multiply are used *
! depending upon the order and density of the matrix *
! *
! Call(s) to: [LIB92]: ALLOC, DALLOC, ISPAR, ITJPO, posfile, *
! RALLOC. *
! [RCI92]: DNICMV, SPICMV2, SPODMV. *
! [DVDSON]: DVDSON. *
! [AUXBLAS]: DINIT/SINIT. *
! [BLAS]: DCOPY/SCOPY, DSWAP/SSWAP. *
! [LAPACK]: DSPEVX/SSPEVX. *
! *
! Written by Farid A. Parpia Last revision: 27 Sep 1993 *
! Modified Misha Saparov Feb 1997 *
! Charlotte F. Fischer May 1997 *
! Except for the disk version, all matrices have *
! diagonals shifted by EAV *
! All arrays allocated here are de-allocated except pneval and pnevec *
! which will be de-allocated in matrix. *
! Block Version By Xinghong He Last revision: 18 Jun 1998 *
! *
!***********************************************************************
!...Translated by Pacific-Sierra Research 77to90 4.3E 14:07:38 1/ 5/07
!...Modified by Charlotte Froese Fischer
! Gediminas Gaigalas 10/05/17
!-----------------------------------------------
! M o d u l e s
!-----------------------------------------------
USE vast_kind_param, ONLY: DOUBLE, LONG
USE memory_man
USE eigv_C
USE fposition_C
USE hmat_C
USE orb_C, ONLY: ncf, nw, iqa
USE prnt_C
USE where_C
USE WCHBLK_C
USE iounit_C
!-----------------------------------------------
! I n t e r f a c e B l o c k s
!-----------------------------------------------
USE dnicmv_I
USE spicmv2_I
USE spodmv_I
USE posfile_I
USE dinit_I
USE dspevx_I
USE iniestsd_I
USE gdvd_I
USE iniest2_I
USE iniestdm_I
USE itjpo_I
USE ispar_I
IMPLICIT NONE
!-----------------------------------------------
! E x t e r n a l F u n c t i o n s
!-----------------------------------------------
REAL(DOUBLE) :: DLAMCH
EXTERNAL :: DLAMCH
!-----------------------------------------------
! D u m m y A r g u m e n t s
!-----------------------------------------------
INTEGER, INTENT(OUT) :: IATJPO
INTEGER, INTENT(OUT) :: IASPAR
!-----------------------------------------------
! L o c a l P a r a m e t e r s
!-----------------------------------------------
!cjb INTEGER, PARAMETER :: IOLPCK = 1000
INTEGER, PARAMETER :: IOLPCK = 2000
! GG REAL(DOUBLE), PARAMETER :: ABSTOL = 1.0D-10
!cjb NINCOR
!cjb INTEGER, PARAMETER :: NINCOR = 1 ! To enforce DISK
INTEGER, PARAMETER :: NINCOR = 268435456 ! = 2 GB or memory
!-----------------------------------------------
! L o c a l V a r i a b l e s
!-----------------------------------------------
INTEGER(LONG) :: NSTORE
INTEGER :: MYID, NPROCS, NROWS, I, NDENSE, NCFDUM, ICCUTDUM, MYIDDUM, &
NPROCSDUM, IOFSET, NELC, IR, NVECMN, NVEX, M, INFO, LOC, NBRKEV, &
IMV, NDENSE_L, LIM, LWORK, LIWORK, MAXITR, MBLOCK, NEND, &
ILOW, IHIGH, NIV, NLOOPS, NMV, IERR, J, IA
REAL(DOUBLE) :: ELSTO, DUMMY, &
DIATMP, CRITE, CRITC, CRITR, ORTHO, DMUNGO, AMAX, WA, ABSTOL
! GG DIATMP, CRITE, CRITC, CRITR, ORTHO, DMUNGO, AMAX, WA
LOGICAL :: HIEND, LDISC, SPARSE
CHARACTER(LEN=8) :: CNUM
REAL(DOUBLE), DIMENSION(:), pointer :: w, z, work, diag
INTEGER, DIMENSION(:), pointer :: iwork, ifail, jwork
!-----------------------------------------------------------------------
ABSTOL = 2*DLAMCH('S')
MYID = 0
NPROCS = 1
!IF (MYID == 0) WRITE (6, *) 'Calling maneig...'
! (nrows+1) is the number of records of the present block's .res file
NROWS = (NCF - MYID - 1 + NPROCS)/NPROCS
IF (NCF < NPROCS) NROWS = NCF/(MYID + 1)
!CALL posfile (1, imcdf, nrows+1)
CALL POSFILE (0, IMCDF, NPOSITION)
IF (NCF == 1) THEN
!-----------------------------------------------------------------------
!
! (1) - Trivial ncf = 1
!
!-------------------------------------------------------
WRITE (24, *) 'Trivial eigenvalue problem.'
! Matrix of order 1: the trivial case; we assume that the value
! of EAV is available
CALL ALLOC (EVAL, 1,'EVAL', 'MANEIG' )
CALL ALLOC (EVEC, 1, 'EVECO', 'MANEIG')
EVAL(1) = 0.D0
EVEC(1) = 1.D0
! Still read through the .res file
!GG
!GG Gediminas NIST 2005.11.03
!GG READ (imcdf)
DO I = 1, NROWS + 1
READ (IMCDF)
END DO
ELSE !if-2
!-----------------------------------------------------------------------
!
! (2) - Non trivial
!
!-------------------------------------------------------
!
! Matrix of order greater than 1; how many elements in a triangle?
!
NDENSE = (NCF*(NCF + 1))/2
IF (NCF <= IOLPCK) THEN
!-----------------------------------------------------------------------
!
! (2.1) - LAPACK Dense, Memory,
!
!-------------------------------------------------------
IF (MYID == 0) THEN
WRITE (6, *) &
'LAPACK routine DSPEVX selected for eigenvalue problem.'
WRITE (24, *) &
'LAPACK routine DSPEVX selected for eigenvalue problem.'
ENDIF
! Allocate storage for the dense representation of the matrix
! and initialize emt
CALL ALLOC (EMT, NDENSE, 'EMT', 'MANEIG')
CALL DINIT (NDENSE, 0.0D00, EMT, 1)
! Read the matrix into position from the disc file; it's already
! been properly positioned.
CALL ALLOC (WORK, NCF,'WORK', 'MANEIG' )
CALL ALLOC (IROW, NCF, 'IROW', 'MANEIG')
READ (IMCDF) NCFDUM, ICCUTDUM, MYIDDUM, NPROCSDUM
IF (NCF/=NCFDUM .OR. MYID/=MYIDDUM .OR. NPROCSDUM/=NPROCS) STOP &
'maneig:1'
DO I = MYID + 1, NCF, NPROCS
IOFSET = (I*(I - 1))/2
READ (IMCDF) NELC, ELSTO, (WORK(IR),IR=1,NELC), (IROW(IR),IR=1,&
NELC)
! In the row-mode of the lower triangle,
! diagonal is the last one
DO IR = 1, NELC - 1
EMT(IOFSET+IROW(IR)) = WORK(IR)
END DO
EMT(IOFSET+IROW(NELC)) = WORK(NELC) - EAV
END DO
CALL DALLOC (WORK, 'WORK', 'MANEIG')
CALL DALLOC (IROW, 'IROW', 'MANEIG')
! Find the eigenpairs
!
! ivec() - serial numbers of eigenstates of the current block
! iccmin() - serial numbers of eigenstates of all blocks.
! nvecmn - minimum serial number of the eigenstates of the block
! nvecmx - maximum .............
! nvex - clear from def: NVECMX-NVECMN+1
NVECMN = NCF
DO I = 1, NVEC
NVECMN = MIN(NVECMN,IVEC(I))
END DO
NVEX = NVECMX - NVECMN + 1
CALL ALLOC (W, NVEX, 'W', 'MANEIG')
CALL ALLOC (Z, NCF*NVEX,'Z', 'MANEIG' )
CALL ALLOC (WORK, NCF*8,'WORK', 'MANEIG' )
CALL ALLOC (IWORK, NCF*5,'IWORK', 'MANEIG' )
! GG CALL ALLOC (IFAIL, NVEX, 'IFAIL', 'MANEIG')
CALL ALLOC (IFAIL, NCF, 'IFAIL', 'MANEIG')
CALL DSPEVX ('V', 'I', 'U', NCF, EMT, DUMMY, DUMMY, NVECMN, NVECMX&
, ABSTOL, M, W, Z, NCF, WORK, IWORK, IFAIL, INFO)
IF (INFO /= 0) STOP 'maneig: Failure in DSPEVX [LAPACK]'
CALL DALLOC (WORK, 'WORK', 'MANEIG')
CALL DALLOC (IWORK, 'IWORK', 'MANEIG')
CALL DALLOC (IFAIL, 'IFAIL', 'MANEIG')
CALL DALLOC (EMT, 'EMT', 'MANEIG')
! Store the eigenpairs in their proper positions EVAL() and EVEC()
CALL ALLOC (EVAL, NVEC,'EVAL', 'MANEIG' )
CALL ALLOC (EVEC, NCF*NVEC, 'EVEC', 'MANEIG')
DO I = 1, NVEC
LOC = IVEC(I)
EVAL(I) = W(LOC - NVECMN + 1)
IOFSET = NCF*(I - 1)
LOC = NCF*(LOC - NVECMN)
CALL DCOPY (NCF, Z(LOC + 1), 1, EVEC(IOFSET+1), 1)
END DO
CALL DALLOC (W, 'W', 'MANEIG')
CALL DALLOC (Z, 'Z', 'MANEIG')
ELSE
!-----------------------------------------------------------------------
! (2.2) - DVDSON --- preparation work
!
!-------------------------------------------------------
WRITE (24,*)'DVDSON routine selected for eigenvalue problem;'
! Sparse or dense matrix multiply? On disc or in core?
!GG NBRKEV = (NCF + 1)*(NCF + 1)/3 ! Normal
!NBRKEV = 1 ! To enforce DENSE
         !NBRKEV = (NCF*(NCF+1)) / 2 + 1     ! To enforce SPARSE
!--------------------------------------------------------------
! Uncomment out the following to force sparse storage
!
!
!--------------------------------------------------------------
SPARSE = .TRUE.
NSTORE = NELMNT + NELMNT/2 + (NCF + 1)/2
CALL ALLOC (DIAG, NCF, 'DIAG', 'MANEIG')
IF (NSTORE > NINCOR) THEN
!-----------------------------------------------------------------------
!
! (2.2.1) - DVDSON --- Disk, load diagonal
!
!-------------------------------------------------------
WRITE (24, *) ' matrix stored on disc;'
! Disk storage; necessarily sparse; one column of the matrix in
! memory
LDISC = .TRUE.
SPARSE = .TRUE.
IMV = 1
! Load diagonal - Each node will have the same, complete copy
! after this if block
READ (IMCDF) NCFDUM, ICCUTDUM, MYIDDUM, NPROCSDUM
IF (NCF/=NCFDUM .OR. MYID/=MYIDDUM .OR. NPROCSDUM/=NPROCS) STOP &
'maneig:2'
DO I = MYID + 1, NCF, NPROCS
READ (IMCDF) NELC, ELSTO, (DUMMY,IR=2,NELC), DIATMP
DIAG(I) = DIATMP - EAV
END DO
ELSE
!-----------------------------------------------------------------------
!
! (2.2.2) - DVDSON --- Memory, load all
!
!-------------------------------------------------------
!
! Core storage; load matrix into memory
!
LDISC = .FALSE.
IF (SPARSE) THEN
!-----------------------------------------------------------------------
! (2.2.2.1) - DVDSON --- Memory, load all, sparse
!
!-------------------------------------------------------
IF (MYID == 0) WRITE (24, *) &
' matrix stored in sparse representation in core;'
IMV = 2
WRITE (6, *) 'nelmnt = ', NELMNT
CALL ALLOC (EMT, NELMNT, 'EMT', 'MANEIG')
CALL ALLOC (IROW, NELMNT, 'IROW', 'MANEIG')
CALL ALLOC (IENDC, 0, NCF , 'IENDC', 'MANEIG')
IOFSET = 0
IENDC(0) = 0
READ (IMCDF) NCFDUM, ICCUTDUM, MYIDDUM, NPROCSDUM
IF (NCF/=NCFDUM .OR. MYID/=MYIDDUM .OR. NPROCSDUM/=NPROCS) &
STOP 'maneig:3'
DO I = MYID + 1, NCF, NPROCS
READ (IMCDF) NELC, ELSTO, (EMT(IR+IOFSET),IR=1,NELC), (&
IROW(IR + IOFSET),IR=1,NELC)
EMT(NELC+IOFSET) = EMT(NELC+IOFSET) - EAV
DIAG(I) = EMT(NELC+IOFSET)
IOFSET = IOFSET + NELC
IENDC(I) = IOFSET
! WRITE (31 + MYID, *) I, IENDC(I), DIAG(I)
END DO
ELSE
!-----------------------------------------------------------------------
!
! (2.2.2.2) - DVDSON --- Memory, load all, dense
!
!-------------------------------------------------------
WRITE (24, *) &
' matrix stored in full representation in core;'
IMV = 3
! Find NDENSE_L, the number of elements on the node (dense form)
NDENSE_L = 0
DO I = MYID + 1, NCF, NPROCS
NDENSE_L = NDENSE_L + I
END DO
CALL ALLOC (EMT, NDENSE_L, 'EMT', 'MANEIG')
CALL DINIT (NDENSE_L, 0.0D00, EMT, 1)
CALL ALLOC (WORK, NCF, 'WORK', 'MANEIG')
CALL ALLOC (IROW, NCF, 'IROW', 'MANEIG')
READ (IMCDF) NCFDUM, ICCUTDUM, MYIDDUM, NPROCSDUM
IF (NCF/=NCFDUM .OR. MYID/=MYIDDUM .OR. NPROCSDUM/=NPROCS) &
STOP 'maneig:4'
IOFSET = 0
DO I = MYID + 1, NCF, NPROCS
READ (IMCDF) NELC, ELSTO, (WORK(IR),IR=1,NELC), (IROW(IR),&
IR=1,NELC)
WORK(NELC) = WORK(NELC) - EAV
DIAG(I) = WORK(NELC)
DO IR = 1, NELC
EMT(IOFSET+IROW(IR)) = WORK(IR)
END DO
IOFSET = IOFSET + I
END DO
CALL DALLOC (WORK, 'WORK', 'MANEIG')
CALL DALLOC (IROW, 'IROW', 'MANEIG')
ENDIF
! ...Memory mode - sparse or dense
!-----------------------------------------------------------------------
! (2.2.2.3e) *** E n d o f D V D S O N m e m o r y
!-----------------------------------------------------------------------
ENDIF
! ...Disk or Memory
!-----------------------------------------------------------------------
! (2.2.3e) *** E n d o f D V D S O N
!-----------------------------------------------------------------------
!
! Allocate storage for workspace; see the header of DVDSON for
! the expression below; the value of LIM can be reduced to NVECMX
! plus a smaller number if storage is severely constrained
!
LIM = MIN(NCF,2*NVECMX + 60)
! lwork = 2*ncf*lim + lim*lim + (nvecmx+10)*lim + nvecmx
LWORK = 2*NCF*LIM + LIM*LIM*2 + 11*LIM + NVECMX
CALL ALLOC (WORK, LWORK, 'WORK', 'MANEIG')
LIWORK = 6*LIM + NVECMX
CALL ALLOC (IWORK, LIWORK, 'IWORK', 'MANEIG')
!*changed by Misha 02/12/97
CRITE = 1.0D-17
CRITC = 1.0D-09
CRITR = 1.0D-09
ORTHO = MAX(1D-9,CRITR)
! end of changes
! maxitr = MAX (nvecmx*100, ncf/10)
MAXITR = MAX(NVECMX*200,NCF/10)
!maxitr = MIN (nvect*100, ncf) ! FROM RSCFVU !!!
CALL ALLOC (JWORK, LIM,'JWORK', 'MANEIG' )
CALL ALLOC (EVAL, NVECMX, 'EVAL', 'MANEIG')
CALL ALLOC (EVEC, NCF*NVECMX, 'EVEC', 'MANEIG')
DMUNGO = 10.D99
CALL DINIT (NVECMX, DMUNGO, EVAL, 1)
! Compute the eigenpairs in each block
NVEX = NVECMX
IF (LDISC) THEN
MBLOCK = NVEX
ELSE
MBLOCK = 1
ENDIF
NEND = NCF*NVEX
ILOW = 1
IHIGH = NVEX
NIV = NVEX
!************************************************************************
!
! Call Davidson eigensolver
!
SELECT CASE (IMV)
CASE (1)
!******************** sparse and matrix on disk **********************
WRITE (6, *) ' Sparse - Disk, iniestsd'
CALL POSFILE (0, IMCDF, NPOSITION)! was within iniestsd before
CALL INIESTSD (2000, NCF, MYID, NPROCS, NIV, WORK, IMCDF, EAV)
!NIV = 0 ! Why equal 0 ???
!WRITE (6, *) ' Calling gdvd(spodmv,...'
CALL GDVD (SPODMV, NCF, LIM, DIAG, ILOW, IHIGH, JWORK, NIV, &
MBLOCK, CRITE, CRITC, CRITR, ORTHO, MAXITR, WORK, LWORK, &
IWORK, LIWORK, HIEND, NLOOPS, NMV, IERR)
CASE (2)
!******************** sparse and matrix in memory ********************
WRITE (6, *) ' Sparse - Memory, iniest2'
! CALL INIEST2 (1000, NCF,NIV,WORK,EMT,IENDC,IROW)
CALL INIEST2 (2000, NCF, NIV, WORK, EMT, IENDC, IROW)
!WRITE (*, *) NCF, NIV, (WORK(I),I=NCF*NIV + 1,NCF*NIV + NIV)
!WRITE (*, *) LIM, ILOW, IHIGH, MBLOCK, MAXITR, LWORK, LIWORK
!WRITE (*, *) IERR
CALL GDVD (SPICMV2, NCF, LIM, DIAG, ILOW, IHIGH, JWORK, NIV, &
MBLOCK, CRITE, CRITC, CRITR, ORTHO, MAXITR, WORK, LWORK, &
IWORK, LIWORK, HIEND, NLOOPS, NMV, IERR)
!WRITE (*, *) 'after gdvd...'
!WRITE (*, *) NCF, NIV, (WORK(I),I=NCF*NIV + 1,NCF*NIV + NIV)
!WRITE (*, *) LIM, ILOW, IHIGH, MBLOCK, MAXITR, LWORK, LIWORK
!WRITE (*, *) HIEND, NLOOPS, NMV, IERR
CALL DALLOC (EMT, 'EMT', 'MANEIG')
CALL DALLOC (IROW, 'IROW', 'MANEIG')
CALL DALLOC (IENDC, 'IENDC', 'MANEIG')
CASE (3)
!*************************** dense and in memory **********************
WRITE (6, *) ' Dense - Memory, iniestdm'
! CALL INIESTDM (1000,NCF,NIV,WORK,EMT)
CALL INIESTDM (2000, NCF, NIV, WORK, EMT)
CALL GDVD (DNICMV, NCF, LIM, DIAG, ILOW, IHIGH, JWORK, NIV, &
MBLOCK, CRITE, CRITC, CRITR, ORTHO, MAXITR, WORK, LWORK, &
IWORK, LIWORK, HIEND, NLOOPS, NMV, IERR)
CALL DALLOC (EMT, 'EMT', 'MANEIG')
END SELECT
!************************************************************************
CALL DALLOC (DIAG, 'DIAG', 'MANEIG')
CALL DALLOC (IWORK, 'IWORK', 'MANEIG')
CALL DALLOC (JWORK, 'JWORK', 'MANEIG')
WRITE (24, *) ' ', NLOOPS, ' iterations;'
WRITE (24, *) ' ', NMV, ' matrix-vector multiplies.'
IF (IERR /= 0) THEN
WRITE (ISTDE, *) 'MANEIG: Returned from DVDSON with'
WRITE (ISTDE, *) ' IERR = ', IERR, '.'
STOP 'maneig: DVDSON wrong'
ENDIF
! Put the eigenpairs in order, overwriting as necessary
CALL DCOPY (NVEX, WORK(NEND+1), 1, EVAL, 1)
CALL DCOPY (NCF*NVEX, WORK(1), 1, EVEC, 1)
CALL DALLOC (WORK, 'WORK', 'MANEIG')
! Rearrange and reallocate storage for the eigenpairs
! as necessary
IF (NVEC < NVECMX) THEN
CALL ALLOC (IWORK, NVECMX, 'IWORK', 'MANEIG')
DO I = 1, NVECMX
IWORK(I) = I
END DO
DO I = 1, NVEC
IOFSET = IVEC(I)
LOC = IWORK(I)
IF (IOFSET == LOC) CYCLE
CALL DSWAP (1, EVAL(IOFSET), 1, EVAL(I), 1)
IWORK(I) = IWORK(IOFSET)
IWORK(IOFSET) = LOC
IOFSET = NCF*(IOFSET - 1)
LOC = NCF*(I - 1)
CALL DSWAP (NCF, EVEC(IOFSET+1), 1, EVEC(LOC+1), 1)
END DO
CALL DALLOC (IWORK, 'IWORK', 'MANEIG')
CALL RALLOC (EVAL, NVEC, 'EVAL', 'MANEIG')
CALL RALLOC (EVEC, NCF*NVEC, 'EVEC', 'MANEIG' )
ENDIF
ENDIF
! (2.3e) *** E N D O F N O N - T R I V I A L C A S E
ENDIF
! (3e) *** E N D O F A L L
!--------------------------------------------------------------------
! Only the following quantities are needed after this routine is
! finished:
! eval(), evec(), iatjpo, iaspar
!--------------------------------------------------------------------
!
! Clean up eigenvectors; determine their J/P values
!
DO J = 1, NVEC
! Find the dominant component of each eigenvector
IOFSET = (J - 1)*NCF
AMAX = 0.D0
DO I = 1, NCF
WA = ABS(EVEC(I+IOFSET))
IF (WA <= AMAX) CYCLE
AMAX = WA
IA = I
END DO
! Find the angular momentum and parity of the dominant component
IATJPO = ITJPO(IA)
IASPAR = ISPAR(IA)
! Change sign of eigenvector if dominant component is negative
IF (EVEC(IA+IOFSET) >= 0.D0) CYCLE
EVEC(1+IOFSET:NCF+IOFSET) = -EVEC(1+IOFSET:NCF+IOFSET)
END DO
RETURN
END SUBROUTINE MANEIG
|
{"hexsha": "2a1ee57d6d1a15d117eb9d5afa5b230ab5422ebd", "size": 22246, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/appl/rci90/maneig.f90", "max_stars_repo_name": "sylas/grasp-continuum", "max_stars_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-03-10T04:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:01:15.000Z", "max_issues_repo_path": "src/appl/rci90/maneig.f90", "max_issues_repo_name": "sylas/grasp-continuum", "max_issues_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2019-03-07T17:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T16:45:24.000Z", "max_forks_repo_path": "src/appl/rci90/maneig.f90", "max_forks_repo_name": "sylas/grasp-continuum", "max_forks_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-10T04:00:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:06:40.000Z", "avg_line_length": 39.7960644007, "max_line_length": 80, "alphanum_fraction": 0.4152207138, "num_tokens": 6290}
|
###############################################################################
# Copyright 2018 Google LLC #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# https://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
# Additional explanatory comments added by Sarah Gillespie. #
# https://github.com/SarahGillespie #
# 7 Jan 2022 #
# #
# Most comments are regarding the fit() function. #
###############################################################################
import numpy as np
from sklearn.neighbors import KDTree, KNeighborsClassifier
class TrustScore:
    """
    Trust Score: a measure of classifier uncertainty based on nearest neighbors.

    Usage: call fit(X, y) on training data, then get_score(X, y_pred) on test
    points to obtain one trust score per prediction.
    """

    def __init__(self, k=10, alpha=0.0, filtering="none", min_dist=1e-12):
        """
        k and alpha are the tuning parameters for the filtering.
        filtering: method of filtering; options are "none", "density",
            "uncertainty".
        min_dist: some small number to mitigate possible division by 0.
        """
        self.k = k
        self.filtering = filtering
        self.alpha = alpha
        self.min_dist = min_dist

    def filter_by_density(self, X: np.array):
        """Filter out points with low kNN density.

        Args:
            X: an array of sample points.
        Returns:
            A subset of the array without points in the bottom alpha-fraction of
            original points of kNN density.
        """
        kdtree = KDTree(X)
        # Distance to the k-th nearest neighbor: small radius = dense region.
        knn_radii = kdtree.query(X, k=self.k)[0][:, -1]
        eps = np.percentile(knn_radii, (1 - self.alpha) * 100)
        return X[np.where(knn_radii <= eps)[0], :]

    def filter_by_uncertainty(self, X: np.array, y: np.array):
        """Filter out points with high label disagreement amongst its
        kNN neighbors.

        Args:
            X: an array of sample points.
            y: corresponding labels.
        Returns:
            A subset of (X, y) without points in the bottom alpha-fraction of
            samples with highest disagreement amongst its k nearest neighbors.
        """
        neigh = KNeighborsClassifier(n_neighbors=self.k)
        neigh.fit(X, y)
        confidence = neigh.predict_proba(X)
        cutoff = np.percentile(confidence, self.alpha * 100)
        unfiltered_idxs = np.where(confidence >= cutoff)[0]
        return X[unfiltered_idxs, :], y[unfiltered_idxs]

    def fit(self, X: np.array, y: np.array):
        """Initialize trust score precomputations with training data.

        WARNING: assumes that the labels are 0-indexed (i.e.
        0, 1,..., n_labels-1) and that ``y`` is a numpy array
        (convert with ``np.array(y)`` if necessary).

        Args:
            X: an array of sample points.
            y: corresponding labels as a numpy integer array.
        """
        print("Fitting the trustscores model...")  # progress for the user
        # Number of classes = highest label value + 1 (labels are 0-indexed).
        self.n_labels = int(np.max(y)) + 1
        # One KD-tree per class, built below from that class's points.
        self.kdtrees = [None] * self.n_labels
        if self.filtering == "uncertainty":
            X_filtered, y_filtered = self.filter_by_uncertainty(X, y)
        for label in range(self.n_labels):
            if self.filtering == "none":
                X_to_use = X[np.where(y == label)[0]]
            elif self.filtering == "density":
                X_to_use = self.filter_by_density(X[np.where(y == label)[0]])
            elif self.filtering == "uncertainty":
                X_to_use = X_filtered[np.where(y_filtered == label)[0]]
            # BUG FIX: emit the diagnostic *before* building the KD-tree.
            # Previously KDTree() raised on empty input first, so this
            # message could never be seen.
            if len(X_to_use) == 0:
                print(
                    "Filtered too much or missing examples from a label! Please lower "
                    "alpha or check data."
                )
            self.kdtrees[label] = KDTree(X_to_use)
        print("Completed model fitting.")  # progress for the user

    def get_score(self, X: np.array, y_pred: np.array):
        """Compute the trust scores.

        Given a set of points, determines the distance to each class.

        Args:
            X: an array of sample points.
            y_pred: The predicted labels for these points (numpy array).
        Returns:
            The trust score, which is ratio of distance to closest class that was not
            the predicted class to the distance to the predicted class.
        """
        print("Calculating the trustscores...")  # progress for the user
        # BUG FIX: allocate a float array instead of np.tile(None, ...),
        # which produced a slow object-dtype matrix; every entry is
        # overwritten in the loop below.
        d = np.empty((X.shape[0], self.n_labels), dtype=np.float64)
        for label_idx in range(self.n_labels):
            # k=2 keeps the 2nd-nearest distance — presumably so a training
            # point is not matched to itself within its own class (TODO confirm).
            d[:, label_idx] = self.kdtrees[label_idx].query(X, k=2)[0][:, -1]

        sorted_d = np.sort(d, axis=1)
        d_to_pred = d[range(d.shape[0]), y_pred]
        # Closest distance among the classes other than the predicted one.
        d_to_closest_not_pred = np.where(
            sorted_d[:, 0] != d_to_pred, sorted_d[:, 0], sorted_d[:, 1]
        )
        print("Trustscores calculation completed.")  # progress for the user
        return d_to_closest_not_pred / (d_to_pred + self.min_dist)
class KNNConfidence:
    """Baseline which uses disagreement to kNN classifier.

    The score of a prediction is the fraction of the k nearest training
    neighbors whose label agrees with the predicted label.
    """

    def __init__(self, k=10):
        self.k = k

    def fit(self, X, y):
        # Index the training points for fast nearest-neighbor queries.
        self.kdtree = KDTree(X)
        self.y = y

    def get_score(self, X, y_pred):
        # Indices of the k nearest training neighbors of each query point.
        _, neighbor_idxs = self.kdtree.query(X, k=self.k)
        neighbor_labels = self.y[neighbor_idxs]
        # Broadcast each prediction across its k neighbor columns and
        # average the agreement row-wise.
        repeated_preds = np.transpose(np.tile(y_pred, (self.k, 1)))
        return np.mean(neighbor_labels == repeated_preds, axis=1)
|
{"hexsha": "2ff837d458a0a273bf6d983d4779a0ff4599b868", "size": 7834, "ext": "py", "lang": "Python", "max_stars_repo_path": "trustscore_annotated.py", "max_stars_repo_name": "SarahGillespie/R_trustscores", "max_stars_repo_head_hexsha": "8b08b7a4fbe684eabf88ddaff52a73e6a4c8bc3a", "max_stars_repo_licenses": ["Apache-2.0", "Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trustscore_annotated.py", "max_issues_repo_name": "SarahGillespie/R_trustscores", "max_issues_repo_head_hexsha": "8b08b7a4fbe684eabf88ddaff52a73e6a4c8bc3a", "max_issues_repo_licenses": ["Apache-2.0", "Unlicense"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2022-01-11T02:39:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T03:36:27.000Z", "max_forks_repo_path": "trustscore_annotated.py", "max_forks_repo_name": "SarahGillespie/R_trustscores", "max_forks_repo_head_hexsha": "8b08b7a4fbe684eabf88ddaff52a73e6a4c8bc3a", "max_forks_repo_licenses": ["Apache-2.0", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3668341709, "max_line_length": 104, "alphanum_fraction": 0.5342098545, "include": true, "reason": "import numpy", "num_tokens": 1673}
|
import ctypes.util
from ctypes import *
import networkx as nx
import numpy as np
import os
from .TACSim import node_edge_adjacency, normalized
# Public API of this module.
__all__ = ['tacsim_in_C', 'tacsim_combined_in_C']
def find_clib():
    """Locate and load the tacsim shared library.

    Tries ``ctypes.util.find_library`` first; if that fails, falls back to
    loading ``libtacsim.so`` from $LIBTACSIM_LIB_DIR (default /usr/local/lib/).

    Returns:
        The loaded ctypes library handle.
    Raises:
        RuntimeError: if the library cannot be found or loaded.
    """
    # Find and load tacsim library
    tacsimlib = ctypes.util.find_library('tacsim')
    if not tacsimlib:
        try:
            install_lib_dir = os.getenv('LIBTACSIM_LIB_DIR', '/usr/local/lib/')
            libc = ctypes.cdll.LoadLibrary(os.path.join(install_lib_dir, 'libtacsim.so'))
        # BUG FIX: catch only loader errors instead of a bare `except:`,
        # which also swallowed KeyboardInterrupt / SystemExit.
        except OSError as exc:
            raise RuntimeError("Can't find libtacsim. Please install it first.") from exc
    else:
        libc = CDLL(tacsimlib, mode=ctypes.RTLD_GLOBAL)
    return libc
def graph_properties(G, node_attribute='weight', edge_attribute='weight',
                     min_node_weight=1e-4, min_edge_weight=1e-4):
    """Convert an attributed digraph into the flat arrays libtacsim expects.

    Args:
        G: a networkx-style directed graph with node/edge weight attributes.
        node_attribute / edge_attribute: attribute names holding the weights.
        min_node_weight / min_edge_weight: lower bounds clamped onto weights.

    Returns:
        nnadj: V x V matrix; nnadj[src][dst] is the edge index, -1 if absent.
        node_weight_vec: length-V node weight vector.
        edge_weight_vec: length-E edge weight vector (indexed like nnadj).
        V, E: node and edge counts.
    """
    nodes = G.nodes()
    edges = G.edges()
    V = len(nodes)
    E = len(edges)

    # BUG FIX: np.int was removed from numpy >= 1.24; plain int is equivalent.
    nnadj = np.full((V, V), -1, dtype=int)
    node_weight_vec = np.ones(V, dtype=np.double)
    edge_weight_vec = np.ones(E, dtype=np.double)

    node_id_lookup_tbl = {}
    for i, n in enumerate(nodes):
        node_id_lookup_tbl[n] = i
        node_weight_vec[i] = max(min_node_weight, G.node[n][node_attribute])

    edges = [(node_id_lookup_tbl[e[0]], node_id_lookup_tbl[e[1]], e[2])
             for e in G.edges(data=True)]
    # BUG FIX: the original called sorted() and discarded its return value,
    # leaving the edge list unsorted; sort in place as clearly intended.
    edges.sort(key=lambda x: (x[0], x[1]))

    for i, (src, dst, attrs) in enumerate(edges):
        nnadj[src][dst] = i
        edge_weight_vec[i] = max(min_edge_weight, attrs[edge_attribute])

    return nnadj, node_weight_vec, edge_weight_vec, V, E
def matrix_to_cpointer(arr, shape, dtype=c_double):
    """Copy a 2-D array-like into a C-style array of row pointers.

    Args:
        arr: 2-D indexable (list of lists or numpy array).
        shape: (row, col) dimensions of arr.
        dtype: ctypes element type (default c_double).
    Returns:
        A ctypes (POINTER(dtype) * row) array whose i-th entry points to a
        (dtype * col) buffer holding row i.
    """
    row, col = shape
    ROW_ARR = dtype * col  # one row buffer holds `col` elements
    PTR_DT = POINTER(dtype)
    # BUG FIX: the original allocated `col` pointers to rows of length `row`
    # but then filled `row` pointers with `col` entries each — correct only
    # for square matrices. Allocate `row` pointers of `col`-element rows.
    ptr = (PTR_DT * row)()
    for i in range(row):
        # ctypes keeps a reference to the row buffer via the pointer array,
        # so the memory stays alive as long as `ptr` does.
        ptr[i] = ROW_ARR()
        for j in range(col):
            ptr[i][j] = arr[i][j]
    return ptr
def vector_to_cpointer(vec, vlen, dtype=c_double):
    """Copy the first `vlen` entries of `vec` into a new ctypes array."""
    # Build the fixed-size ctypes array in one shot from the gathered values.
    return (dtype * vlen)(*[vec[i] for i in range(vlen)])
def cpointer_to_matrix(ptr, shape):
    """Copy a row-of-pointers C matrix into a numpy float array of `shape`."""
    rows, cols = shape
    # Gather element by element; `ptr` only supports [i][j] indexing.
    return np.array(
        [[ptr[r][c] for c in range(cols)] for r in range(rows)],
        dtype=np.double,
    )
def cpointer_to_ndarray(ptr, size, dtype, shape):
    """Reverse of ndarray.ctypes.data_as: wrap C memory in a numpy array.

    Args:
        ptr: a ctypes pointer (e.g. POINTER(c_double)) to `size` elements.
        size: element count behind the pointer (kept for interface
            compatibility; the count is implied by `shape`).
        dtype: expected numpy dtype (kept for interface compatibility; the
            dtype is inferred from the pointer's ctypes element type).
        shape: shape of the resulting array.
    Returns:
        An ndarray sharing memory with the C buffer (zero-copy).
    """
    # BUG FIX: np.core.multiarray.int_asbuffer was removed from numpy;
    # np.ctypeslib.as_array is the supported zero-copy replacement.
    return np.ctypeslib.as_array(ptr, shape=shape)
def tacsim_in_C(G1, G2=None, node_attribute='weight', edge_attribute='weight',
                min_node_weight=1e-4, min_edge_weight=1e-4,
                max_iter=100, eps=1e-4, tol=1e-6):
    """Compute node/edge similarity of attributed graphs via libtacsim.

    If G2 is None the self-similarity of G1 is computed; otherwise the
    cross-similarity between G1 and G2.

    Args:
        G1, G2: attributed (networkx-style) graphs.
        node_attribute / edge_attribute: weight attribute names.
        min_node_weight / min_edge_weight: lower bounds clamped onto weights.
        max_iter, eps, tol: iteration controls forwarded to the C solver.

    Returns:
        (nsim2, esim2): node- and edge-similarity matrices as numpy arrays.
    """
    libc = find_clib()

    # Output parameters: double** result matrices allocated by the C library.
    nsim = POINTER(POINTER(c_double))()
    esim = POINTER(POINTER(c_double))()

    if G2 is None:
        # Calculate self-similarity of an attributed graph
        calculate_tacsim_self = libc.calculate_tacsim_self
        # Declare the C signature so ctypes marshals arguments correctly.
        calculate_tacsim_self.argtypes = [
            POINTER(POINTER(c_int)),
            POINTER(c_double),
            POINTER(c_double), c_int, c_int,
            POINTER(POINTER(POINTER(c_double))),
            POINTER(POINTER(POINTER(c_double))),
            c_int, c_double, c_double
        ]
        calculate_tacsim_self.restype = c_int

        # Convert graph attributes to ctypes
        nnadj, nwgt, ewgt, nlen, elen = graph_properties(G1,
            node_attribute, edge_attribute, min_node_weight,
            min_edge_weight)

        calculate_tacsim_self(
            matrix_to_cpointer(nnadj, (nlen, nlen), dtype=c_int),
            vector_to_cpointer(nwgt, nlen, dtype=c_double),
            vector_to_cpointer(ewgt, elen, dtype=c_double),
            c_int(nlen), c_int(elen),
            byref(nsim), byref(esim),
            c_int(max_iter), c_double(eps), c_double(tol)
        )

        # Copy the C results back into numpy matrices (square: self-similarity).
        nsim2 = cpointer_to_matrix(nsim, (nlen, nlen))
        esim2 = cpointer_to_matrix(esim, (elen, elen))
    else:
        # Calculate similarity of two attributed graphs
        calculate_tacsim = libc.calculate_tacsim
        calculate_tacsim.argtypes = [
            POINTER(POINTER(c_int)),
            POINTER(c_double),
            POINTER(c_double), c_int, c_int,
            POINTER(POINTER(c_int)),
            POINTER(c_double),
            POINTER(c_double), c_int, c_int,
            POINTER(POINTER(POINTER(c_double))),
            POINTER(POINTER(POINTER(c_double))),
            c_int, c_double, c_double
        ]
        calculate_tacsim.restype = c_int

        nnadj, nwgt, ewgt, nlen, elen = graph_properties(G1,
            node_attribute, edge_attribute, min_node_weight,
            min_edge_weight)
        nnadj2, nwgt2, ewgt2, nlen2, elen2 = graph_properties(G2,
            node_attribute, edge_attribute, min_node_weight,
            min_edge_weight)

        calculate_tacsim(
            matrix_to_cpointer(nnadj, (nlen, nlen), dtype=c_int),
            vector_to_cpointer(nwgt, nlen, dtype=c_double),
            vector_to_cpointer(ewgt, elen, dtype=c_double),
            c_int(nlen), c_int(elen),
            matrix_to_cpointer(nnadj2, (nlen2, nlen2), dtype=c_int),
            vector_to_cpointer(nwgt2, nlen2, dtype=c_double),
            vector_to_cpointer(ewgt2, elen2, dtype=c_double),
            c_int(nlen2), c_int(elen2),
            byref(nsim), byref(esim),
            c_int(max_iter), c_double(eps), c_double(tol)
        )

        # Cross-similarity matrices: rows indexed by G1, columns by G2.
        nsim2 = cpointer_to_matrix(nsim, (nlen, nlen2))
        esim2 = cpointer_to_matrix(esim, (elen, elen2))

    return nsim2, esim2
def tacsim_combined_in_C(G1, G2=None, node_attribute='weight', edge_attribute='weight', lamb=0.5, norm=True):
    """Combined similarity based on original tacsim scores. Refer to paper Mesos."""
    # node_sim: node similarity; edge_sim: edge similarity
    node_sim, edge_sim = tacsim_in_C(G1, G2, node_attribute, edge_attribute)

    src_adj_1, tgt_adj_1 = node_edge_adjacency(G1)
    if G2 is None:
        # Self-similarity: reuse G1's adjacency on both sides.
        src_adj_2, tgt_adj_2 = src_adj_1, tgt_adj_1
    else:
        src_adj_2, tgt_adj_2 = node_edge_adjacency(G2)

    # Blend edge similarity with node similarity projected through the
    # source- and target-incidence structures, weighted by lamb.
    combined = (edge_sim
                + lamb * np.dot(np.dot(src_adj_1.T, node_sim), src_adj_2)
                + (1 - lamb) * np.dot(np.dot(tgt_adj_1.T, node_sim), tgt_adj_2))

    return normalized(combined) if norm else combined
if __name__ == '__main__':
    # Smoke test: compare two small toy weighted digraphs.
    G1 = nx.DiGraph()
    G1.add_weighted_edges_from([(1, 0, 8), (0, 2, 12), (1, 2, 10), (2, 3, 15)])
    # Node weights (networkx 1.x attribute access via G.node).
    G1.node[0]['weight'] = 1
    G1.node[1]['weight'] = 1
    G1.node[2]['weight'] = 5
    G1.node[3]['weight'] = 1

    G2 = nx.DiGraph()
    G2.add_weighted_edges_from([(0, 1, 15), (1, 2, 10)])
    G2.node[0]['weight'] = 1
    G2.node[1]['weight'] = 3
    G2.node[2]['weight'] = 1

    # Cross-similarity, self-similarity, and combined score.
    print(tacsim_in_C(G1, G2))
    print(tacsim_in_C(G1))
    print(tacsim_combined_in_C(G1, G2))
|
{"hexsha": "6eb2651586188c2cb7163e17ca088f8ceabce04f", "size": 7083, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphsim/iter/TACSim_in_C.py", "max_stars_repo_name": "vishalbelsare/graphsim", "max_stars_repo_head_hexsha": "1ecd23608fe562d5f363cae2323c1916e82ba4e9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 153, "max_stars_repo_stars_event_min_datetime": "2015-11-04T15:37:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T14:15:58.000Z", "max_issues_repo_path": "graphsim/iter/TACSim_in_C.py", "max_issues_repo_name": "syyunn/graphsim", "max_issues_repo_head_hexsha": "1ecd23608fe562d5f363cae2323c1916e82ba4e9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-03-18T03:31:24.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-06T17:34:01.000Z", "max_forks_repo_path": "graphsim/iter/TACSim_in_C.py", "max_forks_repo_name": "syyunn/graphsim", "max_forks_repo_head_hexsha": "1ecd23608fe562d5f363cae2323c1916e82ba4e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2016-02-26T16:23:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-17T08:57:22.000Z", "avg_line_length": 32.9441860465, "max_line_length": 110, "alphanum_fraction": 0.5887335875, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1921}
|
# coding: utf8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import numpy as np
import math
import cv2
import argparse
from paddlex.seg import transforms
import paddlex as pdx
# Side length (px) each detected meter crop is resized to before segmentation.
METER_SHAPE = 512
# Center and radius (px) of the dial circle within the resized meter image.
CIRCLE_CENTER = [256, 256]
CIRCLE_RADIUS = 250
PI = 3.1415926536
# Dimensions of the rectangular image the circular dial is unrolled into;
# LINE_WIDTH ~= 2 * PI * CIRCLE_RADIUS (one column per angular step).
LINE_HEIGHT = 120
LINE_WIDTH = 1570
# Meters with more than this many scale ticks use METER_CONFIG[0],
# otherwise METER_CONFIG[1].
TYPE_THRESHOLD = 40
# Per meter type: value of one scale tick, full measurement range, and unit.
METER_CONFIG = [{
    'scale_value': 25.0 / 50.0,
    'range': 25.0,
    'unit': "(MPa)"
}, {
    'scale_value': 1.6 / 32.0,
    'range': 1.6,
    'unit': "(MPa)"
}]
def parse_args():
    """Build and parse the command-line arguments for meter-reading inference.

    Returns:
        argparse.Namespace with the options defined below.
    """
    # Typo fixes in user-facing help strings: Infering->Inferring,
    # infered->inferred, lable->label, threshlod->threshold.
    parser = argparse.ArgumentParser(description='Meter Reader Inferring')
    parser.add_argument(
        '--detector_dir',
        dest='detector_dir',
        help='The directory of models to do detection',
        type=str)
    parser.add_argument(
        '--segmenter_dir',
        dest='segmenter_dir',
        help='The directory of models to do segmentation',
        type=str)
    parser.add_argument(
        '--image_dir',
        dest='image_dir',
        help='The directory of images to be inferred',
        type=str,
        default=None)
    parser.add_argument(
        '--image',
        dest='image',
        help='The image to be inferred',
        type=str,
        default=None)
    parser.add_argument(
        '--use_camera',
        dest='use_camera',
        help='Whether use camera or not',
        action='store_true')
    parser.add_argument(
        '--camera_id',
        dest='camera_id',
        type=int,
        help='The camera id',
        default=0)
    parser.add_argument(
        '--use_erode',
        dest='use_erode',
        help='Whether erode the predicted label map',
        action='store_true')
    parser.add_argument(
        '--erode_kernel',
        dest='erode_kernel',
        help='Erode kernel size',
        type=int,
        default=4)
    parser.add_argument(
        '--save_dir',
        dest='save_dir',
        help='The directory for saving the inference results',
        type=str,
        default='./output/result')
    parser.add_argument(
        '--score_threshold',
        dest='score_threshold',
        help="Detected bbox whose score is lower than this threshold is filtered",
        type=float,
        default=0.5)
    parser.add_argument(
        '--seg_batch_size',
        dest='seg_batch_size',
        help="Segmentation batch size",
        type=int,
        default=2)
    return parser.parse_args()
def is_pic(img_name):
    """Return True if the file name carries a recognized image extension."""
    valid_suffix = ('JPEG', 'jpeg', 'JPG', 'jpg', 'BMP', 'bmp', 'PNG', 'png')
    # Everything after the last dot (the whole name if there is no dot).
    return img_name.split('.')[-1] in valid_suffix
class MeterReader:
    """Two-stage analog meter reader.

    A detector locates meter dials in an image; a segmenter labels each
    dial's pixels (1 = pointer, 2 = scale tick, per read_process usage);
    the reading is then derived from the pointer position relative to the
    scale ticks.
    """

    def __init__(self, detector_dir, segmenter_dir):
        """Load the detection and segmentation models.

        Args:
            detector_dir: directory of the exported detection model.
            segmenter_dir: directory of the exported segmentation model.
        Raises:
            Exception: if either model directory does not exist.
        """
        if not osp.exists(detector_dir):
            raise Exception("Model path {} does not exist".format(
                detector_dir))
        if not osp.exists(segmenter_dir):
            raise Exception("Model path {} does not exist".format(
                segmenter_dir))
        self.detector = pdx.load_model(detector_dir)
        self.segmenter = pdx.load_model(segmenter_dir)
        # Because we will resize images with (METER_SHAPE, METER_SHAPE) before fed into the segmenter,
        # here the transform is composed of normalization only.
        self.seg_transforms = transforms.Compose([transforms.Normalize()])

    def predict(self,
                im_file,
                save_dir='./',
                use_erode=True,
                erode_kernel=4,
                score_threshold=0.5,
                seg_batch_size=2):
        """Detect all meters in an image, segment each, and print readings.

        Args:
            im_file: image path or an already-loaded image array.
            save_dir: directory for the visualized output.
            use_erode: whether to erode the predicted label map.
            erode_kernel: erosion kernel size.
            score_threshold: detection boxes below this score are dropped.
            seg_batch_size: batch size for the segmentation model.
        """
        if isinstance(im_file, str):
            im = cv2.imread(im_file).astype('float32')
        else:
            im = im_file.copy()
        # Get detection results
        det_results = self.detector.predict(im)
        # Filter bbox whose score is lower than score_threshold
        filtered_results = list()
        for res in det_results:
            if res['score'] > score_threshold:
                filtered_results.append(res)

        resized_meters = list()
        for res in filtered_results:
            # Crop the bbox area
            xmin, ymin, w, h = res['bbox']
            xmin = max(0, int(xmin))
            ymin = max(0, int(ymin))
            xmax = min(im.shape[1], int(xmin + w - 1))
            ymax = min(im.shape[0], int(ymin + h - 1))
            sub_image = im[ymin:(ymax + 1), xmin:(xmax + 1), :]

            # Resize the image with shape (METER_SHAPE, METER_SHAPE)
            meter_shape = sub_image.shape
            scale_x = float(METER_SHAPE) / float(meter_shape[1])
            scale_y = float(METER_SHAPE) / float(meter_shape[0])
            meter_meter = cv2.resize(
                sub_image,
                None,
                None,
                fx=scale_x,
                fy=scale_y,
                interpolation=cv2.INTER_LINEAR)
            meter_meter = meter_meter.astype('float32')
            resized_meters.append(meter_meter)

        # Segment the cropped meters in batches of seg_batch_size.
        meter_num = len(resized_meters)
        seg_results = list()
        for i in range(0, meter_num, seg_batch_size):
            im_size = min(meter_num, i + seg_batch_size)
            meter_images = list()
            for j in range(i, im_size):
                # BUG FIX: the original appended resized_meters[j - i],
                # which re-fed the *first* batch's images for every batch.
                meter_images.append(resized_meters[j])
            result = self.segmenter.batch_predict(
                transforms=self.seg_transforms,
                img_file_list=meter_images)
            if use_erode:
                # Erode the label map to suppress thin spurious regions.
                kernel = np.ones((erode_kernel, erode_kernel), np.uint8)
                # Use a distinct loop variable to avoid shadowing the batch
                # index `i` above.
                for k in range(len(result)):
                    result[k]['label_map'] = cv2.erode(result[k]['label_map'],
                                                       kernel)
            seg_results.extend(result)

        # Derive scale/pointer geometry from each segmented label map.
        results = list()
        for i, seg_result in enumerate(seg_results):
            result = self.read_process(seg_result['label_map'])
            results.append(result)

        # Convert geometry to a physical reading; the tick count selects
        # which meter type (METER_CONFIG entry) this dial is.
        meter_values = list()
        for i, result in enumerate(results):
            if result['scale_num'] > TYPE_THRESHOLD:
                value = result['scales'] * METER_CONFIG[0]['scale_value']
            else:
                value = result['scales'] * METER_CONFIG[1]['scale_value']
            meter_values.append(value)
            print("-- Meter {} -- result: {} --\n".format(i, value))

        # visualize the results
        visual_results = list()
        for i, res in enumerate(filtered_results):
            # Use `score` to represent the meter value
            res['score'] = meter_values[i]
            visual_results.append(res)
        pdx.det.visualize(im_file, visual_results, -1, save_dir=save_dir)

    def read_process(self, label_maps):
        """Turn one segmented label map into {scale_num, scales, ratio}."""
        # Convert the circular meter into rectangular meter
        line_images = self.creat_line_image(label_maps)
        # Convert the 2d meter into 1d meter
        scale_data, pointer_data = self.convert_1d_data(line_images)
        # Fliter scale data whose value is lower than the mean value
        self.scale_mean_filtration(scale_data)
        # Get scale_num, scales and ratio of meters
        result = self.get_meter_reader(scale_data, pointer_data)
        return result

    def creat_line_image(self, meter_image):
        """Unroll the circular dial into a LINE_HEIGHT x LINE_WIDTH strip.

        Each column corresponds to an angle around the dial; each row to a
        radius from the rim inward.
        """
        line_image = np.zeros((LINE_HEIGHT, LINE_WIDTH), dtype=np.uint8)
        for row in range(LINE_HEIGHT):
            for col in range(LINE_WIDTH):
                theta = PI * 2 / LINE_WIDTH * (col + 1)
                rho = CIRCLE_RADIUS - row - 1
                # NOTE(review): x (from cos) indexes the first axis and y
                # (from sin) the second — looks transposed vs. the usual
                # (row, col) convention, but is consistent within this class.
                x = int(CIRCLE_CENTER[0] + rho * math.cos(theta) + 0.5)
                y = int(CIRCLE_CENTER[1] - rho * math.sin(theta) + 0.5)
                line_image[row, col] = meter_image[x, y]
        return line_image

    def convert_1d_data(self, meter_image):
        """Collapse the unrolled strip into per-column pixel counts.

        Class 1 pixels are counted as pointer, class 2 as scale ticks.
        """
        scale_data = np.zeros((LINE_WIDTH), dtype=np.uint8)
        pointer_data = np.zeros((LINE_WIDTH), dtype=np.uint8)
        for col in range(LINE_WIDTH):
            for row in range(LINE_HEIGHT):
                if meter_image[row, col] == 1:
                    pointer_data[col] += 1
                elif meter_image[row, col] == 2:
                    scale_data[col] += 1
        return scale_data, pointer_data

    def scale_mean_filtration(self, scale_data):
        """Zero out (in place) scale columns below the mean count."""
        mean_data = np.mean(scale_data)
        for col in range(LINE_WIDTH):
            if scale_data[col] < mean_data:
                scale_data[col] = 0

    def get_meter_reader(self, scale_data, pointer_data):
        """Locate scale ticks and the pointer along the 1-D profiles.

        Returns:
            dict with:
                scale_num: number of detected scale ticks.
                scales: pointer position in tick units (-1 if undetermined).
                ratio: pointer position as a fraction of the full scale span.
        """
        scale_flag = False
        pointer_flag = False
        one_scale_start = 0
        one_scale_end = 0
        one_pointer_start = 0
        one_pointer_end = 0
        scale_location = list()
        pointer_location = 0
        # Scan for contiguous runs of non-zero columns; the midpoint of each
        # run is one tick (or the pointer) location.
        for i in range(LINE_WIDTH - 1):
            if scale_data[i] > 0 and scale_data[i + 1] > 0:
                if scale_flag == False:
                    one_scale_start = i
                    scale_flag = True
            if scale_flag:
                if scale_data[i] == 0 and scale_data[i + 1] == 0:
                    one_scale_end = i - 1
                    one_scale_location = (one_scale_start + one_scale_end) / 2
                    scale_location.append(one_scale_location)
                    one_scale_start = 0
                    one_scale_end = 0
                    scale_flag = False
            if pointer_data[i] > 0 and pointer_data[i + 1] > 0:
                if pointer_flag == False:
                    one_pointer_start = i
                    pointer_flag = True
            if pointer_flag:
                if pointer_data[i] == 0 and pointer_data[i + 1] == 0:
                    one_pointer_end = i - 1
                    pointer_location = (
                        one_pointer_start + one_pointer_end) / 2
                    one_pointer_start = 0
                    one_pointer_end = 0
                    pointer_flag = False

        scale_num = len(scale_location)
        scales = -1
        ratio = -1
        if scale_num > 0:
            # Interpolate the pointer position between the two ticks that
            # bracket it (1-based tick index plus fractional part).
            for i in range(scale_num - 1):
                if scale_location[i] <= pointer_location and pointer_location < scale_location[i + 1]:
                    scales = i + (pointer_location - scale_location[i]) / (
                        scale_location[i + 1] - scale_location[i] + 1e-05) + 1
            ratio = (pointer_location - scale_location[0]) / (
                scale_location[scale_num - 1] - scale_location[0] + 1e-05)
        result = {'scale_num': scale_num, 'scales': scales, 'ratio': ratio}
        return result
def infer(args):
    """Run meter reading on a single image, a directory, or a camera stream."""
    image_lists = []
    if args.image is not None:
        # Single-image mode: validate the path and extension up front.
        if not osp.exists(args.image):
            raise Exception("Image {} does not exist.".format(args.image))
        if not is_pic(args.image):
            raise Exception("{} is not a picture.".format(args.image))
        image_lists.append(args.image)
    elif args.image_dir is not None:
        # Directory mode: collect every image file inside it.
        if not osp.exists(args.image_dir):
            raise Exception("Directory {} does not exist.".format(
                args.image_dir))
        for fname in os.listdir(args.image_dir):
            if is_pic(fname):
                image_lists.append(osp.join(args.image_dir, fname))

    meter_reader = MeterReader(args.detector_dir, args.segmenter_dir)
    if image_lists:
        for im_file in image_lists:
            meter_reader.predict(im_file, args.save_dir, args.use_erode,
                                 args.erode_kernel, args.score_threshold,
                                 args.seg_batch_size)
    elif args.use_camera:
        # Camera mode: read frames until the stream ends or 'q' is pressed.
        cap_video = cv2.VideoCapture(args.camera_id)
        if not cap_video.isOpened():
            raise Exception(
                "Error opening video stream, please make sure the camera is working"
            )
        while cap_video.isOpened():
            ret, frame = cap_video.read()
            if not ret:
                break
            meter_reader.predict(frame, args.save_dir, args.use_erode,
                                 args.erode_kernel, args.score_threshold,
                                 args.seg_batch_size)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap_video.release()
if __name__ == '__main__':
    # Parse command-line options and run inference.
    args = parse_args()
    infer(args)
|
{"hexsha": "58108bbacfc02422bd9e80dd93d53252694cec1e", "size": 12916, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/meter_reader/reader_infer.py", "max_stars_repo_name": "yaoshanliang/PaddleX", "max_stars_repo_head_hexsha": "fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-30T08:47:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-06T07:38:03.000Z", "max_issues_repo_path": "examples/meter_reader/reader_infer.py", "max_issues_repo_name": "yaoshanliang/PaddleX", "max_issues_repo_head_hexsha": "fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/meter_reader/reader_infer.py", "max_forks_repo_name": "yaoshanliang/PaddleX", "max_forks_repo_head_hexsha": "fe40b6d10db0e4d46f3a73cc5e83c3236d6a5842", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5892351275, "max_line_length": 102, "alphanum_fraction": 0.5679777021, "include": true, "reason": "import numpy", "num_tokens": 2894}
|
"""
Test of integrating torch Conv2d and LSTM modules
"""
from numbers import Number
import numpy as np
import torch
import torch.nn as nn
if __name__ == "__main__":
MAX_LENGTH = 100
MIN_LENGTH = 10
NUM_SAMPLES = 45
CHANNELS = 3
WIDTH = 128
HEIGHT = 128
HIDDEN_SIZE = 32
HIDDEN_LAYERS = 2
CONV_FILTERS = [
(8, (5, 5), 2),
(8, (5, 5), 2),
(8, (5, 5), 2)
]
# Generate random sequence data
samples = []
for _ in range(NUM_SAMPLES):
seq_len = np.random.randint(MIN_LENGTH, MAX_LENGTH)
seq = np.random.uniform(0.0, 0.1, (seq_len, CHANNELS, WIDTH, HEIGHT))
samples.append(seq)
# Construct model
input_channels = CHANNELS
image_shape = [WIDTH, HEIGHT]
layers = []
for l, (channels, kernel, stride) in enumerate(CONV_FILTERS):
kernel = list(kernel)
padding = []
for d in range(len(kernel)):
kernel[d] += (kernel[d] + 1) % 2
padding.append(kernel[d] // 2)
layers.append(nn.Conv2d(
input_channels,
channels,
kernel,
stride=stride,
padding=padding
))
input_channels = channels
if isinstance(stride, Number):
stride = [stride] * len(image_shape)
for d, s in enumerate(stride):
image_shape[d] = -(-image_shape[d] // s)
print(f"Conv2D: predicted output shape: {image_shape}")
if l < len(CONV_FILTERS) - 1:
layers.append(nn.ReLU())
conv = nn.Sequential(*layers)
num_features = int(input_channels * np.prod(image_shape))
lstm = nn.LSTM(num_features, HIDDEN_SIZE, HIDDEN_LAYERS)
# Process sequences
seq_lens = [sample.shape[0] for sample in samples]
samples = [torch.as_tensor(sample, dtype=torch.float32) for sample in samples]
samples = nn.utils.rnn.pad_sequence(samples)
print(f"\nPadded sequence shape: {samples.shape}")
samples = nn.utils.rnn.pack_padded_sequence(samples, seq_lens, enforce_sorted=False)
print(f"\nPacked sequence type: {samples.data.shape}")
print(f"packed type: {type(samples)}")
data = conv(samples.data)
print(f"\nConv output shape: {data.shape}")
data = torch.flatten(data, start_dim=1)
samples = nn.utils.rnn.PackedSequence(data, samples.batch_sizes, samples.sorted_indices, samples.unsorted_indices)
print(f"flattened shape: {samples.data.shape}")
hidden_shape = (HIDDEN_LAYERS, NUM_SAMPLES, HIDDEN_SIZE)
state = (torch.zeros(hidden_shape, dtype=torch.float32), torch.zeros(hidden_shape, dtype=torch.float32))
output, (hidden, cell) = lstm(samples, state)
print(f"\nLSTM output shape: {output.data.shape}")
print(f"hidden shape: {hidden.shape}")
print(f"cell shape: {cell.shape}")
output, _ = nn.utils.rnn.pad_packed_sequence(output)
print(f"\nFinal output shape: {output.shape}")
|
{"hexsha": "207c650667b984ea44940e465f0f1eab71467767", "size": 2956, "ext": "py", "lang": "Python", "max_stars_repo_path": "junk/conv_lstm_test.py", "max_stars_repo_name": "oliehoek-research/interactive_agents", "max_stars_repo_head_hexsha": "fddf99fed8e6aaf213c658897c2e232fe5323053", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "junk/conv_lstm_test.py", "max_issues_repo_name": "oliehoek-research/interactive_agents", "max_issues_repo_head_hexsha": "fddf99fed8e6aaf213c658897c2e232fe5323053", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2022-03-11T07:58:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T12:57:26.000Z", "max_forks_repo_path": "junk/conv_lstm_test.py", "max_forks_repo_name": "oliehoek-research/interactive_agents", "max_forks_repo_head_hexsha": "fddf99fed8e6aaf213c658897c2e232fe5323053", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-11T19:28:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T19:28:53.000Z", "avg_line_length": 30.4742268041, "max_line_length": 118, "alphanum_fraction": 0.6255074425, "include": true, "reason": "import numpy", "num_tokens": 758}
|
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2017 Viktor Csomor <viktor.csomor@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include "main.h"
#include <Eigen/CXX11/Tensor>
#include <utility>
using Eigen::Tensor;
using Eigen::RowMajor;
// Decompose a flat index i in [0, 8) into (x, y, z) coordinates of a
// 2x2x2 tensor: x varies slowest, z fastest.
static void calc_indices(int i, int& x, int& y, int& z)
{
  const int plane = i / 4;
  const int remainder = i % 4;
  x = plane;
  y = remainder / 2;
  z = i % 2;
}
// Verifies the move constructor and the move assignment operator of Tensor
// for both column-major (default) and row-major layouts.
static void test_move()
{
  int x;
  int y;
  int z;
  Tensor<int,3> tensor1(2, 2, 2);
  Tensor<int,3,RowMajor> tensor2(2, 2, 2);
  // Fill both tensors with distinguishable values.
  for (int i = 0; i < 8; i++)
  {
    calc_indices(i, x, y, z);
    tensor1(x,y,z) = i;
    tensor2(x,y,z) = 2 * i;
  }
  // Invokes the move constructor.
  Tensor<int,3> moved_tensor1 = std::move(tensor1);
  Tensor<int,3,RowMajor> moved_tensor2 = std::move(tensor2);
  // Moved-from tensors must be left empty, not merely unspecified.
  VERIFY_IS_EQUAL(tensor1.size(), 0);
  VERIFY_IS_EQUAL(tensor2.size(), 0);
  // The moved-to tensors now own the original data.
  for (int i = 0; i < 8; i++)
  {
    calc_indices(i, x, y, z);
    VERIFY_IS_EQUAL(moved_tensor1(x,y,z), i);
    VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 2 * i);
  }
  Tensor<int,3> moved_tensor3(2,2,2);
  Tensor<int,3,RowMajor> moved_tensor4(2,2,2);
  moved_tensor3.setZero();
  moved_tensor4.setZero();
  // Invokes the move assignment operator.
  moved_tensor3 = std::move(moved_tensor1);
  moved_tensor4 = std::move(moved_tensor2);
  // The assertions below expect the moved-from tensors to keep size 8 and
  // hold the zeroed data, i.e. move assignment swaps the two buffers.
  VERIFY_IS_EQUAL(moved_tensor1.size(), 8);
  VERIFY_IS_EQUAL(moved_tensor2.size(), 8);
  for (int i = 0; i < 8; i++)
  {
    calc_indices(i, x, y, z);
    VERIFY_IS_EQUAL(moved_tensor1(x,y,z), 0);
    VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 0);
    VERIFY_IS_EQUAL(moved_tensor3(x,y,z), i);
    VERIFY_IS_EQUAL(moved_tensor4(x,y,z), 2 * i);
  }
}
// Registers test_move with Eigen's test harness under the name
// "cxx11_tensor_move".
EIGEN_DECLARE_TEST(cxx11_tensor_move)
{
  CALL_SUBTEST(test_move());
}
|
{"hexsha": "0ab2b778662c5bfb648599d9883f72e8b75cd647", "size": 1915, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "third_party/eigen3/include/unsupported/test/cxx11_tensor_move.cpp", "max_stars_repo_name": "Shamraev/motion_imitation", "max_stars_repo_head_hexsha": "9b9166436e4996e2a03b36d19f4f5422cde9c21e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2111.0, "max_stars_repo_stars_event_min_datetime": "2019-01-29T07:01:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T06:48:14.000Z", "max_issues_repo_path": "third_party/eigen3/include/unsupported/test/cxx11_tensor_move.cpp", "max_issues_repo_name": "Shamraev/motion_imitation", "max_issues_repo_head_hexsha": "9b9166436e4996e2a03b36d19f4f5422cde9c21e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 131.0, "max_issues_repo_issues_event_min_datetime": "2019-02-18T10:56:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-27T12:07:00.000Z", "max_forks_repo_path": "third_party/eigen3/include/unsupported/test/cxx11_tensor_move.cpp", "max_forks_repo_name": "Shamraev/motion_imitation", "max_forks_repo_head_hexsha": "9b9166436e4996e2a03b36d19f4f5422cde9c21e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 421.0, "max_forks_repo_forks_event_min_datetime": "2019-02-12T07:59:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T05:22:01.000Z", "avg_line_length": 23.3536585366, "max_line_length": 69, "alphanum_fraction": 0.6501305483, "num_tokens": 635}
|
#%%
import numpy as np
def VAR(r,alpha):
    """Value at Risk: the negated alpha-quantile of the return series r."""
    cutoff = np.quantile(r, alpha)
    return -cutoff
def CVAR(r,alpha):
    """Conditional VaR (expected shortfall): negated mean of the returns
    at or below the alpha-quantile of r.
    """
    cutoff = np.quantile(r, alpha)
    tail = r[r <= cutoff]
    return -np.mean(tail)
|
{"hexsha": "337c2eb4f646a5f8e5adff635383500432a36526", "size": 151, "ext": "py", "lang": "Python", "max_stars_repo_path": "value_at_risk/functions.py", "max_stars_repo_name": "dylan-lee94/statistics", "max_stars_repo_head_hexsha": "0808c7e86ca752774edbbe3bc504d8338cc5f2ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-13T14:53:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T14:53:36.000Z", "max_issues_repo_path": "value_at_risk/functions.py", "max_issues_repo_name": "dylan-lee94/statistics", "max_issues_repo_head_hexsha": "0808c7e86ca752774edbbe3bc504d8338cc5f2ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "value_at_risk/functions.py", "max_forks_repo_name": "dylan-lee94/statistics", "max_forks_repo_head_hexsha": "0808c7e86ca752774edbbe3bc504d8338cc5f2ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.875, "max_line_length": 50, "alphanum_fraction": 0.6092715232, "include": true, "reason": "import numpy", "num_tokens": 42}
|
import os
import numpy as np
import Bio.PDB as PDB
from .utilities.metric import get_residues_nearby
from .FileNormalizer import FileNormalizer
from .FileNormalizer import UpdatePDBNormalizer
class LoopFileNormalizer(FileNormalizer):
    '''Writes a Rosetta loop definition file for each structure's
    candidate_loop_list, together with a PyMOL script that selects and
    highlights those loops.
    '''
    def __init__(self):
        pass
    def normalize_one_file(self, loop_path, script_path, candidate_loop_list):
        """Write one Rosetta .loop file and one PyMOL selection script."""
        records = ''.join('LOOP {0} {1} {2} 0 1\n'.format(lp.begin, lp.end, lp.end)
                          for lp in candidate_loop_list)
        with open(loop_path, 'w') as loop_file:
            loop_file.write(records)
        script = 'select loops,'
        for lp in candidate_loop_list:
            script += ' res {0}-{1} and chain {2}'.format(lp.begin, lp.end, lp.chain)
        script += '\nhide all\nshow cartoon\ncolor magenta, loops and name c*\n'
        with open(script_path, 'w') as script_file:
            script_file.write(script)
    def normalize_adjacent_loop_pairs(self, loop_path, adjacent_loop_pair_set):
        """Write each adjacent pair of loops as two LOOP lines followed by a
        blank separator line.
        """
        with open(loop_path, 'w') as loop_file:
            for pair in adjacent_loop_pair_set:
                loop_file.write('LOOP {0} {1} {2} 0 1\nLOOP {3} {4} {5} 0 1\n\n'.format(
                    pair[0].begin, pair[0].end, pair[0].end,
                    pair[1].begin, pair[1].end, pair[1].end))
    def apply(self, info_dict):
        """Create the loop/script files next to each candidate structure's PDB."""
        for structure_dict in info_dict['candidate_list']:
            out_dir = os.path.dirname(structure_dict['path'])
            name = structure_dict['name']
            if 'candidate_loop_list' in structure_dict:
                self.normalize_one_file(
                    os.path.join(out_dir, name + '.loop'),
                    os.path.join(out_dir, name + '_select_loop.pml'),
                    structure_dict['candidate_loop_list'])
            if 'adjacent_loop_pair_set' in structure_dict:
                self.normalize_adjacent_loop_pairs(
                    os.path.join(out_dir, name + '_adjacent_pairs.loop'),
                    structure_dict['adjacent_loop_pair_set'])
class LoopTrimNormalizer(UpdatePDBNormalizer):
    '''LoopTrimNormalizer creates a new PDB file whose loop is replaced by a
    straight line and the side chains of residues within a cutoff from
    the loop are trimmed.
    '''
    def __init__(self, cutoff):
        # Distance cutoff (presumably in Angstroms -- TODO confirm) used to
        # decide which residues near the loop lose their side chains.
        self.cutoff = cutoff
    def trim_one_residue(self, residue, atom_list):
        """Replace ``residue`` in its parent chain by a copy that contains only
        the atoms named in ``atom_list`` (names missing from the residue are
        silently skipped).
        """
        new_res = PDB.Residue.Residue(residue.get_id(), residue.get_resname(), residue.get_segid())
        for keep_atom in atom_list:
            if keep_atom in residue:
                new_res.add(residue[keep_atom])
        # Swap the stripped copy into the chain in place of the original.
        chain = residue.get_parent()
        chain.detach_child(residue.get_id())
        chain.add(new_res)
    def get_orthogonal_vector(self, vect):
        '''Get a normalized vector orthogonal to the given vector.'''
        oth_vect = np.cross(vect, np.array([1, 0, 0]))
        # If vect is parallel to the x axis the cross product is zero; fall
        # back to the y axis, which cannot also be parallel to vect.
        if 0 == np.dot(oth_vect, oth_vect):
            oth_vect = np.cross(vect, np.array([0, 1, 0]))
        return oth_vect / np.linalg.norm(oth_vect)
    def straightify_loop(self, structure, loop):
        """Place the mainchain atoms of ``loop`` onto the straight segment
        between the CA atoms of its first and last residues.
        """
        line_begin = structure[loop.model][loop.chain][loop.begin]['CA'].coord
        line_end = structure[loop.model][loop.chain][loop.end]['CA'].coord
        line_vect = line_end - line_begin
        # One segment step per residue along the loop.
        seg_vect = line_vect / (loop.end - loop.begin)
        oth_vect = self.get_orthogonal_vector(line_vect)
        structure[loop.model][loop.chain][loop.begin]['C'].coord = line_begin \
                + 1.0/3 * seg_vect
        for seqpos in range(loop.begin + 1, loop.end):
            structure[loop.model][loop.chain][seqpos]['CA'].coord = line_begin \
                    + (seqpos - loop.begin) * seg_vect
            # C and N are pushed off the line by oth_vect with asymmetric
            # magnitudes (+0.5 vs -1.0), which avoids collinear mainchain
            # atoms -- NOTE(review): confirm this geometry is intentional.
            structure[loop.model][loop.chain][seqpos]['C'].coord = line_begin \
                    + (seqpos - loop.begin + 1.0/3 ) * seg_vect + 0.5 * oth_vect
            structure[loop.model][loop.chain][seqpos]['N'].coord = line_begin \
                    + (seqpos - loop.begin - 1.0/3 ) * seg_vect - oth_vect
        structure[loop.model][loop.chain][loop.end]['N'].coord = line_end \
                - 1.0/3 * seg_vect
    def normalize_one_loop(self, structure, loop):
        """Trim side chains near ``loop`` and flatten the loop to a line."""
        loop_residues = [structure[loop.model][loop.chain][seqpos] for seqpos in range(loop.begin, loop.end + 1)]
        # Remove the side chains of residues within self.cutoff
        nearby_residues = get_residues_nearby(loop_residues, structure, self.cutoff)
        for res in nearby_residues:
            self.trim_one_residue(res, ['CA', 'C', 'N', 'O', 'H'])
        # Only keep the mainchain atoms of the loop
        for res in loop_residues:
            self.trim_one_residue(res, ['CA', 'C', 'N'])
        # Make the loop a straight line
        self.straightify_loop(structure, loop)
    def apply(self, info_dict):
        """For each candidate structure, write '<name>_trimed.pdb' with all
        candidate loops straightened, then merge it back into the original
        PDB via the base class's update_pdb().
        """
        parser = PDB.PDBParser()
        io = PDB.PDBIO()
        for structure_dict in info_dict['candidate_list']:
            structure = parser.get_structure('', structure_dict['path'])
            for loop in structure_dict['candidate_loop_list']:
                self.normalize_one_loop(structure, loop)
            io.set_structure(structure)
            tmp_pdb = os.path.join(os.path.dirname(structure_dict['path']), structure_dict['name'] + '_trimed.pdb')
            io.save(tmp_pdb)
            self.update_pdb(tmp_pdb,structure_dict['path'])
|
{"hexsha": "4aa104981ac71f4809bb83fab40678856ccdba45", "size": 5162, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark_constructor/file_normalizers/LoopFileNormalizer.py", "max_stars_repo_name": "Kortemme-Lab/benchmark_set_construct", "max_stars_repo_head_hexsha": "ee6c9e097ff49d370936b41f102ada006fb4441a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "benchmark_constructor/file_normalizers/LoopFileNormalizer.py", "max_issues_repo_name": "Kortemme-Lab/benchmark_set_construct", "max_issues_repo_head_hexsha": "ee6c9e097ff49d370936b41f102ada006fb4441a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark_constructor/file_normalizers/LoopFileNormalizer.py", "max_forks_repo_name": "Kortemme-Lab/benchmark_set_construct", "max_forks_repo_head_hexsha": "ee6c9e097ff49d370936b41f102ada006fb4441a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1366906475, "max_line_length": 109, "alphanum_fraction": 0.6582719876, "include": true, "reason": "import numpy", "num_tokens": 1385}
|
-- Every field is a vector space over itself: both the vector operations and
-- the scalar operations are instantiated with the field's own _+_ and _⋅_.
module Structure.Operator.Field.VectorSpace where

import      Lvl
open import Structure.Setoid
open import Structure.Operator.Field
open import Structure.Operator.Properties using (associativity ; identityₗ ; distributivityᵣ)
open import Structure.Operator.Vector
open import Structure.Operator
open import Type

private variable ℓ ℓₑ ℓₗ ℓₗₑ : Lvl.Level
private variable T : Type{ℓ}
private variable _+_ _⋅_ : T → T → T

module _
  ⦃ equiv : Equiv{ℓₑ}(T) ⦄
  ⦃ [+]-oper : BinaryOperator(_+_) ⦄
  ⦃ [⋅]-oper : BinaryOperator(_⋅_) ⦄
  (field-structure : Field{T = T}(_+_)(_⋅_))
  where
  open Field(field-structure)

  -- The field itself is the scalar field, scalar multiplication is field
  -- multiplication, and the vector-space laws follow from associativity and
  -- distributivity of the field operations.
  fieldVectorSpace : VectorSpace(_+_)(_⋅_)(_+_)(_⋅_)
  VectorSpace.scalarField                   fieldVectorSpace = field-structure
  VectorSpace.[⋅ₛᵥ]-binaryOperator          fieldVectorSpace = [⋅]-oper
  VectorSpace.[⋅ₛ][⋅ₛᵥ]-compatibility       fieldVectorSpace = associativity(_⋅_)
  VectorSpace.[⋅ₛᵥ][+ᵥ]-distributivityₗ     fieldVectorSpace = [⋅][+]-distributivityₗ
  VectorSpace.[⋅ₛᵥ][+ₛ][+ᵥ]-distributivityᵣ fieldVectorSpace = distributivityᵣ(_⋅_)(_+_)
|
{"hexsha": "74210b12b4aec24d32a9c3bd9163a46cd6d49820", "size": 1077, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Structure/Operator/Field/VectorSpace.agda", "max_stars_repo_name": "Lolirofle/stuff-in-agda", "max_stars_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-07T17:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T06:53:22.000Z", "max_issues_repo_path": "Structure/Operator/Field/VectorSpace.agda", "max_issues_repo_name": "Lolirofle/stuff-in-agda", "max_issues_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Structure/Operator/Field/VectorSpace.agda", "max_forks_repo_name": "Lolirofle/stuff-in-agda", "max_forks_repo_head_hexsha": "70f4fba849f2fd779c5aaa5af122ccb6a5b271ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1379310345, "max_line_length": 93, "alphanum_fraction": 0.7121634169, "num_tokens": 384}
|
import copy
import numpy as np
import opytimizer.math.random as r
import opytimizer.utils.history as h
import opytimizer.utils.logging as l
from opytimizer.core.optimizer import Optimizer
logger = l.get_logger(__name__)
class ABC(Optimizer):
    """An ABC class, inherited from Optimizer.
    This will be the designed class to define ABC-related
    variables and methods.
    References:
        D. Karaboga and B. Basturk. A powerful and efficient algorithm for numerical function optimization: Artificial bee colony (ABC) algorithm. Journal of Global Optimization (2007).
    """
    def __init__(self, algorithm='ABC', hyperparams=None):
        """Initialization method.
        Args:
            algorithm (str): A string holding optimizer's algorithm name.
            hyperparams (dict): A hyperparams dictionary containing key-value
                parameters to meta-heuristics.
        """
        logger.info('Overriding class: Optimizer -> ABC.')
        # Override its parent class with the receiving hyperparams
        super(ABC, self).__init__(algorithm=algorithm)
        # Number of trial limits (default; may be overridden by hyperparams)
        self._n_trials = 10
        # Now, we need to build this class up
        self._build(hyperparams)
        logger.info('Class overrided.')
    @property
    def n_trials(self):
        """int: Maximum number of trials before a food source is abandoned.
        """
        return self._n_trials
    @n_trials.setter
    def n_trials(self, n_trials):
        self._n_trials = n_trials
    def _build(self, hyperparams):
        """This method will serve as the object building process.
        One can define several commands here that does not necessarily
        needs to be on its initialization.
        Args:
            hyperparams (dict): A hyperparams dictionary containing key-value
                parameters to meta-heuristics.
        """
        logger.debug('Running private method: build().')
        # We need to save the hyperparams object for faster looking up
        self.hyperparams = hyperparams
        # If one can find any hyperparam inside its object,
        # set them as the ones that will be used
        if hyperparams:
            if 'n_trials' in hyperparams:
                self.n_trials = hyperparams['n_trials']
        # Set built variable to 'True'
        self.built = True
        # Logging attributes
        logger.debug(
            f'Algorithm: {self.algorithm} | Hyperparameters: n_trials = {self.n_trials}.')
    def _evaluate_location(self, agent, neighbour, function, trial):
        """Evaluates a food source location and update its value if possible.
        Args:
            agent (Agent): An agent.
            neighbour (Agent): A neighbour agent.
            function (Function): A function object.
            trial (int): A trial counter.
        Returns:
            The number of trials for the current food source.
        """
        # Generates an uniform random number
        r1 = r.generate_uniform_random_number(-1, 1)
        # Copies actual food source location
        a = copy.deepcopy(agent)
        # Change its location according to equation 2.2
        a.position = agent.position + \
            (agent.position - neighbour.position) * r1
        # Evaluating its fitness
        a.fit = function.pointer(a.position)
        # Check if fitness is improved (minimization: lower is better)
        if a.fit < agent.fit:
            # If yes, reset the number of trials for this particular food source
            trial = 0
            # Copies the new position
            agent.position = copy.deepcopy(a.position)
            # And also the new fitness
            agent.fit = copy.deepcopy(a.fit)
        # If not
        else:
            # We increase the trials counter
            trial += 1
        return trial
    def _send_employee(self, agents, function, trials):
        """Sends employee bees onto food source to evaluate its nectar.
        Args:
            agents (list): List of agents.
            function (Function): A function object.
            trials (np.array): Array of trials counter.
        """
        # Iterate through all food sources
        for i, agent in enumerate(agents):
            # Gathering a random source to be used
            # NOTE(review): source may equal i, pairing an agent with itself
            # (the update then degenerates to no movement) -- confirm intended.
            source = int(r.generate_uniform_random_number(0, len(agents)))
            # Measuring food source location
            trials[i] = self._evaluate_location(
                agent, agents[source], function, trials[i])
    def _send_onlooker(self, agents, function, trials):
        """Sends onlooker bees to select new food sources.
        Args:
            agents (list): List of agents.
            function (Function): A function object.
            trials (np.array): Array of trials counter.
        """
        # Calculating the fitness sum over all agents
        total = sum(agent.fit for agent in agents)
        # Defining food sources' counter
        k = 0
        # While counter is less than the amount of food sources
        while k < len(agents):
            # We iterate through every agent
            for i, agent in enumerate(agents):
                # Creates a random uniform number
                r1 = r.generate_uniform_random_number(0, 1)
                # Calculates the food source's probability
                # NOTE(review): higher fit yields higher probability; for a
                # minimization objective this favours worse sources -- the
                # +0.1 floor guarantees eventual progress of k. Confirm.
                probs = (agent.fit / (total + 1e-10)) + 0.1
                # If the random number is smaller than food source's probability
                if r1 < probs:
                    # We need to increment the counter
                    k += 1
                    # Gathers a random source to be used
                    source = int(
                        r.generate_uniform_random_number(0, len(agents)))
                    # Evaluate its location
                    trials[i] = self._evaluate_location(
                        agent, agents[source], function, trials[i])
    def _send_scout(self, agents, function, trials):
        """Sends scout bees to scout for new possible food sources.
        Args:
            agents (list): List of agents.
            function (Function): A function object.
            trials (np.array): Array of trials counter.
        """
        # Calculating the maximum trial counter value and index
        max_trial, max_index = np.max(trials), np.argmax(trials)
        # If maximum trial is bigger than number of possible trials
        if max_trial > self.n_trials:
            # Resets the trial counter
            trials[max_index] = 0
            # Copies the current agent
            a = copy.deepcopy(agents[max_index])
            # Updates its position with a random shakeness
            a.position += r.generate_uniform_random_number(-1, 1)
            # Recalculates its fitness
            a.fit = function.pointer(a.position)
            # If fitness is better
            if a.fit < agents[max_index].fit:
                # We copy the temporary agent to the current one
                agents[max_index] = copy.deepcopy(a)
    def _update(self, agents, function, trials):
        """Method that wraps the update pipeline over all agents and variables.
        Args:
            agents (list): List of agents.
            function (Function): A function object.
            trials (np.array): Array of trials counter.
        """
        # Sending employee bees step
        self._send_employee(agents, function, trials)
        # Sending onlooker bees step
        self._send_onlooker(agents, function, trials)
        # Sending scout bees step
        self._send_scout(agents, function, trials)
    def run(self, space, function):
        """Runs the optimization pipeline.
        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
        Returns:
            A History object holding all agents' positions and fitness achieved during the task.
        """
        # Instantiating array of trials counter, one slot per agent
        trials = np.zeros(space.n_agents)
        # Initial search space evaluation
        self._evaluate(space, function)
        # We will define a History object for further dumping
        history = h.History()
        # These are the number of iterations to converge
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')
            # Updating agents
            self._update(space.agents, function, trials)
            # Checking if agents meets the bounds limits
            space.check_bound_limits(space.agents, space.lb, space.ub)
            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function)
            # Every iteration, we need to dump the current space agents
            history.dump(space.agents, space.best_agent)
            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')
        return history
|
{"hexsha": "bc7f2660f740addba844d6a4cd90509e74a1d213", "size": 8994, "ext": "py", "lang": "Python", "max_stars_repo_path": "opytimizer/optimizers/abc.py", "max_stars_repo_name": "macoldibelli/opytimizer", "max_stars_repo_head_hexsha": "ca0574d520ecc17b1ac875bc6271d466c88d18ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opytimizer/optimizers/abc.py", "max_issues_repo_name": "macoldibelli/opytimizer", "max_issues_repo_head_hexsha": "ca0574d520ecc17b1ac875bc6271d466c88d18ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opytimizer/optimizers/abc.py", "max_forks_repo_name": "macoldibelli/opytimizer", "max_forks_repo_head_hexsha": "ca0574d520ecc17b1ac875bc6271d466c88d18ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6690140845, "max_line_length": 186, "alphanum_fraction": 0.6020680454, "include": true, "reason": "import numpy", "num_tokens": 1850}
|
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import List
from typing import Tuple
from numpy import *
from craid.club.regions.Region import Region
from craid.club.regions.SphericalRegion import SphericalRegion
class MultiSphericalRegion(Region):
    """A composite region formed by the union of equal-radius spheres.

    A point belongs to the region when it falls inside any member sphere.
    Fixes over the previous version: drops an unused loop counter, replaces
    hand-rolled min/any/sum loops with the built-ins, and builds the
    ``__str__`` text with ``join`` instead of quadratic concatenation.
    """

    def __init__(self, myName, num, theData: List[Tuple[str, float, float, float]], r: float, color):
        """Build one SphericalRegion per (name, x, y, z) entry of theData.

        Args:
            myName: Display name of the composite region.
            num: Region number, forwarded to the base class.
            theData: One (name, x, y, z) tuple per sphere center.
            r: Common radius shared by every member sphere.
            color: Color forwarded to the base class and each member.
        """
        super().__init__(myName, num, color)
        # Members keep their own names; -1 marks them as anonymous sub-regions.
        self.points: List[SphericalRegion] = [
            SphericalRegion(theName, -1, px, py, pz, r, color)
            for theName, px, py, pz in theData
        ]

    def contains(self, x, y, z):
        """Return True when (x, y, z) lies inside any member sphere."""
        return any(reg.contains(x, y, z) for reg in self.points)

    def distanceFrom(self, x, y, z):
        """Return the smallest distance from (x, y, z) to any member sphere.

        Returns integer 0 when the point touches/enters a sphere; with no
        members, the historical sentinel of 1000000000 is preserved.
        """
        best = min((reg.distanceFrom(x, y, z) for reg in self.points),
                   default=1000000000)
        # Normalize any zero (including 0.0) to integer 0, as before.
        return 0 if best == 0 else best

    def getSurface(self):
        """Return the list of member surfaces, one entry per sphere."""
        return [reg.getSurface() for reg in self.points]

    def __str__(self):
        # Header line followed by one line (or block) per member sphere.
        parts = [f"Multisphere: {self._name}"]
        parts.extend(str(reg) for reg in self.points)
        return "\n".join(parts)

    def getVolume(self):
        """Return the summed volume of all member spheres.

        NOTE(review): overlapping spheres are double-counted, matching the
        original behavior.
        """
        return sum((reg.getVolume() for reg in self.points), 0.0)
|
{"hexsha": "bae93db3beaf6ae266b9fab594de507b56ed8712", "size": 1741, "ext": "py", "lang": "Python", "max_stars_repo_path": "craid/club/regions/MultiSphericalRegion.py", "max_stars_repo_name": "HausReport/ClubRaiders", "max_stars_repo_head_hexsha": "88bd64d2512302ca2b391b48979b6e88b092eb92", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "craid/club/regions/MultiSphericalRegion.py", "max_issues_repo_name": "HausReport/ClubRaiders", "max_issues_repo_head_hexsha": "88bd64d2512302ca2b391b48979b6e88b092eb92", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-05-28T13:30:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-02T14:12:04.000Z", "max_forks_repo_path": "craid/club/regions/MultiSphericalRegion.py", "max_forks_repo_name": "HausReport/ClubRaiders", "max_forks_repo_head_hexsha": "88bd64d2512302ca2b391b48979b6e88b092eb92", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6029411765, "max_line_length": 101, "alphanum_fraction": 0.5456634118, "include": true, "reason": "from numpy", "num_tokens": 446}
|
import os
import torch
from ncc import (tasks, LOGGER)
from ncc.utils import utils
from ncc.utils.checkpoint_utils import load_checkpoint_to_cpu
from ncc.utils.file_ops.yaml_io import (
recursive_contractuser,
recursive_expanduser,
)
def load_state(model_path):
    """Load a checkpoint from disk and rebuild the task/model it was trained with.

    Args:
        model_path: Path to a saved checkpoint file.

    Returns:
        Tuple of (args, task, model, use_cuda) where ``model`` is in eval
        mode and already moved to GPU (and halved, if fp16) when available.
    """
    state = load_checkpoint_to_cpu(model_path, arg_overrides={})
    args = state["args"]
    # Normalize any '~'-style paths stored inside the checkpoint args.
    args = recursive_contractuser(args)
    args = recursive_expanduser(args)
    task = tasks.setup_task(args)  # load src/tgt dicts
    model = task.build_model(args)
    model.load_state_dict(state["model"])
    use_cuda = torch.cuda.is_available() and not args['common']['cpu']
    if args['common']['fp16'] and use_cuda:
        model.half()
    if use_cuda:
        torch.cuda.empty_cache()
        # Pins inference to the last visible GPU -- presumably to avoid the
        # default device used by training jobs; TODO confirm.
        torch.cuda.set_device(torch.cuda.device_count() - 1)
        model.cuda()
    model.eval()
    # Drop the checkpoint dict so its weights can be garbage collected.
    del state
    return args, task, model, use_cuda
def generation_task(args, task, model, use_cuda, input):
    """Run a sequence-generation task (summarization/completion) on one input.

    Args:
        args: Checkpoint args dict used to build the generator.
        task: Task object providing encode_input/decode_output and the generator.
        model: The loaded model.
        use_cuda: Whether to move the encoded input to GPU.
        input: Raw input (e.g. source code string) to generate from.

    Returns:
        The decoded model output.
    """
    generator = task.build_generator(args)
    # Encode input (and feed into gpu when available).
    input = task.encode_input(input)
    if use_cuda:
        input = utils.move_to_cuda(input)
    # Feed input into model, then decode the generated sample.
    output = generator.generate(models=[model], sample=input)
    output = task.decode_output(output)
    # Drop local references so large objects can be released sooner
    # (only unbinds these names; callers' references keep the objects alive).
    del task, model
    return output
def retrieval_task(args, task, model, use_cuda, input):
    """Retrieve the code snippet most similar to a natural-language query.

    Scores every entry of the search dataset by the dot product of its code
    embedding with the query embedding, then returns the line of
    ``args['eval']['code_file']`` at the best-scoring index.

    NOTE(review): assumes the line order of code_file matches the dataset
    order -- confirm against the dataset builder. Also, ``line`` would be
    undefined if code_file were empty.
    """
    # load code_tokens dataset
    code_dataset = task.load_search_dataset(split=args['dataset']['gen_subset'])
    # construct similarities (float16 keeps the score array small)
    import numpy as np
    similarities = np.zeros(shape=len(code_dataset), dtype=np.float16)
    # query embeddding
    query_tokens = task.encode_query_input(input)
    if use_cuda:
        query_tokens = utils.move_to_cuda(query_tokens)
    query_tokens = model.tgt_encoder(query_tokens)
    # code embeddding: one encoder forward pass per dataset entry
    for idx, code_tokens in enumerate(code_dataset):
        if use_cuda:
            code_tokens = utils.move_to_cuda(code_tokens)
        code_tokens = code_tokens.unsqueeze(dim=0)
        code_tokens = model.src_encoder(code_tokens)
        # Unnormalized dot-product similarity between query and code embeddings.
        sim = (query_tokens * code_tokens).sum()
        similarities[idx] = sim.item()
    max_idx = np.argmax(similarities)
    # Scan the raw code file up to the best-scoring line and return it.
    with open(args['eval']['code_file'], 'r') as reader:
        for idx, line in enumerate(reader):
            if idx == max_idx:
                break
    return line
def main(model_path, input):
    """Load the checkpoint at *model_path* and dispatch *input* to the
    pipeline matching the checkpoint's task (generation or retrieval).
    """
    args, task, model, use_cuda = load_state(model_path)
    task_name = args['common']['task']
    if task_name in ('retrieval',):
        return retrieval_task(args, task, model, use_cuda, input)
    if task_name in ('summarization', 'be_summarization', 'completion'):
        return generation_task(args, task, model, use_cuda, input)
    raise NotImplementedError(task_name)
def cli_main():
    """Parse command-line flags and log the model's output for the given input.

    Example checkpoints for summarization, completion and retrieval live
    under ~/.ncc (see the project demos for concrete paths and inputs).
    """
    import argparse
    parser = argparse.ArgumentParser(description="Command Interface")
    parser.add_argument("--model", "-m", type=str, help="pytorch model path")
    parser.add_argument("--input", "-i", type=str, help="model input")
    cli_args = parser.parse_args()
    # Allow '~'-style checkpoint paths on the command line.
    cli_args.model = os.path.expanduser(cli_args.model)
    model_output = main(cli_args.model, cli_args.input)
    LOGGER.info(model_output)
if __name__ == '__main__':
    # Standard script entry point: delegate to the command-line handler.
    cli_main()
|
{"hexsha": "002c049b6ff26bde9dcf7ffef4b42d42de8e017e", "size": 5237, "ext": "py", "lang": "Python", "max_stars_repo_path": "cli/predictor.py", "max_stars_repo_name": "CGCL-codes/naturalcc", "max_stars_repo_head_hexsha": "7bab9a97331fafac1235fb32de829ff8d572320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2020-12-04T02:18:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:19:50.000Z", "max_issues_repo_path": "cli/predictor.py", "max_issues_repo_name": "CGCL-codes/naturalcc", "max_issues_repo_head_hexsha": "7bab9a97331fafac1235fb32de829ff8d572320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-10T17:48:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-13T10:42:22.000Z", "max_forks_repo_path": "cli/predictor.py", "max_forks_repo_name": "CGCL-codes/naturalcc", "max_forks_repo_head_hexsha": "7bab9a97331fafac1235fb32de829ff8d572320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-12-09T12:17:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T09:02:13.000Z", "avg_line_length": 45.1465517241, "max_line_length": 1040, "alphanum_fraction": 0.7000190949, "include": true, "reason": "import numpy", "num_tokens": 1292}
|
import os.path
import cv2
import numpy as np
import collections
from qimage2ndarray import rgb_view, alpha_view, array2qimage, byte_view
from PyQt5.QtCore import Qt, QRectF, pyqtSignal, QT_VERSION_STR, QPoint
from PyQt5.QtGui import QImage, QPixmap, QPainterPath, QPainter, QColor, QPen
from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QFileDialog, QApplication
# Module metadata: this widget is a heavily extended fork of QtImageViewer.
__author__ = "Aleksei Tepljakov <alex@starspirals.net>"
__title__ = "QTImageAnnotator"
__original_author__ = "Marcel Goldschen-Ohm <marcel.goldschen@gmail.com>"
__original_title__ = "QtImageViewer"
__version__ = '1.6.0'
# Undo states: maximum number of overlay/mask snapshots kept for Ctrl+Z.
MAX_CTRLZ_STATES = 20
# TODO: setting the below constant is a temporary solution geared towards fixing a bug
# The situation is as follows: converting from QPixmap to QImage, then also converting to RGBA32 format
# causes slight (and random) variations in the RGB values. When we need to extract different masks, we also
# need to know PRECISE RGB values to compare against. Unfortunately, if they fluctuate, then we can only
# approximately compare the color values, which means we can never have colors that are very close
# together in either R, B, or G values.
#
# To fix this, one could consider drawing to a QImage instead and on every draw, convert that to a QPixmap.
# However, this will likely kill real-time performance. Unless, the QImage is hidden and used only for painting.
# Then we can actually paint greyscale immediately to save time.
#
# For now, we stick to this solution.
# Absolute tolerance (per channel) used with np.isclose when matching colors.
PIXMAP_CONV_BUG_ATOL = 2
# Reusable component for painting over an image for, e.g., masking purposes
class QtImageAnnotator(QGraphicsView):
    """Interactive image-annotation widget.

    The scene is layered, bottom to top: the base image pixmap, optional
    helper overlays, an RGBA paint overlay, and a circular brush cursor
    (shown with an 'X' while erase mode is active).  Supports brush
    painting, erasing, flood fill, contour removal/repaint, undo (Ctrl+Z),
    zoom and pan.

    Fixes relative to the previous revision:
      * numpy-array inputs are now actually accepted (``type(x) is np.array``
        is never true because ``np.array`` is a function, not a type);
      * ``keyReleaseEvent`` forwards to the base *release* handler instead of
        re-triggering the press handler;
      * the base ``mouseReleaseEvent`` is called exactly once;
      * zooming in with '+' no longer crashes before the first mouse move.
    """

    # Mouse button signals emit image scene (x, y) coordinates.
    # !!! For image (row, column) matrix indexing, row = y and column = x.
    leftMouseButtonPressed = pyqtSignal(float, float)
    middleMouseButtonPressed = pyqtSignal(float, float)
    rightMouseButtonPressed = pyqtSignal(float, float)
    leftMouseButtonReleased = pyqtSignal(float, float)
    middleMouseButtonReleased = pyqtSignal(float, float)
    rightMouseButtonReleased = pyqtSignal(float, float)
    leftMouseButtonDoubleClicked = pyqtSignal(float, float)
    middleMouseButtonDoubleClicked = pyqtSignal(float, float)
    rightMouseButtonDoubleClicked = pyqtSignal(float, float)
    mouseWheelRotated = pyqtSignal(float)

    def __init__(self):
        QGraphicsView.__init__(self)

        # Image is displayed as a QPixmap in a QGraphicsScene attached to this QGraphicsView.
        self.scene = QGraphicsScene()
        self.setScene(self.scene)

        # Shape of the loaded image (height, width)
        self.shape = (None, None)

        # Store local handles to the scene's layers.
        self._pixmapHandle = None        # This holds the image
        self._helperHandle = None        # "Helper" overlay, not directly manipulated by the user
        self._auxHelper = None           # Aux helper for various purposes (toggled with 'T')
        self._overlayHandle = None       # The overlay over which we are painting
        self._cursorHandle = None        # Circle cursor that assists with brush size
        self._deleteCrossHandles = None  # 'X' lines signalling that delete mode is active

        # Helper display state
        self.showHelper = True

        # Latest cursor coordinates; needed in some cursor overlay update operations
        self._lastCursorCoords = None
        self._overlay_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)

        # Offscreen mask, used to speed up grayscale-mask export
        # (but has an impact on painting speed)
        self._offscreen_mask = None
        self._offscreen_mask_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)

        # Needed for proper drawing
        self.lastPoint = QPoint()
        self.lastCursorLocation = QPoint()

        # Direct mask painting
        self.direct_mask_paint = False

        # Pixmap that contains the mask and the corresponding painter
        self.mask_pixmap = None

        # Parameters of the brush and paint
        self.brush_diameter = 50
        self.MIN_BRUSH_DIAMETER = 1
        self.MAX_BRUSH_DIAMETER = 500
        self.brush_fill_color = QColor(255, 0, 0, 99)

        # Zoom in modifier: this should be between 4 and 20
        self.zoom_in_modifier = 4

        # Painting and erasing composition modes
        self.MODE_PAINT = QPainter.CompositionMode_Source
        self.MODE_ERASE = QPainter.CompositionMode_Clear
        self.current_painting_mode = self.MODE_PAINT
        self.global_erase_override = False

        # Mask related. This will allow to automatically create overlays given grayscale masks
        # and also save grayscale masks from RGB drawings. Both dicts must be provided for the
        # related functions to work properly (cannot assume unique key-value combinations)
        self.d_rgb2gray = None
        self.d_gray2rgb = None

        # Make mouse events accessible
        self.setMouseTracking(True)

        # Image aspect ratio mode.
        #   Qt.IgnoreAspectRatio: Scale image to fit viewport.
        #   Qt.KeepAspectRatio: Scale image to fit inside viewport, preserving aspect ratio.
        #   Qt.KeepAspectRatioByExpanding: Scale image to fill the viewport, preserving aspect ratio.
        self.aspectRatioMode = Qt.KeepAspectRatio

        # Scroll bar behaviour.
        #   Qt.ScrollBarAlwaysOff: Never shows a scroll bar.
        #   Qt.ScrollBarAlwaysOn: Always shows a scroll bar.
        #   Qt.ScrollBarAsNeeded: Shows a scroll bar only when zoomed.
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)

        # Stack of QRectF zoom boxes in scene coordinates.
        self.zoomStack = []

        # Flags for enabling/disabling mouse interaction.
        self.canZoom = True
        self.canPan = True

    @staticmethod
    def _as_pixmap(image, where):
        """Convert *image* (numpy.ndarray | QImage | QPixmap) to a QPixmap.

        :param where: caller name used in the error message.
        :raises RuntimeError: for unsupported input types.

        BUGFIX: the original tested ``type(image) is np.array``, which is
        never true (``np.array`` is a function), so numpy inputs raised
        RuntimeError instead of being converted.
        """
        if isinstance(image, np.ndarray):
            image = array2qimage(image)
        if type(image) is QPixmap:
            return image
        if type(image) is QImage:
            return QPixmap.fromImage(image)
        raise RuntimeError(where + ": Argument must be a QImage or QPixmap.")

    def _create_cursor_items(self):
        """Add the brush circle and the erase 'X' lines to the scene.

        The 'X' is hidden unless the current painting mode is erase
        (globally toggled with 'D', or temporarily with CTRL while drawing).
        """
        self._cursorHandle = self.scene.addEllipse(0, 0, self.brush_diameter, self.brush_diameter)
        self._deleteCrossHandles = (self.scene.addLine(0, 0, self.brush_diameter, self.brush_diameter),
                                    self.scene.addLine(0, self.brush_diameter, self.brush_diameter, 0))
        if self.current_painting_mode is not self.MODE_ERASE:
            self._deleteCrossHandles[0].hide()
            self._deleteCrossHandles[1].hide()

    def _position_delete_cross(self, x, y):
        """Center the two diagonal 'X' lines of the erase cursor on (x, y)."""
        if self._deleteCrossHandles is None:
            return
        # Half-diagonal of the square inscribed in the brush circle.
        r = self.brush_diameter / (2 * np.sqrt(2))
        self._deleteCrossHandles[0].setLine(x - r, y - r, x + r, y + r)
        self._deleteCrossHandles[1].setLine(x - r, y + r, x + r, y - r)

    def hasImage(self):
        """ Returns whether or not the scene contains an image pixmap.
        """
        return self._pixmapHandle is not None

    def clearImage(self):
        """ Removes the current image pixmap from the scene if it exists.
        """
        if self.hasImage():
            self.scene.removeItem(self._pixmapHandle)
            self._pixmapHandle = None

    def pixmap(self):
        """ Returns the scene's current image pixmap as a QPixmap, or else None if no image exists.
        :rtype: QPixmap | None
        """
        if self.hasImage():
            return self._pixmapHandle.pixmap()
        return None

    def image(self):
        """ Returns the scene's current image pixmap as a QImage, or else None if no image exists.
        :rtype: QImage | None
        """
        if self.hasImage():
            return self._pixmapHandle.pixmap().toImage()
        return None

    def clearAndSetImageAndMask(self, image, mask, helper=None, aux_helper=None,
                                process_gray2rgb=False, direct_mask_paint=False):
        """Reset the scene and load an image together with its paint mask.

        NB! Breaking change vs. v1.0b: both IMAGE and MASK numpy arguments
        are **assumed** to be numpy arrays.

        :param image: base image (numpy.ndarray | QImage | QPixmap).
        :param mask: annotation mask; a numpy array (grayscale if
            process_gray2rgb, otherwise RGBA).
        :param helper: optional layer which helps with the annotation process.
        :param aux_helper: optional extra helper layer; its display can be
            toggled with the 'T' key.
        :param process_gray2rgb: if True, *mask* is grayscale and is converted
            to RGB on initialization using ``d_gray2rgb`` (this is fast).
        :param direct_mask_paint: if True, also paint on a hidden grayscale
            mask so that exporting it later is much faster than converting
            the RGB overlay.
        :raises RuntimeError: on unsupported image types or missing
            color-conversion dictionaries.
        """
        # Clear the scene and reset all layer handles
        self.scene.clear()
        self.direct_mask_paint = direct_mask_paint
        self._pixmapHandle = None
        self._helperHandle = None
        self._auxHelper = None
        self._overlayHandle = None

        # Clear UNDO stack
        self._overlay_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)

        # First we just set the image (numpy input is converted for compatibility)
        pixmap = self._as_pixmap(image, "QtImageAnnotator.clearAndSetImageAndMask")
        self.shape = pixmap.height(), pixmap.width()
        self._pixmapHandle = self.scene.addPixmap(pixmap)
        self.setSceneRect(QRectF(pixmap.rect()))

        # Off-screen mask for direct drawing: convert the numpy mask to QImage
        if direct_mask_paint:
            gray_mask = QImage(mask.data, mask.shape[1], mask.shape[0], mask.strides[0], QImage.Format_Grayscale8)
            # .copy() detaches from the numpy buffer so it can be freed safely
            self._offscreen_mask = gray_mask.copy()
            self._offscreen_mask_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)

        # Now we add the helper layers, if present
        if helper is not None:
            self._helperHandle = self.scene.addPixmap(
                self._as_pixmap(helper, "QtImageAnnotator.clearAndSetImageAndMask"))
        if aux_helper is not None:
            self._auxHelper = self.scene.addPixmap(
                self._as_pixmap(aux_helper, "QtImageAnnotator.clearAndSetImageAndMask"))

        # If we are supplied a grayscale mask that we need to convert to RGB, do it here
        if process_gray2rgb:
            if not self.d_gray2rgb:
                raise RuntimeError("Cannot convert the provided grayscale mask to RGB without color specifications.")
            # We assume mask is np array, grayscale and the conversion rules are set
            h, w = mask.shape
            new_mask = np.zeros((h, w, 4), np.uint8)
            for gr, rgb in self.d_gray2rgb.items():
                # "#63" prefixes the alpha channel (0x63 = 99, the brush alpha)
                col = QColor("#63" + rgb.split("#")[1]).getRgb()  # TODO: not elegant, need external function
                new_mask[mask == gr] = col
            use_mask = array2qimage(new_mask)
        else:
            use_mask = array2qimage(mask)

        pixmap = QPixmap.fromImage(use_mask)
        self.mask_pixmap = pixmap
        self._overlayHandle = self.scene.addPixmap(self.mask_pixmap)

        # Add the brush cursor (and hidden erase cross) on the top layer
        self._create_cursor_items()
        self.updateViewer()

    def clearAll(self):
        """Remove every layer from the scene and reset all state/undo stacks."""
        self.shape = (None, None)
        if self._pixmapHandle is not None:
            self.scene.removeItem(self._pixmapHandle)
        if self._helperHandle is not None:
            self.scene.removeItem(self._helperHandle)
        if self._auxHelper is not None:
            self.scene.removeItem(self._auxHelper)
        if self._overlayHandle is not None:
            self.scene.removeItem(self._overlayHandle)
        self._pixmapHandle = None
        self._helperHandle = None
        self._auxHelper = None
        self._overlayHandle = None
        if self.direct_mask_paint:
            self._offscreen_mask = None
            self._offscreen_mask_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)
        self._overlay_stack = collections.deque(maxlen=MAX_CTRLZ_STATES)
        self.updateViewer()

    def setImage(self, image):
        """ Set the scene's current image pixmap to the input QImage or QPixmap.
        Also creates a fresh, fully transparent paint overlay and the cursor items.
        Raises a RuntimeError if the input image has type other than
        numpy.ndarray, QImage or QPixmap.
        :type image: QImage | QPixmap | numpy.ndarray
        """
        pixmap = self._as_pixmap(image, "ImageViewer.setImage")
        if self.hasImage():
            self._pixmapHandle.setPixmap(pixmap)
        else:
            self._pixmapHandle = self.scene.addPixmap(pixmap)
        self.setSceneRect(QRectF(pixmap.rect()))  # Set scene size to image size.

        # Add the mask layer (fully transparent initially)
        self.mask_pixmap = QPixmap(pixmap.rect().width(), pixmap.rect().height())
        self.mask_pixmap.fill(QColor(0, 0, 0, 0))
        self._overlayHandle = self.scene.addPixmap(self.mask_pixmap)

        # Add brush cursor (and hidden erase cross) to top layer
        self._create_cursor_items()
        self.updateViewer()

    def loadImageFromFile(self, fileName=""):
        """ Load an image from file.
        Without any arguments, loadImageFromFile() will popup a file dialog to choose the image file.
        With a fileName argument, loadImageFromFile(fileName) will attempt to load the specified image file directly.
        """
        if len(fileName) == 0:
            # Qt4 returns a plain string; Qt5 returns (fileName, selectedFilter)
            if QT_VERSION_STR[0] == '4':
                fileName = QFileDialog.getOpenFileName(self, "Open image file.")
            elif QT_VERSION_STR[0] == '5':
                fileName, dummy = QFileDialog.getOpenFileName(self, "Open image file.")
        if len(fileName) and os.path.isfile(fileName):
            image = QImage(fileName)
            self.setImage(image)

    def updateViewer(self):
        """ Show current zoom (if showing entire image, apply current aspect ratio mode).
        """
        if not self.hasImage():
            return
        if len(self.zoomStack) and self.sceneRect().contains(self.zoomStack[-1]):
            self.fitInView(self.zoomStack[-1], self.aspectRatioMode)  # Show zoomed rect
        else:
            self.zoomStack = []  # Clear the zoom stack (in case we got here because of an invalid zoom).
            self.fitInView(self.sceneRect(), self.aspectRatioMode)  # Show entire image.

    def resizeEvent(self, event):
        """ Maintain current zoom on resize.
        """
        self.updateViewer()

    def update_brush_diameter(self, change):
        """Change the brush diameter by *change*, clamped to
        [MIN_BRUSH_DIAMETER, MAX_BRUSH_DIAMETER], and reposition the cursor
        graphics around the last known cursor location."""
        val = self.brush_diameter + change
        # Clamp to the allowed range
        val = max(self.MIN_BRUSH_DIAMETER, min(self.MAX_BRUSH_DIAMETER, val))
        self.brush_diameter = val
        if self._lastCursorCoords is not None:
            x, y = self._lastCursorCoords
        else:
            x, y = 0, 0
        if self._cursorHandle is not None:
            self._cursorHandle.setPos(x - self.brush_diameter / 2, y - self.brush_diameter / 2)
            self._cursorHandle.setRect(0, 0, self.brush_diameter, self.brush_diameter)
        self._position_delete_cross(x, y)

    def update_cursor_location(self, event):
        """Track the mouse and move the brush cursor graphics with it."""
        scenePos = self.mapToScene(event.pos())
        x, y = scenePos.x(), scenePos.y()
        # Store the coordinates for other operations to use
        self._lastCursorCoords = (x, y)
        if self._cursorHandle is not None:
            self._cursorHandle.setPos(x - self.brush_diameter / 2, y - self.brush_diameter / 2)
        self._position_delete_cross(x, y)

    def redraw_cursor(self):
        """Force a repaint of the cursor graphics items."""
        if self._cursorHandle is not None:
            self._cursorHandle.update()
        if self._deleteCrossHandles is not None:
            self._deleteCrossHandles[0].update()
            self._deleteCrossHandles[1].update()

    def fillMarker(self, event):
        """Draw (or erase, depending on current mode) a single brush-sized
        ellipse at the event location."""
        scenePos = self.mapToScene(event.pos())
        painter = QPainter(self.mask_pixmap)
        painter.setCompositionMode(self.current_painting_mode)
        painter.setPen(self.brush_fill_color)
        painter.setBrush(self.brush_fill_color)
        # Get the coordinates of where to draw
        a0 = scenePos.x() - self.brush_diameter / 2
        b0 = scenePos.y() - self.brush_diameter / 2
        r0 = self.brush_diameter
        # Finally, draw
        painter.drawEllipse(a0, b0, r0, r0)
        # TODO: With really large images, update is rather slow. Must somehow fix this.
        # It seems that the way to approach hardcore optimization is to switch to OpenGL
        # for all rendering purposes. This update will likely come much later in the tool's
        # lifecycle.
        self._overlayHandle.setPixmap(self.mask_pixmap)

        # In case of direct mask paint mode, we need to paint on the offscreen mask as well
        if self.direct_mask_paint:
            if not self.d_rgb2gray:
                raise RuntimeError("Cannot use direct mask painting since there is no color conversion rules set.")
            painter = QPainter(self._offscreen_mask)
            painter.setCompositionMode(self.current_painting_mode)
            tc = self.d_rgb2gray[self.brush_fill_color.name()]
            painter.setPen(QColor(tc, tc, tc))
            painter.setBrush(QColor(tc, tc, tc))
            painter.drawEllipse(a0, b0, r0, r0)

        self.lastPoint = scenePos

    def drawMarkerLine(self, event):
        """Draw (or erase) a round-capped line from the last point to the
        current event location; used while dragging with the left button."""
        scenePos = self.mapToScene(event.pos())
        painter = QPainter(self.mask_pixmap)
        painter.setCompositionMode(self.current_painting_mode)
        painter.setPen(QPen(self.brush_fill_color,
                            self.brush_diameter, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
        painter.drawLine(self.lastPoint, scenePos)
        self._overlayHandle.setPixmap(self.mask_pixmap)

        # In case of direct mask paint mode, we need to paint on the offscreen mask as well
        if self.direct_mask_paint:
            if not self.d_rgb2gray:
                raise RuntimeError("Cannot use direct mask painting since there is no color conversion rules set.")
            painter = QPainter(self._offscreen_mask)
            painter.setCompositionMode(self.current_painting_mode)
            tc = self.d_rgb2gray[self.brush_fill_color.name()]
            painter.setPen(QPen(QColor(tc, tc, tc),
                                self.brush_diameter, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
            painter.drawLine(self.lastPoint, scenePos)

        self.lastPoint = scenePos

    def fillArea(self, remove_closed_contour=False, remove_only_current_color=True):
        """Flood-fill the region under the last stored cursor location with the
        current brush color, or erase a closed contour.

        :param remove_closed_contour: if True, the closed contour over which
            the cursor is hovering is erased instead of filled.
        :param remove_only_current_color: with remove_closed_contour, restrict
            the removal to regions painted in the current brush color
            (matched within PIXMAP_CONV_BUG_ATOL per channel).
        """
        # Store previous state so we can go back to it (undo)
        self._overlay_stack.append(self.mask_pixmap.copy())
        if self.direct_mask_paint:
            self._offscreen_mask_stack.append(self._offscreen_mask.copy())

        # We first convert the mask to a QImage and then to ndarray
        orig_mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
        msk = alpha_view(orig_mask).copy()

        # Apply simple thresholding and invert the image
        msk[np.where((msk > 0))] = 255
        msk = 255 - msk
        msk1 = np.copy(msk)
        if remove_closed_contour:
            msk1 = 255 - msk1

        if remove_closed_contour:
            if remove_only_current_color:
                the_mask = np.ones(msk1.shape[:2], np.uint8) * 255  # Initial mask
                fullmask = self.export_ndarray_noalpha()  # Get the colored version
                reds, greens, blues = fullmask[:, :, 0], fullmask[:, :, 1], fullmask[:, :, 2]
                cur_col = list(self.brush_fill_color.getRgb())[:-1]  # Only current color is considered
                # So that fill happens only for this specific color
                the_mask[np.isclose(reds, cur_col[0], atol=PIXMAP_CONV_BUG_ATOL) &
                         np.isclose(greens, cur_col[1], atol=PIXMAP_CONV_BUG_ATOL) &
                         np.isclose(blues, cur_col[2], atol=PIXMAP_CONV_BUG_ATOL)] = 0
            else:
                the_mask = np.zeros(msk1.shape[:2], np.uint8)
        else:
            the_mask = cv2.bitwise_not(np.copy(msk))
        # cv2.floodFill requires a mask that is 2 px taller and wider than the image
        the_mask = cv2.copyMakeBorder(the_mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)

        # Fill the contour starting at the last known cursor location
        seed_point = (int(self.lastCursorLocation.x()), int(self.lastCursorLocation.y()))
        cv2.floodFill(msk1, the_mask, seed_point, 0, 0, 1)

        # We paint in only the newly arrived pixels (or remove the pixels in the contour)
        if remove_closed_contour:
            paintin = msk1
        else:
            paintin = msk - msk1  # This is the fill case

        # Take original pixmap image: it has two components, RGB and ALPHA
        new_img = np.dstack((rgb_view(orig_mask), alpha_view(orig_mask)))

        # Fill the newly created area with current brush color
        if not remove_closed_contour:
            new_img[np.where((paintin == 255))] = list(self.brush_fill_color.getRgb())
        else:
            new_img[np.where((paintin == 0))] = (0, 0, 0, 0)  # Erase

        new_qimg = array2qimage(new_img)

        # In case of direct drawing, need to update the offscreen mask as well
        if self.direct_mask_paint:
            omask = byte_view(self._offscreen_mask).copy()
            omask = omask.reshape(omask.shape[:-1])
            if not remove_closed_contour:
                tc = self.d_rgb2gray[self.brush_fill_color.name()]
                omask[np.where((paintin == 255))] = tc
            else:
                omask[np.where((paintin == 0))] = 0
            self._offscreen_mask = QImage(omask.data, omask.shape[1], omask.shape[0], omask.strides[0],
                                          QImage.Format_Grayscale8)

        # Finally update the on-screen overlay
        self.mask_pixmap = QPixmap.fromImage(new_qimg)
        self._overlayHandle.setPixmap(self.mask_pixmap)

    def repaintArea(self):
        """Repaint the connected contour under the cursor (disregarding color
        information) with the current brush color."""
        # Store previous state so we can go back to it (undo)
        self._overlay_stack.append(self.mask_pixmap.copy())
        if self.direct_mask_paint:
            self._offscreen_mask_stack.append(self._offscreen_mask.copy())

        orig_mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
        msk = alpha_view(orig_mask).copy()
        msk[np.where((msk > 0))] = 255
        msk = 255 - msk
        msk1 = 255 - np.copy(msk)

        # Empty fill mask (with the mandatory 1 px border for cv2.floodFill)
        the_mask = cv2.copyMakeBorder(np.zeros(msk1.shape[:2], np.uint8), 1, 1, 1, 1, cv2.BORDER_CONSTANT, 0)
        seed_point = (int(self.lastCursorLocation.x()), int(self.lastCursorLocation.y()))
        cv2.floodFill(msk1, the_mask, seed_point, 0, 0, 1)

        # XOR isolates exactly the flooded (connected) region
        paintin = np.bitwise_xor(msk, msk1)
        new_img = np.dstack((rgb_view(orig_mask), alpha_view(orig_mask)))
        new_img[np.where((paintin == 0))] = list(self.brush_fill_color.getRgb())
        new_qimg = array2qimage(new_img)

        if self.direct_mask_paint:
            omask = byte_view(self._offscreen_mask).copy()
            omask = omask.reshape(omask.shape[:-1])
            tc = self.d_rgb2gray[self.brush_fill_color.name()]
            omask[np.where((paintin == 0))] = tc
            self._offscreen_mask = QImage(omask.data, omask.shape[1], omask.shape[0], omask.strides[0],
                                          QImage.Format_Grayscale8)

        self.mask_pixmap = QPixmap.fromImage(new_qimg)
        self._overlayHandle.setPixmap(self.mask_pixmap)

    '''
    ***********************
    IMPORTERS AND EXPORTERS
    ***********************
    '''

    def export_rgb2gray_mask(self):
        """Export the annotation as a grayscale mask (numpy array).

        Should normally be used with direct mode, which supports up to 255
        mask colors and is much faster.  Without direct mode the RGB overlay
        is matched color-by-color using ``d_rgb2gray``.

        :raises RuntimeError: if there is no overlay or no conversion rules.
        """
        if self._overlayHandle is None:
            raise RuntimeError("There is no RGB mask to export to grayscale.")
        if not self.d_rgb2gray:
            raise RuntimeError("Cannot convert the RGB mask to grayscale without color specifications.")
        if self.direct_mask_paint:
            # Easy mode: the grayscale mask is painted alongside the overlay
            mask = byte_view(self._offscreen_mask).copy()
            mask = mask.reshape(mask.shape[:-1])
        else:
            # The hard way: split the image into rgb components
            rgb_m = self.export_ndarray_noalpha()
            reds, greens, blues = rgb_m[:, :, 0], rgb_m[:, :, 1], rgb_m[:, :, 2]
            h, w, _ = rgb_m.shape
            mask = np.zeros((h, w), np.uint8)
            # Go through all the colors and paint the grayscale mask according to the conversion spec
            for rgb, gr in self.d_rgb2gray.items():
                cc = list(QColor(rgb).getRgb())
                mask[np.isclose(reds, cc[0], atol=PIXMAP_CONV_BUG_ATOL) &
                     np.isclose(greens, cc[1], atol=PIXMAP_CONV_BUG_ATOL) &
                     np.isclose(blues, cc[2], atol=PIXMAP_CONV_BUG_ATOL)] = gr
        return mask

    def export_ndarray_noalpha(self):
        """Export the current mask WITHOUT the alpha channel (mask types are
        determined by colors, not by alpha anyway)."""
        mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
        return rgb_view(mask).copy()

    def export_ndarray(self):
        """Export the current mask as an RGBA ndarray."""
        mask = self.mask_pixmap.toImage().convertToFormat(QImage.Format_ARGB32)
        return np.dstack((rgb_view(mask).copy(), alpha_view(mask).copy()))

    '''
    **************
    EVENT HANDLERS
    **************
    '''

    def wheelEvent(self, event):
        """CTRL+wheel changes the brush size; plain wheel scrolls as usual."""
        if self.hasImage():
            self.redraw_cursor()
            if QApplication.keyboardModifiers() & Qt.ControlModifier:
                change = 1 if event.angleDelta().y() > 0 else -1
                self.update_brush_diameter(change)
                self.redraw_cursor()
                self.mouseWheelRotated.emit(change)
            else:
                QGraphicsView.wheelEvent(self, event)

    def mouseMoveEvent(self, event):
        """Track the cursor, pan with the middle button, draw with the left."""
        if self.hasImage():
            # Make sure that the element has focus when the mouse moves,
            # otherwise keyboard shortcuts will not work
            if not self.hasFocus():
                self.setFocus()
            self.update_cursor_location(event)

            # Support for panning
            if event.buttons() == Qt.MiddleButton:
                offset = self.__prevMousePos - event.pos()
                self.__prevMousePos = event.pos()
                self.verticalScrollBar().setValue(self.verticalScrollBar().value() + offset.y())
                self.horizontalScrollBar().setValue(self.horizontalScrollBar().value() + offset.x())

            # Filling in the markers
            if event.buttons() == Qt.LeftButton:
                self.drawMarkerLine(event)

            # Store cursor location separately; needed for certain operations (like fill)
            self.lastCursorLocation = self.mapToScene(event.pos())
        QGraphicsView.mouseMoveEvent(self, event)

    def keyPressEvent(self, event):
        """Keyboard shortcuts: +/- zoom, F fill, CTRL+X/CTRL+Q contour removal,
        D erase toggle, H hide overlay, T toggle helper, CTRL+Z undo."""
        if self.hasImage():
            # Zoom in around the last known cursor position.
            # Guarded against a missing cursor position (previously a crash
            # before the first mouse move) and made consistent with the
            # canZoom check that zoom-out already performs.
            if event.key() == Qt.Key_Plus:
                if self.canZoom and self._lastCursorCoords is not None:
                    viewBBox = self.zoomStack[-1] if len(self.zoomStack) else self.sceneRect()
                    wh12 = int(max(viewBBox.width(), viewBBox.height()) / self.zoom_in_modifier)
                    x, y = self._lastCursorCoords
                    selectionBBox = QRectF(x - wh12, y - wh12, 2 * wh12, 2 * wh12).intersected(viewBBox)
                    if selectionBBox.isValid() and (selectionBBox != viewBBox):
                        self.zoomStack.append(selectionBBox)
                        self.updateViewer()

            # Zoom out
            if event.key() == Qt.Key_Minus:
                if self.canZoom:
                    viewBBox = self.zoomStack[-1] if len(self.zoomStack) else False
                    if viewBBox:
                        self.zoomStack = self.zoomStack[:-1]
                        self.updateViewer()

            # Fill mask region
            if event.key() == Qt.Key_F:
                try:
                    self.viewport().setCursor(Qt.BusyCursor)
                    self.fillArea()
                except Exception as e:
                    print("Cannot fill region. Additional information:")
                    print(e)
                self.viewport().setCursor(Qt.ArrowCursor)

            # Erase closed contour under cursor with current paint color
            if event.key() == Qt.Key_X:
                if QApplication.keyboardModifiers() & Qt.ControlModifier:
                    try:
                        self.viewport().setCursor(Qt.BusyCursor)
                        self.fillArea(remove_closed_contour=True)
                    except Exception as e:
                        print("Cannot remove the contour. Additional information:")
                        print(e)
                    self.viewport().setCursor(Qt.ArrowCursor)

            # Erase closed contour under cursor and any connected contour regardless of color
            if event.key() == Qt.Key_Q:
                if QApplication.keyboardModifiers() & Qt.ControlModifier:
                    try:
                        self.viewport().setCursor(Qt.BusyCursor)
                        self.fillArea(remove_closed_contour=True, remove_only_current_color=False)
                    except Exception as e:
                        print("Cannot remove the contour. Additional information:")
                        print(e)
                    self.viewport().setCursor(Qt.ArrowCursor)

            # Erase mode enable/disable
            if event.key() == Qt.Key_D:
                self.global_erase_override = not self.global_erase_override
                if self.global_erase_override:
                    self.current_painting_mode = self.MODE_ERASE
                    self._deleteCrossHandles[0].show()
                    self._deleteCrossHandles[1].show()
                else:
                    self.current_painting_mode = self.MODE_PAINT
                    self._deleteCrossHandles[0].hide()
                    self._deleteCrossHandles[1].hide()

            # Temporarily hide the overlay (shown again on key release)
            if event.key() == Qt.Key_H:
                self._overlayHandle.hide()

            # Toggle helper on and off
            if event.key() == Qt.Key_T:
                if self._auxHelper is not None:
                    if self.showHelper:
                        self._auxHelper.hide()
                        self.showHelper = False
                    else:
                        self._auxHelper.show()
                        self.showHelper = True

            # Undo operations
            if event.key() == Qt.Key_Z:
                if QApplication.keyboardModifiers() & Qt.ControlModifier:
                    if len(self._overlay_stack) > 0:
                        self.mask_pixmap = self._overlay_stack.pop()
                        self._overlayHandle.setPixmap(self.mask_pixmap)
                    if self.direct_mask_paint:
                        if len(self._offscreen_mask_stack) > 0:
                            self._offscreen_mask = self._offscreen_mask_stack.pop()

            # When CONTROL is pressed, show the delete cross
            if event.key() == Qt.Key_Control and not self.global_erase_override:
                self._deleteCrossHandles[0].show()
                self._deleteCrossHandles[1].show()
        QGraphicsView.keyPressEvent(self, event)

    def keyReleaseEvent(self, event):
        """Undo the temporary visual states set up by keyPressEvent."""
        if self.hasImage():
            # Hide the delete cross when CTRL is released
            if event.key() == Qt.Key_Control and not self.global_erase_override:
                self._deleteCrossHandles[0].hide()
                self._deleteCrossHandles[1].hide()
            # Show the overlay again
            if event.key() == Qt.Key_H:
                self._overlayHandle.show()
        # BUGFIX: the original forwarded to QGraphicsView.keyPressEvent here,
        # re-running press handling on every key release.
        QGraphicsView.keyReleaseEvent(self, event)

    def mousePressEvent(self, event):
        """ Start drawing, panning with mouse, or zooming in
        """
        if self.hasImage():
            scenePos = self.mapToScene(event.pos())
            if event.button() == Qt.LeftButton:
                # Snapshot state for undo before any painting happens
                self._overlay_stack.append(self.mask_pixmap.copy())
                if self.direct_mask_paint:
                    self._offscreen_mask_stack.append(self._offscreen_mask.copy())

                # If ALT is held, replace color of the contour under the cursor
                repaint_was_active = False
                if QApplication.keyboardModifiers() & Qt.AltModifier:
                    try:
                        repaint_was_active = True
                        self.viewport().setCursor(Qt.BusyCursor)
                        self.repaintArea()
                    except Exception as e:
                        print("Cannot repaint region. Additional information:")
                        print(e)
                    self.viewport().setCursor(Qt.ArrowCursor)

                # If SHIFT is held, draw a line from the last point
                if QApplication.keyboardModifiers() & Qt.ShiftModifier:
                    self.drawMarkerLine(event)

                # If CONTROL is held, erase, but only if global erase override is not enabled
                if not self.global_erase_override:
                    if QApplication.keyboardModifiers() & Qt.ControlModifier:
                        self.current_painting_mode = self.MODE_ERASE
                    else:
                        self.current_painting_mode = self.MODE_PAINT

                # If the user just clicks, add a marker (unless repainting was done)
                if not repaint_was_active:
                    self.fillMarker(event)
                self.leftMouseButtonPressed.emit(scenePos.x(), scenePos.y())
            elif event.button() == Qt.MiddleButton:
                if self.canPan:
                    self.__prevMousePos = event.pos()
                    self.viewport().setCursor(Qt.ClosedHandCursor)
                    self._cursorHandle.hide()
                self.middleMouseButtonPressed.emit(scenePos.x(), scenePos.y())
            elif event.button() == Qt.RightButton:
                if self.canZoom:
                    self.setDragMode(QGraphicsView.RubberBandDrag)
                    self._cursorHandle.hide()
                self.rightMouseButtonPressed.emit(scenePos.x(), scenePos.y())
        QGraphicsView.mousePressEvent(self, event)

    def mouseReleaseEvent(self, event):
        """ Stop mouse pan or zoom mode (apply zoom if valid).
        """
        # Let the base class finish the drag/rubber-band first so that
        # scene.selectionArea() is up to date below.
        # BUGFIX: the original invoked the base implementation twice
        # (once inside the hasImage branch and once at the end).
        QGraphicsView.mouseReleaseEvent(self, event)
        if self.hasImage():
            scenePos = self.mapToScene(event.pos())
            if event.button() == Qt.MiddleButton:
                self.viewport().setCursor(Qt.ArrowCursor)
                self._cursorHandle.show()
                self.middleMouseButtonReleased.emit(scenePos.x(), scenePos.y())
            elif event.button() == Qt.RightButton:
                if self.canZoom:
                    viewBBox = self.zoomStack[-1] if len(self.zoomStack) else self.sceneRect()
                    selectionBBox = self.scene.selectionArea().boundingRect().intersected(viewBBox)
                    self.scene.setSelectionArea(QPainterPath())  # Clear current selection area.
                    if selectionBBox.isValid() and (selectionBBox != viewBBox):
                        self.zoomStack.append(selectionBBox)
                        self.updateViewer()
                self.setDragMode(QGraphicsView.NoDrag)
                self._cursorHandle.show()
                self.rightMouseButtonReleased.emit(scenePos.x(), scenePos.y())

    def mouseDoubleClickEvent(self, event):
        """ Show entire image (right double-click clears the zoom stack).
        """
        if self.hasImage():
            scenePos = self.mapToScene(event.pos())
            if event.button() == Qt.MiddleButton:
                self.middleMouseButtonDoubleClicked.emit(scenePos.x(), scenePos.y())
            elif event.button() == Qt.RightButton:
                if self.canZoom:
                    self.zoomStack = []  # Clear zoom stack.
                    self.updateViewer()
                self.rightMouseButtonDoubleClicked.emit(scenePos.x(), scenePos.y())
        QGraphicsView.mouseDoubleClickEvent(self, event)
if __name__ == '__main__':
    # Minimal manual demo: open a file dialog, show the annotator, run Qt.
    import sys
    from PyQt5.QtWidgets import QApplication
    def handleMiddleClick(x, y):
        # NOTE(review): this handler is never connected to any signal below,
        # and row/column are computed but unused - presumably demo leftovers.
        row = int(y)
        column = int(x)
    # Create the application.
    app = QApplication(sys.argv)
    # Create image viewer and load an image file to display.
    viewer = QtImageAnnotator()
    viewer.loadImageFromFile() # Pops up file dialog.
    # Show viewer and run application.
    viewer.show()
    sys.exit(app.exec_())
|
{"hexsha": "2d9922bf62b4ca7656d74d2dbb1ebbeff2333fa7", "size": 40552, "ext": "py", "lang": "Python", "max_stars_repo_path": "ui_lib/QtImageAnnotator.py", "max_stars_repo_name": "extall/datm-annotation-tool", "max_stars_repo_head_hexsha": "7fe5a9648a94744e93f18af80fde020f8f0768eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-12T10:18:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-22T16:08:47.000Z", "max_issues_repo_path": "ui_lib/QtImageAnnotator.py", "max_issues_repo_name": "extall/datm-annotation-tool", "max_issues_repo_head_hexsha": "7fe5a9648a94744e93f18af80fde020f8f0768eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-02T08:42:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-27T13:48:21.000Z", "max_forks_repo_path": "ui_lib/QtImageAnnotator.py", "max_forks_repo_name": "extall/datm-annotation-tool", "max_forks_repo_head_hexsha": "7fe5a9648a94744e93f18af80fde020f8f0768eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-16T10:12:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T10:12:44.000Z", "avg_line_length": 43.5107296137, "max_line_length": 120, "alphanum_fraction": 0.608601302, "include": true, "reason": "import numpy", "num_tokens": 8763}
|
#!/usr/bin/env python3
import keras
from keras.applications.nasnet import preprocess_input, NASNetLarge
#from keras_applications.resnet import ResNet50, ResNet101, ResNet152
from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar100
import numpy as np
import os
# Training parameters
batch_size = 32
epochs = 200
# NOTE(review): the dataset loaded below is CIFAR-100 (100 classes) while
# num_classes says 10, and none of these three constants are used elsewhere
# in this file - confirm before relying on them.
num_classes = 10
class KerasModel():
    '''Create, train and predict with a Keras neural network.

    Wraps the keras.applications constructors behind a single interface;
    the architecture is selected by name through ``model_spec``.
    '''
    def __init__(self,
                 model_spec='ResNet50',
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 pooling='max',
                 classes=1000):
        # BUG FIX: was `self.model_spec = model`, a NameError (`model` does
        # not exist in this scope); store the parameter actually passed in.
        # The default is now 'ResNet50' - the old 'ResNet20' is not a key of
        # model_options and always raised KeyError below.
        self.model_spec = model_spec
        self.include_top = include_top
        self.weights = weights
        self.input_tensor = input_tensor
        self.pooling = pooling
        self.classes = classes
        # Without the classification head an explicit input shape is supplied.
        if not self.include_top:
            self.input_shape = (224, 224, 3)
        else:
            self.input_shape = None
        # BUG FIX: ResNet101/ResNet152 come from the keras_applications import
        # that is commented out at the top of this file; referencing them
        # unconditionally raised NameError.  Register them only if available.
        self.model_options = {
            'ResNet50': ResNet50,
            'NASNetLarge': NASNetLarge,
        }
        for optional in ('ResNet101', 'ResNet152'):
            if optional in globals():
                self.model_options[optional] = globals()[optional]
        # Unknown names still raise KeyError here, as before.
        self.model = self.model_options[self.model_spec]
    def build_model(self):
        """Instantiate the selected architecture and store it on self.model.

        Looks the constructor up again so build_model() can be called more
        than once (previously the constructor was overwritten by the built
        model, so a second call crashed).
        """
        constructor = self.model_options[self.model_spec]
        self.model = constructor(include_top=self.include_top,
                                 weights=self.weights,
                                 input_tensor=self.input_tensor,
                                 input_shape=self.input_shape,
                                 pooling=self.pooling,
                                 classes=self.classes)
    def train_model(self):
        """Not implemented yet."""
        pass
    def predict(self):
        """Not implemented yet."""
        pass
# NOTE(review): both constructors run at import time and load the full
# ImageNet weight files, independently of the KerasModel class above -
# presumably intentional for a GPU stress test, confirm.
resnet50 = ResNet50(include_top=True,
                    weights='imagenet',
                    input_tensor=None,
                    input_shape=None,
                    pooling=None,
                    classes=1000)
nasnet_large = NASNetLarge(input_shape=None,
                           include_top=True,
                           weights='imagenet',
                           input_tensor=None,
                           pooling=None,
                           classes=1000)
# Load the CIFAR-100 train/test split and report the array shapes.
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
print(f'x_train shape: {x_train.shape}')
print(f'y_train shape: {y_train.shape}')
print(f'x_test shape: {x_test.shape}')
print(f'y_test shape: {y_test.shape}')
|
{"hexsha": "6209efcc19571305cfa0372ab5b81a11732ccbe5", "size": 3129, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/keras_applications.py", "max_stars_repo_name": "chriswmann/keras-applications-gpu-stress-test", "max_stars_repo_head_hexsha": "1e79b2e69edf557f788832540678fb06fcb77512", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/keras_applications.py", "max_issues_repo_name": "chriswmann/keras-applications-gpu-stress-test", "max_issues_repo_head_hexsha": "1e79b2e69edf557f788832540678fb06fcb77512", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/keras_applications.py", "max_forks_repo_name": "chriswmann/keras-applications-gpu-stress-test", "max_forks_repo_head_hexsha": "1e79b2e69edf557f788832540678fb06fcb77512", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9368421053, "max_line_length": 412, "alphanum_fraction": 0.527005433, "include": true, "reason": "import numpy", "num_tokens": 564}
|
import numpy as np
from numpy import convolve
import matplotlib.pyplot as plt
def movingaverage(values, window):
    """Simple moving average of `values` over `window` points.

    Uses numpy's 'valid' convolution, so the result has
    len(values) - window + 1 entries.
    """
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'valid')
def moving_fun(dataframe, col, blanking, duration, newname='movmin', fun=min):
    """Rolling aggregate of `col` written in place to a new column `newname`.

    For each row, `fun` is evaluated over a window of `duration` timepoints
    that ends `blanking` timepoints before that row; the first
    blanking + duration rows remain NaN.  Mutates `dataframe` in place.
    """
    dataframe.loc[:, newname] = np.nan
    src = list(dataframe.columns).index(col)
    dst = list(dataframe.columns).index(newname)
    n_rows = len(dataframe)
    window_values = [fun(dataframe.iloc[start:start + duration, src])
                     for start in range(n_rows - blanking - duration)]
    dataframe.iloc[blanking + duration:, dst] = np.asarray(window_values,
                                                           dtype=np.float64)
def percent_change(dataframe, col, newname='percent_change'):
    """Per-row relative change of `col`, written in place to `newname`.

    Row i gets (value[i] - value[i-1]) / value[i]; the first row keeps the
    initial 0.  NOTE(review): the denominator is the *current* value, not the
    previous one (unusual for a percent change), and rows are addressed by
    feeding the index labels to .iloc, so a default integer RangeIndex is
    assumed.  Both quirks are preserved as-is.
    """
    dataframe[newname] = 0
    dst = list(dataframe.columns).index(newname)
    src = list(dataframe.columns).index(col)
    labels = list(dataframe.index)
    current = dataframe.iloc[labels[1:], src].values
    previous = dataframe.iloc[labels[:-1], src].values
    dataframe.iloc[labels[1:], dst] = (current - previous) / current
def diff(dataframe, col, newname='percent_change'):
    """First difference of `col` (current - previous), written to `newname`.

    The first row keeps the initial 0.  Rows are addressed by feeding index
    labels to .iloc, so a default integer RangeIndex is assumed; the default
    column name 'percent_change' is kept for backward compatibility.
    """
    dataframe[newname] = 0
    dst = list(dataframe.columns).index(newname)
    src = list(dataframe.columns).index(col)
    labels = list(dataframe.index)
    later = dataframe.iloc[labels[1:], src].values
    earlier = dataframe.iloc[labels[:-1], src].values
    dataframe.iloc[labels[1:], dst] = later - earlier
|
{"hexsha": "cead7da917b2f838d5283c887709044cb04d3894", "size": 1755, "ext": "py", "lang": "Python", "max_stars_repo_path": "quickndirtybot/strategies/util.py", "max_stars_repo_name": "Heerpa/quickndirtybot", "max_stars_repo_head_hexsha": "d0ca6f2d0e33f2b7642c81efec7d4d406aa85a14", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "quickndirtybot/strategies/util.py", "max_issues_repo_name": "Heerpa/quickndirtybot", "max_issues_repo_head_hexsha": "d0ca6f2d0e33f2b7642c81efec7d4d406aa85a14", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "quickndirtybot/strategies/util.py", "max_forks_repo_name": "Heerpa/quickndirtybot", "max_forks_repo_head_hexsha": "d0ca6f2d0e33f2b7642c81efec7d4d406aa85a14", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0, "max_line_length": 111, "alphanum_fraction": 0.6216524217, "include": true, "reason": "import numpy,from numpy", "num_tokens": 410}
|
# Packages used across the analysis: report generation, web scraping,
# data wrangling, interactive/static visualisation and spatial data.
library(knitr)
library(rvest)
library(gsubfn)
library(reshape2)
library(shiny)
library(tidyr)
library(dygraphs)
library(xts)
library(tidyverse)
library(lubridate)
library(tmap)
library("readxl")
library("openxlsx")
# NOTE(review): tidyverse already attaches tidyr, and attach order controls
# function masking in R - confirm before reordering these calls.
# Both a remote and a local copy of uvozi.zemljevid.r are sourced; the local
# one (loaded last) wins for any duplicated definitions.
source("https://raw.githubusercontent.com/jaanos/APPR-2019-20/master/lib/uvozi.zemljevid.r")
# Import the functions for downloading and importing the map.
source("lib/uvozi.zemljevid.r", encoding="UTF-8")
|
{"hexsha": "a7038937238d9d1cb00a8c6f10a8862c9df81793", "size": 411, "ext": "r", "lang": "R", "max_stars_repo_path": "lib/libraries.r", "max_stars_repo_name": "OkornA18/APPR-2019-20", "max_stars_repo_head_hexsha": "bb5cd8126a581978c434a7bb941e1bf3fb8c1e56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/libraries.r", "max_issues_repo_name": "OkornA18/APPR-2019-20", "max_issues_repo_head_hexsha": "bb5cd8126a581978c434a7bb941e1bf3fb8c1e56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-18T14:47:06.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-16T18:12:14.000Z", "max_forks_repo_path": "lib/libraries.r", "max_forks_repo_name": "OkornA18/APPR-2019-20", "max_forks_repo_head_hexsha": "bb5cd8126a581978c434a7bb941e1bf3fb8c1e56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8333333333, "max_line_length": 92, "alphanum_fraction": 0.7883211679, "num_tokens": 129}
|
[STATEMENT]
lemma infinite_cball:
fixes a :: "'a::euclidean_space"
assumes "r > 0"
shows "infinite (cball a r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. infinite (cball a r)
[PROOF STEP]
using uncountable_cball[OF assms, THEN uncountable_infinite,of a]
[PROOF STATE]
proof (prove)
using this:
infinite (cball a r)
goal (1 subgoal):
1. infinite (cball a r)
[PROOF STEP]
.
|
{"llama_tokens": 164, "file": "Count_Complex_Roots_Count_Line", "length": 2}
|
#include <boost/test/unit_test.hpp>
#include "unbounded_ordered/node/unbounded_ordered_node.hpp"
// Fixture shared by the node-property test suite: provides the concrete
// int-payload node type under test.  Construction/destruction do nothing;
// each test case manages its own node lifetimes.
struct NodePropertiesTest {
  typedef unbounded_ordered::node<int> nodeint;
  NodePropertiesTest() {}
  ~NodePropertiesTest() {}
};
BOOST_FIXTURE_TEST_SUITE( node_properties_suite, NodePropertiesTest )
BOOST_AUTO_TEST_CASE( count_children ) {
  // Build the fixture tree bottom-up: leaves first, then the parents that
  // adopt them, and finally the root (node 0).
  nodeint* n[12];
  n[11] = new nodeint(11);
  n[10] = new nodeint(10, {n[11]});
  n[9] = new nodeint(9);
  n[8] = new nodeint(8, {n[9]});
  for(int v = 3; v <= 7; ++v) {
    n[v] = new nodeint(v);
  }
  n[2] = new nodeint(2, {n[7], n[8]});
  n[1] = new nodeint(1, {n[4], n[3], n[10], n[5]});
  n[0] = new nodeint(0, {n[6], n[1], n[2]});
  // Expected direct-child counts, indexed by node value.
  const std::size_t expected[12] = {3, 4, 2, 0, 0, 0, 0, 0, 1, 0, 1, 0};
  for(int v = 0; v < 12; ++v) {
    BOOST_CHECK(expected[v] == n[v]->count_children());
  }
  n[0]->delete_subtree();
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "1e786b0c551ad9cb339dc2c3bf48c2ef5464fb7e", "size": 1038, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tst/node/node_properties_test.cpp", "max_stars_repo_name": "lejeuneretif/unbounded-ordered-tree", "max_stars_repo_head_hexsha": "0e4a431ee0b1228295c651858449005eb3e5c813", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-26T09:54:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T09:54:25.000Z", "max_issues_repo_path": "tst/node/node_properties_test.cpp", "max_issues_repo_name": "lejeuneretif/unbounded-ordered-tree", "max_issues_repo_head_hexsha": "0e4a431ee0b1228295c651858449005eb3e5c813", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tst/node/node_properties_test.cpp", "max_forks_repo_name": "lejeuneretif/unbounded-ordered-tree", "max_forks_repo_head_hexsha": "0e4a431ee0b1228295c651858449005eb3e5c813", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6153846154, "max_line_length": 69, "alphanum_fraction": 0.6396917148, "num_tokens": 370}
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import caffe
caffe.set_device(0)
caffe.set_mode_gpu()
# Load the network definition in TEST phase; no .caffemodel is supplied, so
# the parameters keep their initialized values until net.save() below.
net = caffe.Net('conv.prototxt', caffe.TEST)
# network arch
print(net.blobs['data'])
for k, v in net.blobs.items():
    print(k, v.data.shape)
# params
print("weights: ", net.params['conv'][0].data.shape)
print("bias: ", net.params['conv'][1].data.shape)
# Add batch and channel axes -> (1, 1, H, W).  Assumes the image decodes to a
# 2-D (grayscale) array, as the filename suggests - TODO confirm.
im = np.array(Image.open('cat_gray.jpg'))
im_input = im[np.newaxis, np.newaxis, :, :]
net.blobs['data'].reshape(*im_input.shape)
net.blobs['data'].data[...] = im_input
# compute
net.forward()
net.save('cnn.caffemodel')
|
{"hexsha": "ba876cfd54ceab7c8a68820445f0de2202ff899c", "size": 611, "ext": "py", "lang": "Python", "max_stars_repo_path": "memos/caffe/cnn.py", "max_stars_repo_name": "Bingwen-Hu/hackaway", "max_stars_repo_head_hexsha": "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "memos/caffe/cnn.py", "max_issues_repo_name": "Bingwen-Hu/hackaway", "max_issues_repo_head_hexsha": "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "memos/caffe/cnn.py", "max_forks_repo_name": "Bingwen-Hu/hackaway", "max_forks_repo_head_hexsha": "69727d76fd652390d9660e9ea4354ba5cc76dd5c", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.09375, "max_line_length": 52, "alphanum_fraction": 0.6923076923, "include": true, "reason": "import numpy", "num_tokens": 168}
|
import gym
import numpy as np
from maze import *
from matplotlib import pyplot as plt
from evaluationCar import *
from evaluationREINFORCE import *
# Build the continuous mountain-car task and raise the per-episode step cap.
env = gym.make("MountainCarContinuous-v0")
observation = env.reset()
env._max_episode_steps = 2000
discount = 0.9
# get the actionsSpace
# [position]
actionSpace = env.action_space
actionSpaceLowerBound = actionSpace.low
actionSpaceUpperBound = actionSpace.high
# Discretize the continuous action range into evenly spaced bins.
numBinsActions = 100
actionBins = np.linspace(actionSpaceLowerBound, actionSpaceUpperBound, numBinsActions)
# get the observations space
# [position, velocity]
observationSpace = env.observation_space
obsSpaceLowerbound = observationSpace.low
obsSpaceUpperbound = observationSpace.high
# Discretize each observation dimension into evenly spaced bins.
numBinsObs = 100
obsBinsPos = np.linspace(obsSpaceLowerbound[0], obsSpaceUpperbound[0], numBinsObs)
obsBinsVel = np.linspace(obsSpaceLowerbound[1], obsSpaceUpperbound[1], numBinsObs)
#do the policy gradient
def REINFORCECar():
    """REINFORCE with a linear softmax policy over the discretized actions.

    Collects one 2000-step trajectory per episode, computes returns and a
    mean-reward baseline, and (in principle) updates theta; evaluation stats
    are gathered every 50 episodes and plotted at the end.
    """
    learningRate = .9
    eval_steps, eval_reward = [], []
    # Linear function approximation: one weight column per discrete action.
    theta = np.random.random(size=(2,numBinsActions))
    # NOTE(review): np.sum(2,) is simply 2, and b is overwritten by np.mean
    # inside the update loop below - confirm this initialisation is wanted.
    b = np.sum(2,)
    numIter = 1000
    for i_episode in range(numIter):
        #collect a set of trajectories by executing current policy
        time = 2000
        trajectories = np.zeros((3,time)) #(state,action,reward)
        #collect a set the average of the observations
        scores = np.zeros((2,time))
        observation = env.reset()
        for t in range(time-1):
            # find the value of phi*theta
            phiTheta = np.dot(observation, theta)
            # take exponentials for softmax
            phiThetaExp = np.exp(phiTheta)
            # find the probailities of each column
            probs = np.mean(phiThetaExp, axis=0) / np.mean(phiThetaExp)
            # Epsilon decays with the step index within the episode.
            # NOTE(review): under Python 2, t / (100) is integer division, so
            # e stays 1.0 for the first 100 steps - confirm target interpreter.
            e = 1.0 - t / (100)
            # randomly pick action based on epsilon
            if np.random.rand() < e:
                action = random.uniform(actionSpaceLowerBound, actionSpaceUpperBound)
            else:
                # find the appropriate action
                actionDes = int(np.argmax(probs))
                action = np.array(actionBins[actionDes]).reshape((1,))
            #take a step
            observationPrime, reward, done, info = env.step(action)
            #fill im trajectories
            trajectories[1,t] = action
            trajectories[2,t] = reward
            # Presumably a stand-in for the score (gradient of log pi) -
            # TODO confirm the intended expression.
            scores[:,t] = (observation - observation/3).reshape(2,)
            observation = observationPrime
            env.render()
        # evaluation
        # NOTE(review): passes the most recent `action`, not the policy -
        # confirm evaluationREINFORCE's expected arguments.
        if (i_episode % 50 == 0):
            avg_step, avg_reward = evaluationREINFORCE(env, action)
            eval_steps.append(avg_step)
            eval_reward.append(avg_reward)
        #find gradient of J(theta)
        for t in range(time):
            #find Gt and update bt
            Gt = 0
            for tRest in range(t,time-1):
                Gt = Gt + ((discount**tRest) * trajectories[2,tRest])
            #get b
            b = np.mean(trajectories[2,:])
            #get the advantage
            At = Gt - b
            #correct action taken
            action = int(trajectories[1,t])
            # NOTE(review): the update term is commented out below, so theta
            # never changes and no learning actually occurs.
            gHat = scores[:,t]
            #reconfigure thetas
            theta[:,action] = theta[:,action] #+ learningRate * discount * At * gHat
    f2, ax2 = plt.subplots()
    # repeat for different algs
    ax2.plot(range(0, numIter, 50), eval_steps)
    f2.suptitle('Evaluation Steps')
    f3, ax3 = plt.subplots()
    # repeat for different algs
    ax3.plot(range(0, numIter, 50), eval_reward)
    f3.suptitle('Evaluation Reward')
    plt.show()
def qLearningMountain():
    """Tabular Q-learning on the discretized mountain-car state/action grid.

    Epsilon-greedy exploration decays within each episode; evaluation stats
    are gathered every 50 episodes and plotted at the end.
    """
    learningRate = .2
    eval_steps, eval_reward = [], []
    # Q-table over (position bin, velocity bin, action bin), randomly seeded.
    qVals = np.random.choice(a = np.linspace(0,100,100), size=(numBinsObs,numBinsObs,numBinsActions))
    numIter = 500
    for i_episode in range(numIter):
        observation = env.reset()
        for t in range(2000):
            env.render()
            # Discretize the current state.
            # NOTE(review): np.digitize can return len(bins) == 100 for values
            # at/above the top edge, which would overflow the 100-wide table
            # axes - confirm observations stay strictly inside the bin range.
            pos = observation[0]
            vel = observation[1]
            posDes = np.digitize(pos, obsBinsPos)
            velDes = np.digitize(vel, obsBinsVel)
            # Epsilon decays with the step index within the episode.
            e = 1.0 - t / (100)
            # randomly pick action based on epsilon
            if np.random.rand() < e:
                action = random.uniform(actionSpaceLowerBound, actionSpaceUpperBound)
                #discretize the action
                actionDes = np.digitize(action, actionBins)
            else:
                # get the action derived from q for current state
                possibleActions = qVals[posDes,velDes, :]
                actionDes = np.argmax(possibleActions)
                action = np.array(actionBins[actionDes]).reshape((1,))
            # take that action and step
            observationPrime, reward, done, info = env.step(action)
            # get the current q value
            currQVal = qVals[posDes,velDes,actionDes]
            # Discretize the successor state.
            posPrime = np.digitize(observationPrime[0], obsBinsPos)
            velPrime = np.digitize(observationPrime[1], obsBinsVel)
            # get the possible qvalues for s'
            possibleNewStates = qVals[posPrime,velPrime, :]
            # get the best action based on the next state s'
            actionPrime = np.argmax(possibleNewStates)
            # get the q value of that state
            maxQSPrime = qVals[posPrime,velPrime, actionPrime]
            # Standard Q-learning update toward the bootstrapped target.
            qVals[posDes,velDes,actionDes] = currQVal + learningRate * (reward + discount * maxQSPrime - currQVal)
            # set the current state equal to the next state
            observation = observationPrime
            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break
        #evaluation
        if (i_episode % 50 == 0):
            avg_step, avg_reward = evaluationCar(env, qVals,50,numIter)
            eval_steps.append(avg_step)
            eval_reward.append(avg_reward)
    f2, ax2 = plt.subplots()
    # repeat for different algs
    ax2.plot(range(0, numIter, 50),eval_steps)
    f2.suptitle('Evaluation Steps')
    f3, ax3 = plt.subplots()
    # repeat for different algs
    ax3.plot(range(0,numIter,50),eval_reward)
    f3.suptitle('Evaluation Reward')
    plt.show()
if __name__ == "__main__":
    # Run the REINFORCE experiment; swap the comment to run Q-learning instead.
    REINFORCECar()
    #qLearningMountain()
|
{"hexsha": "913bf56ac2eb98a6027c5992ffced5687b8d97f3", "size": 6635, "ext": "py", "lang": "Python", "max_stars_repo_path": "car.py", "max_stars_repo_name": "RajatBhageria/Reinforcement-Learning", "max_stars_repo_head_hexsha": "5b49b697345257d9346dc699663265fdb1401c33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "car.py", "max_issues_repo_name": "RajatBhageria/Reinforcement-Learning", "max_issues_repo_head_hexsha": "5b49b697345257d9346dc699663265fdb1401c33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "car.py", "max_forks_repo_name": "RajatBhageria/Reinforcement-Learning", "max_forks_repo_head_hexsha": "5b49b697345257d9346dc699663265fdb1401c33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-21T13:02:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-21T13:02:51.000Z", "avg_line_length": 31.4454976303, "max_line_length": 114, "alphanum_fraction": 0.6093443858, "include": true, "reason": "import numpy", "num_tokens": 1598}
|
#!/usr/bin/env python
# encoding: utf-8
"""
gmconvert.py
Created by Brant Faircloth on 28 April 2011.
Copyright 2011 Brant C. Faircloth. All rights reserved.
"""
import pdb
import os
import sys
import copy
import optparse
import numpy
import la
from openpyxl.workbook import Workbook
from openpyxl.cell import get_column_letter
from openpyxl.writer.excel import ExcelWriter
def interface():
    '''Command-line interface.

    Parses --input (required, must be an existing file) and --output.
    Exits with status 2 and a usage message when --input is missing/invalid.
    Returns the (options, args) pair from optparse.
    NOTE(review): this file targets Python 2 (bare `print` statement below,
    xrange/iteritems elsewhere), hence optparse.
    '''
    usage = "usage: %prog [options]"
    p = optparse.OptionParser(usage)
    p.add_option('--input', dest = 'input', action='store',
        type='string', default = None, help='The path to the input file.',
        metavar='FILE')
    p.add_option('--output', dest = 'output', action='store',
        type='string', default = None, help='The path to the output file.',
        metavar='FILE')
    (options,arg) = p.parse_args()
    if not options.input:
        p.print_help()
        sys.exit(2)
    if not os.path.isfile(options.input):
        # NOTE(review): message says "configuration file" but --input is the
        # genotype export file - wording likely copied from another script.
        print "You must provide a valid path to the configuration file."
        p.print_help()
        sys.exit(2)
    return options, arg
def get_number_of_markers(input):
    """Scan a tab-delimited export and collect distinct marker and sample
    names.

    A row whose first cell is 'Sample File' is a header and (re)defines the
    column positions; every other row contributes its marker/sample values.
    Returns (markers, samples) as two sets.
    NOTE(review): mode 'rU' was removed in Python 3.11; fine here since this
    file targets Python 2.  Data rows appearing before any header row would
    raise NameError on marker_pos.
    """
    markers = set()
    samples = set()
    for line in open(input, 'rU'):
        line_elements = line.strip('\n').split('\t')
        if line_elements[0] == 'Sample File':
            marker_pos = line_elements.index('Marker')
            sample_pos = line_elements.index('Sample Name')
        else:
            markers.add(line_elements[marker_pos])
            samples.add(line_elements[sample_pos])
    return markers, samples
def create_population(samples, markers, columns = ['A','B']):
    """Map each sample name to a fresh, empty genotype larry.

    NOTE(review): `columns` is accepted but not forwarded to create_larry
    (which applies its own ['A', 'B'] default); preserved as-is.
    """
    return dict((name, create_larry(markers)) for name in samples)
def create_larry(markers, depth = '0', columns = ['A','B']):
    """Return an empty labelled array (la.larry) of shape
    (1, n_markers, n_columns) holding 4-byte genotype strings.

    Labels: axis 0 = the single stack-depth string `depth`, axis 1 = marker
    names, axis 2 = allele columns.  The mutable defaults are never mutated
    here, so sharing them across calls is harmless.
    """
    labels = [[depth], list(markers), columns]
    a = numpy.zeros((1,len(markers),len(columns)), dtype='|S4')
    return la.larry(a , labels)
def insert_genotype_data(input, population, markers):
    """Fill the per-sample larrys with allele calls read from the
    tab-delimited input file.

    Header rows (first cell 'Sample File') redefine the column positions.
    For each data row, the alleles are written into stack levels whose slot
    for that marker is still empty; when every existing level is occupied a
    new level is created and merged in (handles re-genotyped samples).
    Returns the mutated `population` dict.
    """
    for line in open(input, 'rU'):
        line_elements = line.strip('\n').split('\t')
        if line_elements[0] == 'Sample File':
            sample_pos = line_elements.index('Sample Name')
            marker_pos = line_elements.index('Marker')
            allele1_pos = line_elements.index('Allele 1')
            allele2_pos = line_elements.index('Allele 2')
        else:
            name = line_elements[sample_pos]
            marker = line_elements[marker_pos]
            a1 = line_elements[allele1_pos]
            a2 = line_elements[allele2_pos]
            if a1 == '' and a2 == '':
                # No call at all for this marker - skip the row.
                pass
            else:
                # A single called allele is treated as homozygous.
                if a2 == '' and a1 != '':
                    a2 = a1
                # check for entries in first level of larry
                original_shape = population[name].shape[0]
                for l in xrange(original_shape):
                    level = str(l)
                    #pdb.set_trace()
                    # NOTE(review): there is no break after a write, so the
                    # alleles are copied into every currently-empty level -
                    # confirm this duplication is intended.
                    if (population[name].lix[[level]].lix[[marker]].x == numpy.array(['', ''], dtype='|S4')).all():
                        population[name].lix[[level]].lix[[marker]].x[0] = a1
                        population[name].lix[[level]].lix[[marker]].x[1] = a2
                    elif not original_shape > (l + 1):
                        #pdb.set_trace()
                        new_level = "{0}".format(l + 1)
                        # create a new level
                        empty = create_larry(markers, depth = new_level)
                        # merge it into the existing level
                        population[name] = population[name].merge(empty)
                        population[name].lix[[new_level]].lix[[marker]].x[0] = a1
                        population[name].lix[[new_level]].lix[[marker]].x[1] = a2
    return population
def write_header(ws, header):
    """Write `header` across row 1 of worksheet `ws` and return the sheet."""
    # excel columns are indexed by 1, not 0
    for col in xrange(1, len(header) + 1):
        cl = get_column_letter(col)
        ws.cell('{0}1'.format(cl)).value = header[col - 1]
    return ws
def get_max_depth(population):
    """Return the largest stack depth (axis-0 length) over all samples."""
    depths = []
    for name, genotypes in population.iteritems():
        depths.append(genotypes.shape[0])
    return max(depths)
def create_additional_sheet(wb, header, max_depth):
    """Ensure the workbook has one 'LevelN' sheet per stack level plus a
    trailing 'Summary' sheet, each carrying the common header row.

    Returns the workbook.  The Summary sheet is appended unconditionally, so
    this function is meant to be called once per workbook.
    """
    sheet_depth = len(wb.get_sheet_names())
    if sheet_depth < max_depth:
        for sheet in range(max_depth - sheet_depth):
            ws = wb.create_sheet()
            ws.title = "Level{0}".format(sheet + sheet_depth)
            ws = write_header(ws, header)
    # create a summary sheet
    ws = wb.create_sheet()
    ws.title = "Summary"
    ws = write_header(ws, header)
    return wb
def write_records_to_excel(output, population, markers, alleles = ['A','B']):
    """Dump genotype calls to an Excel workbook: one 'LevelN' sheet per
    stack level plus a 'Summary' sheet marking per-marker consistency (T/F).

    output     -- path of the .xlsx file to write
    population -- {sample name: larry} as built by insert_genotype_data
    markers    -- iterable of marker names (defines column order)
    alleles    -- per-marker column suffixes (two columns per marker)
    """
    wb = Workbook()
    ew = ExcelWriter(workbook = wb)
    ws = wb.worksheets[0]
    ws.title = "Level0"
    # add marker columns - 2 columns for each allele
    # NOTE(review): marker names containing '_' would break the split below -
    # confirm the input naming constraints.
    header = ["Individual"] + ["{0}_{1}".format(m,a) for m in markers for a in alleles]
    #pdb.set_trace()
    ws = write_header(ws, header)
    max_depth = get_max_depth(population)
    wb = create_additional_sheet(wb, header, max_depth)
    # Samples are written in sorted order, one row per sample on every sheet.
    pop_keys = population.keys()
    pop_keys.sort()
    for name_idx, name in enumerate(pop_keys):
        # pdb.set_trace()
        # get depth of stack
        depth = population[name].shape[0]
        # write invididual name at all depths
        #for level in xrange(max_depth):
        #    ws = wb.get_sheet_by_name("Level{0}".format(level))
        #    ws.cell('A{0}'.format(name_idx + 2)).value = name
        # ensure there is workbook at max stack depth
        for level in xrange(max_depth):
            # write the sample id for each row of all levels
            ws = wb.get_sheet_by_name("Level{0}".format(level))
            ws.cell('A{0}'.format(name_idx + 2)).set_value_explicit(value = name, data_type = 's')
            # but only write the genotypes for the sample where there
            # is a level
            if level < depth:
                for marker_idx, marker in enumerate(header[1:]):
                    cl = get_column_letter(marker_idx + 2)
                    marker, allele = marker.split('_')
                    # The column suffix selects which allele slot to read.
                    if allele == 'A':
                        pos = 0
                    else:
                        pos = 1
                    ws.cell('{0}{1}'.format(cl, name_idx + 2)).value = population[name].lix[[str(level)]].lix[[marker]].x[pos]
        # check all non-zero entries for similarity and store to summary
        # NOTE(review): ".format(level)" below is a no-op ("Summary" has no
        # placeholder) - harmless, confirm before cleaning up.
        ws = wb.get_sheet_by_name("Summary".format(level))
        ws.cell('A{0}'.format(name_idx + 2)).set_value_explicit(value = name, data_type = 's')
        for marker_idx, marker in enumerate(header[1:]):
            cl = get_column_letter(marker_idx + 2)
            marker, allele = marker.split('_')
            if allele == 'A':
                pos = 0
            else:
                pos = 1
            # if only one element, self = self
            if depth <= 1:
                identical = True
            elif (population[name].lix[:,[marker],pos].x == '').all():
                identical = True
            else:
                # don't penalize comparisons having '' in >= one column
                empties = population[name].lix[:,[marker],pos].x != ''
                genotypes_no_empties = population[name].lix[:,[marker],pos].x[empties]
                # just test first element against all elements
                identical = (genotypes_no_empties == genotypes_no_empties[0]).all()
            if identical:
                ws.cell('{0}{1}'.format(cl, name_idx + 2)).value = "T"
            else:
                ws.cell('{0}{1}'.format(cl, name_idx + 2)).value = "F"
    #pdb.set_trace()
    ew.save(filename = output)
def main():
    """Parse the CLI options, build per-sample genotype structures from the
    input file, and write the Excel report to the output path."""
    options, args = interface()
    markers, samples = get_number_of_markers(options.input)
    # NOTE(review): m_list is computed but never used.
    m_list = list(markers)
    population = create_population(samples, markers)
    population = insert_genotype_data(options.input, population, markers)
    write_records_to_excel(options.output, population, markers)
    #pdb.set_trace()
if __name__ == '__main__':
    main()
|
{"hexsha": "d5128650e99b0e5f42f9439c944016dc3fe8a618", "size": 8153, "ext": "py", "lang": "Python", "max_stars_repo_path": "cli/gmconvert.py", "max_stars_repo_name": "brantfaircloth/gmconvert", "max_stars_repo_head_hexsha": "7ffa70dab4a13bfef93cc74912535ec843338ea2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cli/gmconvert.py", "max_issues_repo_name": "brantfaircloth/gmconvert", "max_issues_repo_head_hexsha": "7ffa70dab4a13bfef93cc74912535ec843338ea2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-25T06:47:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-25T06:47:13.000Z", "max_forks_repo_path": "cli/gmconvert.py", "max_forks_repo_name": "brantfaircloth/gmconvert", "max_forks_repo_head_hexsha": "7ffa70dab4a13bfef93cc74912535ec843338ea2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6398104265, "max_line_length": 126, "alphanum_fraction": 0.5753710291, "include": true, "reason": "import numpy", "num_tokens": 1918}
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
import numpy as np
import math
from matplotlib import animation
import argparse
# All the argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("--width", help="(int) width of output", type=int, default=160)
parser.add_argument("--height", help="(int) height of output, will be increased by one if even due to rendering bug", type=int, default=120)
# NOTE(review): --linear_interpolate and --pregenerate_noise say "if present"
# in their help text, but neither uses action='store_true'; each requires an
# explicit value on the command line (any non-empty string enables linear
# interpolation).  Confirm before relying on flag-style usage.
parser.add_argument('--linear_interpolate', help="if present, will use linear interpolation to speed up rendering; default is to use cosine interpolation for higher quality", default=False)
parser.add_argument("--max_octave", help="(int) how many octaves of Perlin noise to generate (sensible range 1-8)", type=int, default=8)
parser.add_argument('--pregenerate_noise', help="(int) if present, will pre-generate noise in an array of specified length", type=int, default=0)
parser.add_argument("--frame_count", help="(int) how many frames to render", type=int, default=20)
parser.add_argument("--frame_time_interval", help="(int) time interval between frames", type=int, default=50)
parser.add_argument("--frame_z_interval", help="(int) z-interval between frames", type=float, default=0.2)
parser.add_argument("--out", help="(String) filename to save as animation", default=None)
args = parser.parse_args()
width, height = args.width, args.height
if height % 2 == 0:
    # Even numbers produce a glitch at 12 O'clock — in the JS version, the same error is at 9 O'clock
    height += 1
print("Actual dimensions:", width, height)
# Pre-calculated constants to speed things up and make code more readable
hWidth, hHeight = width/2.0, height/2.0
centerToCorner = math.sqrt((hWidth*hWidth) + (hHeight*hHeight))
tangentScale = math.pi / (2*centerToCorner)
thetaToPerlinScale = 128 / math.pi
# Quality setting
# MAX_OCTAVE should be in range 1-8: Higher values = more detail, lower values = faster; no point going over 8 because of dynamic range of colour space
MAX_OCTAVE = args.max_octave
# Utility functions, will only use one of {linearInterpolate, cosineInterpolate}, the former is faster, the latter is nicer
def linearInterpolate(a, b, x):
	# Straight-line blend between the two endpoints: x=0 -> a, x=1 -> b.
	weight = 1 - x
	return a * weight + b * x
def cosineInterpolate(a, b, x):
	# Ease-in/ease-out blend: remap x through half a cosine wave, then blend linearly.
	eased = (1 - math.cos(x * math.pi)) * 0.5
	return a * (1 - eased) + b * eased
# Default to the higher-quality cosine blend; the CLI flag swaps in the cheaper linear one.
interpolate = cosineInterpolate
if args.linear_interpolate:
	interpolate = linearInterpolate
# Random number sources (two options)
def seededRandom(x):
	"""Deterministic pseudo-random value in [0, 255] derived from integer seed x.

	Magic numbers from tutorial on pseudo-random number generators
	(the classic integer-hash noise function).
	"""
	x = int(x & 0x7fffffff)
	# BUG FIX: the bit-mixed value was previously computed into `temp` and then
	# immediately overwritten; the hash polynomial must be evaluated on the
	# mixed value, not the raw seed, or the shift/xor step does nothing.
	x = (x << 13) ^ x
	temp = (x * (x * x * 15731 + 789221) + 1376312589)
	temp = int(temp) & 0x7fffffff # keep the low 31 bits
	return 255*( temp/float(0x7fffffff) )
pregeneratedRandomData = None  # lazily filled lookup table of noise values
def pregeneratedRandom(x):
	"""Look up a pseudo-random value in a lazily built table of pre-generated noise.

	The table (length args.pregenerate_noise) is filled on first call; x is
	wrapped modulo the table length.
	"""
	global pregeneratedRandomData
	# BUG FIX: the original tested `None == pregeneratedRandomData`, which after
	# the first call compares None against a numpy array elementwise, so the
	# `if` raises "truth value of an array is ambiguous" on every later call.
	# An identity test is the correct (and idiomatic) None check.
	if pregeneratedRandomData is None:
		pregeneratedRandomData = 255*np.random.rand(args.pregenerate_noise)
		print("Pre-generated random data")
	return pregeneratedRandomData[x % len(pregeneratedRandomData)]
# Choose the noise source: the hash-based generator by default, or the
# pre-generated lookup table when --pregenerate_noise is given.
noiseFunction = seededRandom
if args.pregenerate_noise:
	noiseFunction = pregeneratedRandom
# Three-function Perlin noise function; the time parameter affects each octave differently because that looks more interesting when animated
def perlinNoise(perlinTheta, r, time):
	"""Multi-octave value noise sampled at polar coordinate (perlinTheta, r).

	perlinTheta is an angle remapped to the 0-255 range, r is the (warped)
	radius, and time offsets the radial coordinate so the pattern animates.
	Each octave's contribution is weighted by 1/sf, so higher frequencies add
	progressively finer detail.
	NOTE(review): range(1, MAX_OCTAVE) evaluates octaves 1..MAX_OCTAVE-1, so
	the octave numbered MAX_OCTAVE itself is never sampled — confirm intended.
	"""
	sum = 0
	for octave in range(1, MAX_OCTAVE):
		sf = 2**octave  # frequency scale factor for this octave
		sf8 = sf*4 # I can't remember where this variable name came from
		# The constants 64 and 4 are essentially arbitary:
		# they define the scale of the largest component of the Perlin noise
		new_theta_double = sf*perlinTheta/64.0
		new_r_double = sf*r/4.0 + time # Add current time to this to get an animated effect
		# Integer lattice cell and fractional position inside it.
		new_theta_int = int(new_theta_double)
		new_r_int = int(new_r_double)
		fraction_r = new_r_double - new_r_int
		fraction_theta = new_theta_double - new_theta_int
		# Noise values at the four corners of the lattice cell.
		t1 = noiseFunction( new_theta_int + sf8 * new_r_int )
		t2 = noiseFunction( new_theta_int + sf8 * (new_r_int+1))
		if new_theta_int+1 >= sf8:
			# So that interpolation with angle 0-360° doesn't do weird things with angles > 360°
			new_theta_int = new_theta_int - sf8
		t3 = noiseFunction((new_theta_int+1) + sf8 * new_r_int )
		t4 = noiseFunction((new_theta_int+1) + sf8 * (new_r_int+1))
		# Bilinear-style blend: first along r, then along theta.
		i1 = interpolate(t1, t2, fraction_r)
		i2 = interpolate(t3, t4, fraction_r)
		sum += interpolate(i1, i2, fraction_theta)/sf
	return sum
# Render function
def render(time):
	"""Render one frame of the tunnel at animation coordinate `time`.

	Returns a (height, width) uint8 greyscale image. Each pixel is mapped to
	polar coordinates around the image centre, the radius is warped through a
	tangent so the noise appears to recede into the distance, and the pixel
	intensity is the summed multi-octave noise.
	"""
	outputImage = np.empty((height, width), np.uint8)
	for y in range(0, height):
		dy = y - hHeight
		for x in range(0, width):
			dx = x - hWidth
			perlinTheta = 128 + thetaToPerlinScale*math.atan2(dy, dx) # Range 0-255, this is a coordinate space 2^n where n is integer, not a colour space
			r = math.sqrt((dx*dx) + (dy*dy))
			r = centerToCorner - r
			r = math.tan(tangentScale*r)
			sum = perlinNoise(perlinTheta, r, time)
			outputImage[y][x] = sum
	return outputImage
# Renders all images and displays them
images = []
for time in range(0, args.frame_count):
	img = render(time*args.frame_z_interval)
	images.append(img)
fig = plt.figure()
im = plt.imshow(images[0], animated=True, cmap=plt.cm.gist_gray)
index = 0
def update(*args):
	"""Advance the animation by one pre-rendered frame, wrapping around."""
	# NOTE: *args here shadows the module-level argparse namespace inside this
	# function only; FuncAnimation's frame argument arrives in it and is unused.
	global index
	index += 1
	index %= images.__len__()
	im.set_array(images[index])
	return im,
ani = animation.FuncAnimation(fig, update, interval=args.frame_time_interval, blit=False)
plt.axis('off')
plt.show()
# Tries to save the hyperspace tunnel as a movie, but I can't test this because I can't get the required parts to install correctly
if args.out != None:
	# Requires ffmpeg (or mencoder?)
	print("Saving as:", args.out)
	ani.save(args.out)
|
{"hexsha": "98d875303c57e7877ae46c08b8913cbc22995c1f", "size": 5638, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyperspace.py", "max_stars_repo_name": "BenWheatley/Hyperspace-tunnel", "max_stars_repo_head_hexsha": "b31aa137c4f807141e1da91e82da03006e61cdde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hyperspace.py", "max_issues_repo_name": "BenWheatley/Hyperspace-tunnel", "max_issues_repo_head_hexsha": "b31aa137c4f807141e1da91e82da03006e61cdde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hyperspace.py", "max_forks_repo_name": "BenWheatley/Hyperspace-tunnel", "max_forks_repo_head_hexsha": "b31aa137c4f807141e1da91e82da03006e61cdde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2375, "max_line_length": 189, "alphanum_fraction": 0.7328840014, "include": true, "reason": "import numpy", "num_tokens": 1535}
|
[STATEMENT]
lemma usubstappf_antimon: "V\<subseteq>U \<Longrightarrow> usubstappf \<sigma> U \<phi> \<noteq> undeff \<Longrightarrow> usubstappf \<sigma> U \<phi> = usubstappf \<sigma> V \<phi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>V \<subseteq> U; usubstappf \<sigma> U \<phi> \<noteq> undeff\<rbrakk> \<Longrightarrow> usubstappf \<sigma> U \<phi> = usubstappf \<sigma> V \<phi>
[PROOF STEP]
using usubstappf_and_usubstappp_antimon
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?V \<subseteq> ?U; usubstappf ?\<sigma> ?U ?\<phi> \<noteq> undeff\<rbrakk> \<Longrightarrow> usubstappf ?\<sigma> ?U ?\<phi> = usubstappf ?\<sigma> ?V ?\<phi>
\<lbrakk>?V \<subseteq> ?U; snd (usubstappp ?\<sigma> ?U ?\<alpha>) \<noteq> undefg\<rbrakk> \<Longrightarrow> snd (usubstappp ?\<sigma> ?U ?\<alpha>) = snd (usubstappp ?\<sigma> ?V ?\<alpha>)
goal (1 subgoal):
1. \<lbrakk>V \<subseteq> U; usubstappf \<sigma> U \<phi> \<noteq> undeff\<rbrakk> \<Longrightarrow> usubstappf \<sigma> U \<phi> = usubstappf \<sigma> V \<phi>
[PROOF STEP]
by simp
|
{"llama_tokens": 442, "file": "Differential_Game_Logic_USubst", "length": 2}
|
# Introduction
Molecular energy levels are determined by electronic, vibrational and rotational contributions. The resulting spectral lines are dense and form so-called band spectra. Within a single band, the reference point is determined by the electronic or vibrational level. The selection rule for rotational spectra is $\Delta J = 0, \pm 1$, with the transition $J = 0 \to 0$ forbidden.
The $R$-branch of the spectrum is the branch with $\Delta J = +1$ (i.e. $J' = J'' + 1$). The Fortrat parabola is described by:
\begin{equation}
k = k_0 + (B' + B'') (J'' + 1) + (B' - B'')(J'' + 1)^2
\end{equation}
# Analysis
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.constants import hbar
```
```python
data = pd.read_csv("../data/cn.csv", sep=" ")
```
```python
data
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>J</th>
<th>λ</th>
<th>k</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>0</td>
<td>3878.40</td>
<td>0.000258</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>3878.00</td>
<td>0.000258</td>
</tr>
<tr>
<th>2</th>
<td>2</td>
<td>3877.44</td>
<td>0.000258</td>
</tr>
<tr>
<th>3</th>
<td>3</td>
<td>3876.96</td>
<td>0.000258</td>
</tr>
<tr>
<th>4</th>
<td>4</td>
<td>3876.40</td>
<td>0.000258</td>
</tr>
<tr>
<th>5</th>
<td>5</td>
<td>3875.84</td>
<td>0.000258</td>
</tr>
<tr>
<th>6</th>
<td>6</td>
<td>3875.28</td>
<td>0.000258</td>
</tr>
<tr>
<th>7</th>
<td>7</td>
<td>3874.72</td>
<td>0.000258</td>
</tr>
<tr>
<th>8</th>
<td>8</td>
<td>3874.08</td>
<td>0.000258</td>
</tr>
<tr>
<th>9</th>
<td>9</td>
<td>3873.36</td>
<td>0.000258</td>
</tr>
</tbody>
</table>
</div>
```python
def parabola(x, *args):
    """Quadratic model args[0] + args[1]*x + args[2]*x**2, for use with curve_fit."""
    c0, c1, c2 = args[0], args[1], args[2]
    return c0 + c1 * x + c2 * x ** 2
```
```python
data.columns
```
Index(['J', 'λ', 'k'], dtype='object')
```python
j = data["J"].values
k = data["k"].values * 10 ** 10
```
```python
popt, pcov = curve_fit(parabola, j + 1, k, p0=[2e-3, 100, 10])
```
```python
j_0 = np.linspace(-1, 10, 200)
```
```python
fig, ax = plt.subplots()
ax.scatter(j, k * 10 ** -6, c='b', edgecolor='k')
ax.plot(j_0, parabola(j_0 + 1, *popt) * 10 ** -6)
ax.grid()
ax.set_ylim((.9999 * k.min() * 10 ** -6, 1.0001 * k.max() * 10 ** -6))
ax.set_title("Fortan curve of CN")
ax.set_ylabel(r"$k[Mm]$")
ax.set_xlabel(r"$J''$")
```
```python
b_1 = (popt[1] + popt[2]) / 2
b_2 = (popt[1] - popt[2]) / 2
```
```python
delta = (np.sqrt(pcov[1, 1]) + np.sqrt(pcov[2, 2])) / 2
```
```python
b_1, b_2, delta
```
(146.17452560353468, 138.25791460956538, 6.170608799469187)
\begin{equation}
\begin{aligned}
B' = (146 \pm 7) m^{-1}\\
B'' = (138 \pm 7) m^{-1}
\end{aligned}
\end{equation}
The moment of inertia is related to the $B$ coefficient:
\begin{equation}
B = \frac{\hbar^2}{2I}
\end{equation}
```python
def moment_of_inertia(b, db):
    """Return (I, dI): the moment of inertia for rotational constant b with error db.

    Uses I = hbar^2 / (2 b); the uncertainty follows from error propagation,
    dI = hbar^2 / (2 b^2) * db.
    """
    inertia = hbar ** 2 / (2 * b)
    uncertainty = hbar ** 2 / (2 * b ** 2) * db
    return inertia, uncertainty
```
```python
i_1, d_i_1 = moment_of_inertia(b_1, delta)
i_2, d_i_2 = moment_of_inertia(b_2, delta)
```
```python
i_1, d_i_1
```
(3.80408856145368e-71, 1.6058572623613524e-72)
```python
i_2, d_i_2
```
(4.021909649039747e-71, 1.795024258909058e-72)
\begin{equation}
\begin{aligned}
I' = (3.8 \pm 0.2) 10 ^ {-71} kg m^{2}\\
I'' = (4.0 \pm 0.2) 10 ^ {-71} kg m^{2}
\end{aligned}
\end{equation}
|
{"hexsha": "3e4399ab11fa02213385a6acdf598b148a430528", "size": 28931, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Physics of Molecules/src/Fortan diagrams.ipynb", "max_stars_repo_name": "PhyProg/Numerical-simulations-in-Physics", "max_stars_repo_head_hexsha": "ab335117d993be4129654fbfc4455176410fabe2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Physics of Molecules/src/Fortan diagrams.ipynb", "max_issues_repo_name": "PhyProg/Numerical-simulations-in-Physics", "max_issues_repo_head_hexsha": "ab335117d993be4129654fbfc4455176410fabe2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Physics of Molecules/src/Fortan diagrams.ipynb", "max_forks_repo_name": "PhyProg/Numerical-simulations-in-Physics", "max_forks_repo_head_hexsha": "ab335117d993be4129654fbfc4455176410fabe2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.0729411765, "max_line_length": 19424, "alphanum_fraction": 0.7877017732, "converted": true, "num_tokens": 1537}
|
from agent import Agent
from monitor import interact
import gym
import numpy as np
# Train and evaluate the agent on the gym Taxi environment; `interact`
# returns the per-window average rewards and the best average achieved.
env = gym.make('Taxi-v3')
agent = Agent()
avg_rewards, best_avg_reward = interact(env, agent)
# Benchmark notes: best average reward observed per update rule, by gym version.
#in v2 online on the notebook
# sarsa[0] --> 9.256
# Q-learning --> 9.223
# E-Sarsa --> 9.118
#in v3:
# sarsa[0] --> 8.793
# Q-learning --> 8.554
# E-Sarsa --> 8.874
|
{"hexsha": "12113527d4a8be6e08af69eff2ff83df94b4cce4", "size": 363, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab-taxi/main.py", "max_stars_repo_name": "ramanpreet9/deep-reinforcement-learning", "max_stars_repo_head_hexsha": "daa0a92dc4ed18af8a953193c73a6af22635277e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab-taxi/main.py", "max_issues_repo_name": "ramanpreet9/deep-reinforcement-learning", "max_issues_repo_head_hexsha": "daa0a92dc4ed18af8a953193c73a6af22635277e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab-taxi/main.py", "max_forks_repo_name": "ramanpreet9/deep-reinforcement-learning", "max_forks_repo_head_hexsha": "daa0a92dc4ed18af8a953193c73a6af22635277e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.15, "max_line_length": 51, "alphanum_fraction": 0.6336088154, "include": true, "reason": "import numpy", "num_tokens": 134}
|
import os
import torch
#from skimage import io, transform
from PIL import Image
from scipy.io import loadmat
import numpy as np
from torch.utils.data import Dataset
from torchvision import transforms
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# class Rescale(object):
# """Rescale the image in a sample to a given size.
# Args:
# output_size (tuple): Desired output size.
# """
# def __init__(self, output_size):
# assert isinstance(output_size, tuple)
# assert len(output_size) == 2
# self.output_size = output_size
# def __call__(self, sample):
# image, labels = sample['image'], sample['labels']
# img = transform.resize(image, self.output_size)
# return {'image': img, 'labels': labels}
# class ToTensor(object):
# """Convert ndarrays in sample to Tensors."""
# def __call__(self, sample):
# image, labels = sample['image'], sample['labels']
# # swap color axis because
# # numpy image: H x W x C
# # torch image: C X H X W
# image = image.transpose((2, 0, 1))
# return {'image': torch.from_numpy(image),
# 'labels': torch.from_numpy(labels)}
# REF: https://pytorch.org/docs/stable/torchvision/models.html
# Note: PIL.Image.open(filename) returns a PIL image
# skimage.io.imread(filename) returns a numpy ndarray
image_size = (224, 224)
# image_size = (299, 299) # torchvision.models.resnet152 will not work properly for this size (i.e., size mismatch error)
# Resize, convert to tensor, then normalise with the per-channel mean/std
# values given in the torchvision models reference linked above.
data_transforms = transforms.Compose([transforms.Resize(image_size),
                                      transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])])
class ImageDataset(Dataset):
    """Multi-label image dataset.

    Each sample is a dict {'image': transformed image, 'labels': binary
    label vector of length num_labels}. Examples that carry no positive
    label at all are filtered out at construction time.
    """

    def __init__(self, image_dir, image_list_file, label_file, vocab_file, transform=data_transforms):
        """
        Args:
            image_dir (string): Directory with all the images.
            image_list_file (string): Path to the list of image filenames.
            label_file (string): Path to the .mat file of image labels.
            vocab_file (string): Path to the vocabulary file of labels.
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        self.image_dir = image_dir

        with open(image_list_file, 'r') as fd:
            image_files = [line.strip() for line in fd]

        # The .mat file stores the label matrix under 'mat_train' or
        # 'mat_test' depending on which split it was exported from.
        labels_dict = loadmat(label_file)
        if 'mat_train' in labels_dict:
            all_labels = labels_dict['mat_train'].astype(np.uint8)
        else:
            all_labels = labels_dict['mat_test'].astype(np.uint8)

        # Filter out examples without any positive label.
        nlabels = all_labels.sum(axis=1)
        if np.min(nlabels) < 1:
            nz_ix = np.nonzero(nlabels)[0]
            self.image_files = np.array(image_files)[nz_ix]
            self.all_labels = all_labels[nz_ix, :]
        else:
            self.image_files = image_files
            self.all_labels = all_labels

        with open(vocab_file, 'r') as fd:
            vocab = [line.strip() for line in fd]
        self.label_vocab = np.array(vocab)

        self.num_samples, self.num_labels = self.all_labels.shape
        assert self.num_samples == len(self.image_files)
        assert self.num_labels == len(self.label_vocab)

        self.transform = transform

    def __len__(self):
        # Number of (filtered) examples.
        return self.num_samples

    def __getitem__(self, idx):
        """Load image `idx` from disk, apply the transform, and return the sample dict."""
        img_name = os.path.join(self.image_dir, self.image_files[idx])
        image = Image.open(img_name)
        img_labels = self.all_labels[idx, :]

        # Convert non-RGB (e.g. grayscale) images to 3-channel RGB so the
        # normalisation transform always sees three channels.
        rgbmode = 'RGB'
        if image.mode != rgbmode:
            image = image.convert(mode=rgbmode)

        if self.transform:
            image = self.transform(image)

        return {'image': image, 'labels': img_labels}

    def decode_labels(self, label_vec):
        """Translate a binary label vector into the list of label strings it encodes."""
        # BUG FIX: use isinstance() instead of an exact type() comparison so
        # torch.Tensor subclasses (e.g. Parameter) are converted as well.
        if isinstance(label_vec, torch.Tensor):
            label_vec = label_vec.numpy()
        assert label_vec.shape == (self.num_labels,)
        label_ix = np.nonzero(label_vec)[0]
        return self.label_vocab[label_ix].tolist()
|
{"hexsha": "69f2f00f6c5e41848cb50a83e012f5f32160e18d", "size": 4552, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dataset.py", "max_stars_repo_name": "cdawei/image_labeling", "max_stars_repo_head_hexsha": "119c80c2d8f78c9701f7500236038dab7e43327d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dataset.py", "max_issues_repo_name": "cdawei/image_labeling", "max_issues_repo_head_hexsha": "119c80c2d8f78c9701f7500236038dab7e43327d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dataset.py", "max_forks_repo_name": "cdawei/image_labeling", "max_forks_repo_head_hexsha": "119c80c2d8f78c9701f7500236038dab7e43327d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0153846154, "max_line_length": 121, "alphanum_fraction": 0.5971001757, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1003}
|
import os
import tempfile
import numpy as np
import pandas as pd
import datetime as dt
if __name__ == "__main__":
    # SageMaker Processing conventions: input under /opt/ml/processing/input,
    # outputs under train/, validation/ and test/ channels.
    base_dir = "/opt/ml/processing"

    # Read the raw customer data.
    df = pd.read_csv(
        f"{base_dir}/input/storedata_total.csv"
    )

    # Parse date columns; unparseable first/last order dates become NaT.
    df["created"] = pd.to_datetime(df["created"])
    df["firstorder"] = pd.to_datetime(df["firstorder"], errors='coerce')
    df["lastorder"] = pd.to_datetime(df["lastorder"], errors='coerce')

    # Drop rows with null values (this also drops the NaT dates introduced above).
    df = df.dropna()

    # Feature engineering:
    # days between the last order and the first order
    df['first_last_days_diff'] = (df['lastorder'] - df['firstorder']).dt.days
    # days between the customer record creation and the first order
    df['created_first_days_diff'] = (df['created'] - df['firstorder']).dt.days

    # Raw identifier and date columns are no longer needed.
    df.drop(['custid', 'created', 'firstorder', 'lastorder'], axis=1, inplace=True)

    # One-hot encode the categorical columns.
    df = pd.get_dummies(df, prefix=['favday', 'city'], columns=['favday', 'city'])

    # Move the "retained" target into the first column (label-first rows,
    # as the CSV training channel expects), then shuffle in place.
    y = df.pop("retained")
    X = np.concatenate((y.to_numpy().reshape(len(y), 1), df), axis=1)
    np.random.shuffle(X)

    # 70% train / 15% validation / 15% test split.
    # (Unused row-count locals from the original were removed.)
    train, validation, test = np.split(X, [int(.7 * len(X)), int(.85 * len(X))])

    train = pd.DataFrame(train)
    validation = pd.DataFrame(validation)
    test = pd.DataFrame(test)

    # The label column must be an integer for the training container.
    train[0] = train[0].astype(int)
    validation[0] = validation[0].astype(int)
    test[0] = test[0].astype(int)

    # Write the splits to the processing output channels, without headers/index.
    train.to_csv(f"{base_dir}/train/train.csv", header=False, index=False)
    validation.to_csv(f"{base_dir}/validation/validation.csv", header=False, index=False)
    test.to_csv(f"{base_dir}/test/test.csv", header=False, index=False)
|
{"hexsha": "21b278d5aa1e5534056107d7b1f019781e24fab3", "size": 2182, "ext": "py", "lang": "Python", "max_stars_repo_path": "pipelines/customerchurn/preprocess.py", "max_stars_repo_name": "aws-samples/customer-churn-sagemaker-pipelines-sample", "max_stars_repo_head_hexsha": "3b4def4a29bd62ca7dfa485273389baf3e131194", "max_stars_repo_licenses": ["MIT-0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-24T01:53:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T23:54:30.000Z", "max_issues_repo_path": "pipelines/customerchurn/preprocess.py", "max_issues_repo_name": "aws-samples/customer-churn-sagemaker-pipelines-sample", "max_issues_repo_head_hexsha": "3b4def4a29bd62ca7dfa485273389baf3e131194", "max_issues_repo_licenses": ["MIT-0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-10T08:50:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T08:50:34.000Z", "max_forks_repo_path": "pipelines/customerchurn/preprocess.py", "max_forks_repo_name": "aws-samples/customer-churn-sagemaker-pipelines-sample", "max_forks_repo_head_hexsha": "3b4def4a29bd62ca7dfa485273389baf3e131194", "max_forks_repo_licenses": ["MIT-0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-24T01:53:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:44:45.000Z", "avg_line_length": 40.4074074074, "max_line_length": 99, "alphanum_fraction": 0.670944088, "include": true, "reason": "import numpy", "num_tokens": 575}
|
import os.path
import time
import warnings
import numpy as np
import torch
from torch.autograd import Variable
from copy import deepcopy
from core.estimator_tools.samplers.srw_mhwg_sampler import SrwMhwgSampler
from core.estimators.gradient_ascent import GradientAscent
from core.estimators.mcmc_saem import McmcSaem
# Estimators
from core.estimators.scipy_optimize import ScipyOptimize
from core.model_tools.manifolds.exponential_factory import ExponentialFactory
from core.model_tools.manifolds.generic_spatiotemporal_reference_frame import GenericSpatiotemporalReferenceFrame
from core.models.longitudinal_metric_learning import LongitudinalMetricLearning
from core.models.model_functions import create_regular_grid_of_points
from in_out.array_readers_and_writers import read_2D_array
from in_out.dataset_functions import read_and_create_scalar_dataset, read_and_create_image_dataset
from support.probability_distributions.multi_scalar_normal_distribution import MultiScalarNormalDistribution
from support.utilities.general_settings import Settings
def _initialize_parametric_exponential(model, xml_parameters, dataset, exponential_factory, metric_parameters):
    """Configure `exponential_factory` for a parametric metric.

    Behaviour:
        - a deformation kernel width is mandatory (ValueError otherwise);
        - interpolation points are read from file when given, else laid out on
          a regular unit-box grid and filtered to points near the observations;
        - metric parameters, when provided, are reshaped and validated against
          the interpolation points; when None they are naively initialised to
          an equal-weight diagonal metric.

    Returns the (reshaped or newly created) metric parameters array.
    """
    dimension = Settings().dimension

    # BUG FIX: the original reshaped unconditionally, so a None input (the
    # common case when no metric-parameters file is given) crashed inside
    # np.reshape before the naive initialisation below could ever run.
    if metric_parameters is not None:
        metric_parameters = np.reshape(
            metric_parameters,
            (len(metric_parameters), int(dimension * (dimension + 1) / 2)))

    if xml_parameters.deformation_kernel_width is None:
        raise ValueError("Please provide a kernel width for the parametric exponenial")
    width = xml_parameters.deformation_kernel_width

    # Initializing the interpolation points.
    if xml_parameters.interpolation_points_file is None:
        print("I am initializing the interpolation points using the width", width)
        # Regular grid over the unit box, then keep only the points lying
        # close (within sqrt(2)*width) to at least one observation.
        box = np.zeros((dimension, 2))
        box[:, 1] = np.ones(dimension)
        interpolation_points_all = create_regular_grid_of_points(box, width)
        print("Suggested cp to fill the box:", len(interpolation_points_all))
        interpolation_points_filtered = []
        numpy_observations = np.concatenate([elt.cpu().data.numpy() for elt in dataset.deformable_objects])
        numpy_observations = numpy_observations.reshape(len(numpy_observations), dimension)
        for p in interpolation_points_all:
            if np.min(np.sum((p - numpy_observations) ** 2, 1)) < 2 * width**2:
                interpolation_points_filtered.append(p)
        print("Cp after filtering:", len(interpolation_points_filtered))
        interpolation_points = np.array(interpolation_points_filtered)
    else:
        print("Loading the interpolation points from file", xml_parameters.interpolation_points_file)
        interpolation_points = read_2D_array(xml_parameters.interpolation_points_file)
        interpolation_points = interpolation_points.reshape(len(interpolation_points), dimension)

    model.number_of_interpolation_points = len(interpolation_points)

    # Now initializing the metric parameters.
    if metric_parameters is None:
        # Naive initialization: each interpolation point gets a flattened
        # upper-triangular matrix (dim*(dim+1)/2 entries) with equal weight
        # on the diagonal entries only.
        dim = xml_parameters.dimension
        # Indices (within the flattened upper-triangular layout) of the
        # diagonal entries, recovered by walking the rows backwards.
        diagonal_indices = []
        spacing = 0
        pos_in_line = 0
        for j in range(int(dim*(dim+1)/2) - 1, -1, -1):
            if pos_in_line == spacing:
                spacing += 1
                pos_in_line = 0
                diagonal_indices.append(j)
            else:
                pos_in_line += 1
        metric_parameters = np.zeros((model.number_of_interpolation_points, int(dim * (dim + 1) / 2)))
        val = np.sqrt(1. / model.number_of_interpolation_points)
        for i in range(len(metric_parameters)):
            for k in range(len(metric_parameters[i])):
                if k in diagonal_indices:
                    metric_parameters[i, k] = val
    else:
        assert len(metric_parameters) == len(interpolation_points), "Bad input format for the metric parameters"
        assert len(metric_parameters[0]) == dimension * (dimension + 1)/2, "Bad input format for the metric parameters"

    # Parameters of the parametric manifold:
    manifold_parameters = {}
    print("The width for the metric interpolation is set to", width)
    manifold_parameters['number_of_interpolation_points'] = model.number_of_interpolation_points
    manifold_parameters['width'] = width
    manifold_parameters['interpolation_points_torch'] = Variable(torch.from_numpy(interpolation_points)
                                                                 .type(Settings().tensor_scalar_type),
                                                                 requires_grad=False)
    manifold_parameters['interpolation_values_torch'] = Variable(torch.from_numpy(metric_parameters)
                                                                 .type(Settings().tensor_scalar_type))
    exponential_factory.set_parameters(manifold_parameters)
    return metric_parameters
def initialize_spatiotemporal_reference_frame(model, xml_parameters, dataset, observation_type='image'):
    """Initialize everything related to the geodesic and its parameters.

    Sets up the exponential factory (parametric / deep / logistic), loads or
    delegates the initialisation of the metric parameters, attaches the
    spatiotemporal reference frame to `model`, and decides whether parallel
    transport (space-shift sources) is used.
    """
    assert xml_parameters.dimension is not None, "Provide a dimension for the longitudinal metric learning atlas."
    exponential_factory = ExponentialFactory()
    if xml_parameters.exponential_type is not None:
        print("Initializing exponential type to", xml_parameters.exponential_type)
        exponential_factory.set_manifold_type(xml_parameters.exponential_type)
    else:
        # NOTE(review): the warning claims a parametric default, but no
        # manifold type is actually set on the factory here, so none of the
        # manifold_type branches below will match — confirm ExponentialFactory's
        # own default before relying on this path.
        msg = "Defaulting exponential type to parametric"
        warnings.warn(msg)
    # Reading parameter file, if there is one:
    metric_parameters = None
    if xml_parameters.metric_parameters_file is not None:
        print("Loading metric parameters from file", xml_parameters.metric_parameters_file)
        metric_parameters = np.loadtxt(xml_parameters.metric_parameters_file)
    # Initial metric parameters (manifold-type specific setup).
    if exponential_factory.manifold_type == 'parametric':
        metric_parameters = _initialize_parametric_exponential(model, xml_parameters, dataset, exponential_factory, metric_parameters)
    if exponential_factory.manifold_type == 'deep':
        manifold_parameters = {'latent_space_dimension': xml_parameters.latent_space_dimension}
        exponential_factory.set_parameters(manifold_parameters)
    elif exponential_factory.manifold_type == 'logistic':
        """
        No initial parameter to set ! Just freeze the model parameters (or even delete the key ?)
        """
        model.is_frozen['metric_parameters'] = True
    # Attach the reference frame and its time discretisation.
    model.spatiotemporal_reference_frame = GenericSpatiotemporalReferenceFrame(exponential_factory)
    model.spatiotemporal_reference_frame.set_concentration_of_time_points(xml_parameters.concentration_of_time_points)
    model.spatiotemporal_reference_frame.set_number_of_time_points(xml_parameters.number_of_time_points)
    model.parametric_metric = (xml_parameters.exponential_type in ['parametric'])
    if xml_parameters.exponential_type == 'deep':
        model.deep_metric_learning = True
        model.latent_space_dimension = xml_parameters.latent_space_dimension
        model.initialize_deep_metric_learning()
        model.set_metric_parameters(metric_parameters)
    if xml_parameters.exponential_type == 'parametric':
        model.is_frozen['metric_parameters'] = xml_parameters.freeze_metric_parameters
        model.set_metric_parameters(metric_parameters)
    # Parallel transport is disabled in 1-D, or when there are no sources.
    if Settings().dimension == 1:
        print("I am setting the no_parallel_transport flag to True because the dimension is 1")
        model.no_parallel_transport = True
        model.spatiotemporal_reference_frame.no_parallel_transport = True
        model.number_of_sources = 0
    elif xml_parameters.number_of_sources == 0 or xml_parameters.number_of_sources is None:
        print("I am setting the no_parallel_transport flag to True because the number of sources is 0.")
        model.no_parallel_transport = True
        model.spatiotemporal_reference_frame.no_parallel_transport = True
        model.number_of_sources = 0
    else:
        print("I am setting the no_parallel_transport flag to False.")
        model.no_parallel_transport = False
        model.spatiotemporal_reference_frame.no_parallel_transport = False
        model.number_of_sources = xml_parameters.number_of_sources
def instantiate_longitudinal_metric_model(xml_parameters, dataset=None, number_of_subjects=None, observation_type='scalar'):
    """Build a LongitudinalMetricLearning model and its individual random effects.

    Reads fixed effects (reference time, v0, p0, variances, modulation matrix)
    from `xml_parameters`, initialises per-subject random effects (onset ages,
    log-accelerations, sources) from files or defaults, and estimates the
    noise variance from residuals when not provided.

    Returns (model, individual_RER).
    NOTE(review): although the signature allows dataset=None (with
    number_of_subjects given instead), `dataset` is dereferenced for the
    template below before that case is handled — confirm the None path.
    """
    model = LongitudinalMetricLearning()
    model.observation_type = observation_type # TODO : replace this with use of the template object.
    template = dataset.deformable_objects[0][0] # because we only care about its 'metadata'
    model.template = deepcopy(template)
    # Reference time
    model.set_reference_time(xml_parameters.t0)
    model.is_frozen['reference_time'] = xml_parameters.freeze_reference_time
    # Initial velocity
    initial_velocity_file = xml_parameters.v0
    model.set_v0(read_2D_array(initial_velocity_file))
    model.is_frozen['v0'] = xml_parameters.freeze_v0
    # Initial position
    initial_position_file = xml_parameters.p0
    model.set_p0(read_2D_array(initial_position_file))
    model.is_frozen['p0'] = xml_parameters.freeze_p0
    # Time shift variance
    model.set_onset_age_variance(xml_parameters.initial_time_shift_variance)
    model.is_frozen['onset_age_variance'] = xml_parameters.freeze_time_shift_variance
    # Log acceleration variance
    model.set_log_acceleration_variance(xml_parameters.initial_log_acceleration_variance)
    model.is_frozen["log_acceleration_variance"] = xml_parameters.freeze_log_acceleration_variance
    # Non-mandatory parameters, the model can initialize them
    # Noise variance
    if xml_parameters.initial_noise_variance is not None:
        model.set_noise_variance(xml_parameters.initial_noise_variance)
    # Initializations of the individual random effects
    assert not (dataset is None and number_of_subjects is None), "Provide at least one info"
    if dataset is not None:
        number_of_subjects = dataset.number_of_subjects
    # Onset ages: from file when given, else everyone starts at the reference time.
    if xml_parameters.initial_onset_ages is not None:
        print("Setting initial onset ages from", xml_parameters.initial_onset_ages, "file")
        onset_ages = read_2D_array(xml_parameters.initial_onset_ages).reshape((len(dataset.times),))
    else:
        print("Initializing all the onset_ages to the reference time.")
        onset_ages = np.zeros((number_of_subjects,))
        onset_ages += model.get_reference_time()
    # Log-accelerations: from file when given, else zero (unit pace).
    if xml_parameters.initial_log_accelerations is not None:
        print("Setting initial log accelerations from", xml_parameters.initial_log_accelerations, "file")
        log_accelerations = read_2D_array(xml_parameters.initial_log_accelerations).reshape((len(dataset.times),))
    else:
        print("Initializing all log-accelerations to zero.")
        log_accelerations = np.zeros((number_of_subjects,))
    individual_RER = {}
    individual_RER['onset_age'] = onset_ages
    individual_RER['log_acceleration'] = log_accelerations
    # Initialization of the spatiotemporal reference frame.
    initialize_spatiotemporal_reference_frame(model, xml_parameters, dataset, observation_type=observation_type)
    # Modulation matrix.
    model.is_frozen['modulation_matrix'] = xml_parameters.freeze_modulation_matrix
    if xml_parameters.initial_modulation_matrix is not None:
        modulation_matrix = read_2D_array(xml_parameters.initial_modulation_matrix)
        if len(modulation_matrix.shape) == 1:
            # modulation_matrix = modulation_matrix.reshape(Settings().dimension, 1)
            modulation_matrix = modulation_matrix.reshape(xml_parameters.latent_space_dimension, xml_parameters.latent_space_dimension-1)
        print('>> Reading ' + str(modulation_matrix.shape[1]) + '-source initial modulation matrix from file: '
              + xml_parameters.initial_modulation_matrix)
        assert xml_parameters.number_of_sources == modulation_matrix.shape[1], "Please set correctly the number of sources"
        model.set_modulation_matrix(modulation_matrix)
        model.number_of_sources = modulation_matrix.shape[1]
    else:
        model.number_of_sources = xml_parameters.number_of_sources
        modulation_matrix = np.zeros((Settings().dimension, model.number_of_sources))
        model.set_modulation_matrix(modulation_matrix)
    model.initialize_modulation_matrix_variables()
    # Sources initialization
    if xml_parameters.initial_sources is not None:
        print("Setting initial sources from", xml_parameters.initial_sources, "file")
        individual_RER['sources'] = read_2D_array(xml_parameters.initial_sources).reshape(len(dataset.times), model.number_of_sources)
    elif model.number_of_sources > 0:
        print("Initializing all sources to zero")
        individual_RER['sources'] = np.zeros((number_of_subjects, model.number_of_sources))
    model.initialize_source_variables()
    if dataset is not None:
        total_number_of_observations = dataset.total_number_of_observations
        model.number_of_subjects = dataset.number_of_subjects
        # Estimate the initial noise variance from the mean residual when it
        # was not provided in the XML parameters.
        if model.get_noise_variance() is None:
            v0, p0, metric_parameters, modulation_matrix = model._fixed_effects_to_torch_tensors(False)
            onset_ages, log_accelerations, sources = model._individual_RER_to_torch_tensors(individual_RER, False)
            residuals = model._compute_residuals(dataset, v0, p0, metric_parameters, modulation_matrix,
                                                 log_accelerations, onset_ages, sources)
            total_residual = 0.
            for i in range(len(residuals)):
                total_residual += torch.sum(residuals[i]).cpu().data.numpy()[0]
            dof = total_number_of_observations
            nv = total_residual / dof
            model.set_noise_variance(nv)
            print('>> Initial noise variance set to %.2f based on the initial mean residual value.' % nv)
        if not model.is_frozen['noise_variance']:
            dof = total_number_of_observations
            model.priors['noise_variance'].degrees_of_freedom.append(dof)
    else:
        if model.get_noise_variance() is None:
            raise RuntimeError("I can't initialize the initial noise variance: no dataset and no initialization given.")
    model.is_frozen['noise_variance'] = xml_parameters.freeze_noise_variance
    model.update()
    return model, individual_RER
def estimate_longitudinal_metric_model(xml_parameters):
    """Instantiate a longitudinal metric model and run its estimation.

    Builds the dataset (scalar or image observations), instantiates the model,
    configures the optimizer selected in the XML parameters, and launches the
    estimation, writing results to the configured output directory.

    Parameters
    ----------
    xml_parameters : object
        Parsed XML configuration: dataset file names, template specifications,
        and all optimization settings.
    """
    print('')
    print('[ estimate_longitudinal_metric_model function ]')
    print('')

    dataset = None
    # Two alternatives: scalar dataset or image dataset for now.
    observation_type = 'None'
    template_specifications = xml_parameters.template_specifications
    # A single 'scalar' template object switches the whole run to scalar mode.
    for val in template_specifications.values():
        if val['deformable_object_type'].lower() == 'scalar':
            dataset = read_and_create_scalar_dataset(xml_parameters)
            observation_type = 'scalar'
            #dataset.order_observations()
            break

    if dataset is None:
        # No scalar template found: fall back to an image dataset.
        dataset = read_and_create_image_dataset(xml_parameters.dataset_filenames, xml_parameters.visit_ages,
                                                xml_parameters.subject_ids, xml_parameters.template_specifications)
        observation_type = 'image'

    model, individual_RER = instantiate_longitudinal_metric_model(xml_parameters, dataset, observation_type=observation_type)

    # Select and configure the estimator. Method names are compared lowercased.
    if xml_parameters.optimization_method_type == 'GradientAscent'.lower():
        estimator = GradientAscent()
        estimator.initial_step_size = xml_parameters.initial_step_size
        estimator.scale_initial_step_size = xml_parameters.scale_initial_step_size
        estimator.max_line_search_iterations = xml_parameters.max_line_search_iterations
        estimator.line_search_shrink = xml_parameters.line_search_shrink
        estimator.line_search_expand = xml_parameters.line_search_expand
        estimator.optimized_log_likelihood = xml_parameters.optimized_log_likelihood

    elif xml_parameters.optimization_method_type == 'ScipyLBFGS'.lower():
        estimator = ScipyOptimize()
        estimator.max_line_search_iterations = xml_parameters.max_line_search_iterations
        estimator.memory_length = xml_parameters.memory_length
        # estimator.memory_length = 1
        # msg = 'Impossible to use a Sobolev gradient for the template data with the ScipyLBFGS estimator memory ' \
        #       'length being larger than 1. Overriding the "memory_length" option, now set to "1".'
        # warnings.warn(msg)

    elif xml_parameters.optimization_method_type == 'McmcSaem'.lower():
        # Stochastic EM: an MCMC sampler for the individual random effects,
        # plus an inner gradient-based estimator for the fixed effects.
        sampler = SrwMhwgSampler()
        estimator = McmcSaem()
        estimator.sampler = sampler

        # Onset age proposal distribution.
        onset_age_proposal_distribution = MultiScalarNormalDistribution()
        onset_age_proposal_distribution.set_variance_sqrt(xml_parameters.onset_age_proposal_std)
        sampler.individual_proposal_distributions['onset_age'] = onset_age_proposal_distribution

        # Log-acceleration proposal distribution.
        log_acceleration_proposal_distribution = MultiScalarNormalDistribution()
        log_acceleration_proposal_distribution.set_variance_sqrt(xml_parameters.log_acceleration_proposal_std)
        sampler.individual_proposal_distributions['log_acceleration'] = log_acceleration_proposal_distribution

        # Sources proposal distribution (only when the model has sources).
        if model.number_of_sources > 0:
            sources_proposal_distribution = MultiScalarNormalDistribution()
            sources_proposal_distribution.set_variance_sqrt(xml_parameters.sources_proposal_std)
            sampler.individual_proposal_distributions['sources'] = sources_proposal_distribution

        estimator.sample_every_n_mcmc_iters = xml_parameters.sample_every_n_mcmc_iters

        # Gradient-based estimator (inner maximization step; short, frequent runs).
        estimator.gradient_based_estimator = GradientAscent()
        estimator.gradient_based_estimator.statistical_model = model
        estimator.gradient_based_estimator.dataset = dataset
        estimator.gradient_based_estimator.optimized_log_likelihood = 'class2'
        estimator.gradient_based_estimator.max_iterations = 5
        estimator.gradient_based_estimator.max_line_search_iterations = 5
        estimator.gradient_based_estimator.convergence_tolerance = 1e-2
        estimator.gradient_based_estimator.print_every_n_iters = 1
        # Effectively disables intermediate saving of the inner estimator.
        estimator.gradient_based_estimator.save_every_n_iters = 100000
        estimator.gradient_based_estimator.initial_step_size = xml_parameters.initial_step_size
        estimator.gradient_based_estimator.line_search_shrink = 0.5
        estimator.gradient_based_estimator.line_search_expand = 1.2
        estimator.gradient_based_estimator.scale_initial_step_size = True

        # All iterations are treated as burn-in here.
        estimator.number_of_burn_in_iterations = xml_parameters.max_iterations

    else:
        # Unknown method: fall back to gradient ascent and warn.
        estimator = GradientAscent()
        estimator.initial_step_size = xml_parameters.initial_step_size
        estimator.max_line_search_iterations = xml_parameters.max_line_search_iterations
        estimator.line_search_shrink = xml_parameters.line_search_shrink
        estimator.line_search_expand = xml_parameters.line_search_expand

        msg = 'Unknown optimization-method-type: \"' + xml_parameters.optimization_method_type \
              + '\". Defaulting to GradientAscent.'
        warnings.warn(msg)

    # Settings shared by every estimator type.
    estimator.max_iterations = xml_parameters.max_iterations
    estimator.convergence_tolerance = xml_parameters.convergence_tolerance
    estimator.print_every_n_iters = xml_parameters.print_every_n_iters
    estimator.save_every_n_iters = xml_parameters.save_every_n_iters

    estimator.dataset = dataset
    estimator.statistical_model = model

    # Initial random effects realizations
    estimator.individual_RER = individual_RER

    """
    Launch.
    """

    if not os.path.exists(Settings().output_dir): os.makedirs(Settings().output_dir)

    model.name = 'LongitudinalMetricModel'

    print('')
    print('[ update method of the ' + estimator.name + ' optimizer ]')

    start_time = time.time()
    estimator.update()
    estimator.write()
    end_time = time.time()
    print('>> Estimation took: ' + str(time.strftime("%d days, %H hours, %M minutes and %S seconds.",
                                                     time.gmtime(end_time - start_time))))
|
{"hexsha": "453a48083f60726a63b581081f3a7e8a9e43224d", "size": 20956, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/launch/estimate_longitudinal_metric_model.py", "max_stars_repo_name": "EuroPOND/deformetrica", "max_stars_repo_head_hexsha": "29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-27T07:30:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-27T07:30:56.000Z", "max_issues_repo_path": "src/launch/estimate_longitudinal_metric_model.py", "max_issues_repo_name": "EuroPOND/deformetrica", "max_issues_repo_head_hexsha": "29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/launch/estimate_longitudinal_metric_model.py", "max_forks_repo_name": "EuroPOND/deformetrica", "max_forks_repo_head_hexsha": "29cb3a4ecd40d6a19b3ee2a1c5827c4ebab54062", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.6218097448, "max_line_length": 141, "alphanum_fraction": 0.7400744417, "include": true, "reason": "import numpy", "num_tokens": 4142}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "kudu/tablet/deltamemstore.h"
#include <memory>
#include <ostream>
#include <utility>
#include <boost/optional/optional.hpp>
#include <glog/logging.h>
#include "kudu/common/row_changelist.h"
#include "kudu/common/timestamp.h"
#include "kudu/consensus/opid.pb.h"
#include "kudu/gutil/port.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/tablet/delta_key.h"
#include "kudu/tablet/deltafile.h"
#include "kudu/tablet/rowset.h"
#include "kudu/util/faststring.h"
#include "kudu/util/memcmpable_varint.h"
#include "kudu/util/memory/memory.h"
#include "kudu/util/slice.h"
#include "kudu/util/status.h"
namespace kudu {
namespace tablet {
using fs::IOContext;
using log::LogAnchorRegistry;
using std::string;
using std::shared_ptr;
using std::unique_ptr;
using std::vector;
using strings::Substitute;
////////////////////////////////////////////////////////////
// DeltaMemStore implementation
////////////////////////////////////////////////////////////
// Initial allocation size for the arena backing the in-memory delta tree;
// the arena grows on demand (units presumably bytes — see the Arena ctor).
static const int kInitialArenaSize = 16;
// Factory: builds a DeltaMemStore and hands ownership to the caller through
// 'dms'. Always succeeds; the Status return keeps the factory signature
// uniform with other store types.
Status DeltaMemStore::Create(int64_t id,
                             int64_t rs_id,
                             LogAnchorRegistry* log_anchor_registry,
                             shared_ptr<MemTracker> parent_tracker,
                             shared_ptr<DeltaMemStore>* dms) {
  dms->reset(new DeltaMemStore(id, rs_id,
                               log_anchor_registry,
                               std::move(parent_tracker)));
  return Status::OK();
}
DeltaMemStore::DeltaMemStore(int64_t id,
                             int64_t rs_id,
                             LogAnchorRegistry* log_anchor_registry,
                             shared_ptr<MemTracker> parent_tracker)
  : id_(id),
    rs_id_(rs_id),
    // Chain a tracking allocator under the given parent tracker so that the
    // memory consumed by the delta tree's arena is accounted against it.
    allocator_(new MemoryTrackingBufferAllocator(
        HeapBufferAllocator::Get(), std::move(parent_tracker))),
    arena_(new ThreadSafeMemoryTrackingArena(kInitialArenaSize, allocator_)),
    tree_(arena_),
    // The anchor name identifies this DMS within the log anchor registry.
    anchorer_(log_anchor_registry,
              Substitute("Rowset-$0/DeltaMemStore-$1", rs_id_, id_)),
    // Sequence used by Update() to disambiguate duplicate (row, timestamp) keys.
    disambiguator_sequence_number_(0) {
}
// No-op: a DeltaMemStore requires no I/O to initialize.
Status DeltaMemStore::Init(const IOContext* /*io_context*/) {
  return Status::OK();
}
// Inserts the delta 'update' for row 'row_idx' at 'timestamp'.
//
// Tree entries are keyed by the encoded (row index, timestamp) pair. If an
// entry with that exact key already exists, a monotonically increasing
// sequence number is appended to the encoded key so both deltas are kept.
// Returns IOError if the tree rejects the insertion.
Status DeltaMemStore::Update(Timestamp timestamp,
                             rowid_t row_idx,
                             const RowChangeList &update,
                             const consensus::OpId& op_id) {
  DeltaKey key(row_idx, timestamp);

  faststring buf;
  key.EncodeTo(&buf);
  Slice key_slice(buf);
  btree::PreparedMutation<DMSTreeTraits> mutation(key_slice);
  mutation.Prepare(&tree_);
  if (PREDICT_FALSE(mutation.exists())) {
    // We already have a delta for this row at the same timestamp.
    // Try again with a disambiguating sequence number appended to the key.
    int seq = disambiguator_sequence_number_.Increment();
    PutMemcmpableVarint64(&buf, seq);
    key_slice = Slice(buf);
    mutation.Reset(key_slice);
    mutation.Prepare(&tree_);
    CHECK(!mutation.exists())
        << "Appended a sequence number but still hit a duplicate "
        << "for rowid " << row_idx << " at timestamp " << timestamp;
  }
  if (PREDICT_FALSE(!mutation.Insert(update.slice()))) {
    return Status::IOError("Unable to insert into tree");
  }

  // Anchor this op's log index (presumably so the WAL entry is retained
  // until the DMS is flushed — see LogAnchorRegistry).
  anchorer_.AnchorIfMinimum(op_id.index());
  return Status::OK();
}
// Writes the entire contents of this store to 'dfw' in key order, and
// returns the accumulated per-delta statistics through 'stats_ret'.
// All deltas held in a DMS are appended as REDO deltas.
Status DeltaMemStore::FlushToFile(DeltaFileWriter *dfw,
                                  gscoped_ptr<DeltaStats>* stats_ret) {
  gscoped_ptr<DeltaStats> stats(new DeltaStats());

  gscoped_ptr<DMSTreeIter> iter(tree_.NewIterator());
  iter->SeekToStart();
  while (iter->IsValid()) {
    Slice key_slice, val;
    iter->GetCurrentEntry(&key_slice, &val);
    DeltaKey key;
    RETURN_NOT_OK(key.DecodeFrom(&key_slice));
    RowChangeList rcl(val);
    RETURN_NOT_OK_PREPEND(dfw->AppendDelta<REDO>(key, rcl), "Failed to append delta");
    // Track per-timestamp stats for the flushed deltas.
    stats->UpdateStats(key.timestamp(), rcl);
    iter->Next();
  }
  dfw->WriteDeltaStats(*stats);
  stats_ret->swap(stats);
  return Status::OK();
}
// Creates a new iterator over this store's deltas. The iterator takes a
// shared_ptr back to the store, keeping it alive while the iterator exists.
Status DeltaMemStore::NewDeltaIterator(const RowIteratorOptions& opts,
                                       unique_ptr<DeltaIterator>* iterator) const {
  iterator->reset(new DMSIterator(shared_from_this(), opts));
  return Status::OK();
}
// Determines whether the net effect of the deltas for 'row_idx' in this
// store leaves the row deleted. Sets '*deleted' to false when the row has no
// deltas, or when its mutations do not end in a deletion.
Status DeltaMemStore::CheckRowDeleted(rowid_t row_idx,
                                      const IOContext* /*io_context*/,
                                      bool *deleted) const {
  *deleted = false;
  // Seek to the first possible entry for the row: deltas are keyed by
  // (row index, timestamp), so Timestamp(0) sorts at or before any real
  // delta for this row.
  DeltaKey key(row_idx, Timestamp(0));
  faststring buf;
  key.EncodeTo(&buf);
  // Renamed from 'key_slice' so the loop-local slice below no longer
  // shadows it.
  Slice seek_slice(buf);

  bool exact;
  // TODO(unknown): can we avoid the allocation here?
  gscoped_ptr<DMSTreeIter> iter(tree_.NewIterator());
  if (!iter->SeekAtOrAfter(seek_slice, &exact)) {
    return Status::OK();
  }

  while (iter->IsValid()) {
    // Iterate forward until reaching an entry with a larger row idx.
    Slice key_slice, v;
    iter->GetCurrentEntry(&key_slice, &v);
    RETURN_NOT_OK(key.DecodeFrom(&key_slice));
    DCHECK_GE(key.row_idx(), row_idx);
    if (key.row_idx() != row_idx) break;

    // Mutation is for the target row: fold its effect into the deletion
    // status. (The previously constructed 'RowChangeList val(v)' local was
    // unused and has been removed.)
    RowChangeListDecoder decoder((RowChangeList(v)));
    decoder.InitNoSafetyChecks();
    decoder.TwiddleDeleteStatus(deleted);

    iter->Next();
  }

  return Status::OK();
}
// Dumps the underlying tree's contents for debugging.
void DeltaMemStore::DebugPrint() const {
  tree_.DebugPrint();
}
////////////////////////////////////////////////////////////
// DMSIterator
////////////////////////////////////////////////////////////
// Constructs an iterator over 'dms'. The shared_ptr keeps the memstore
// alive for the iterator's lifetime; 'opts' is handed to the preparer.
DMSIterator::DMSIterator(const shared_ptr<const DeltaMemStore>& dms,
                         RowIteratorOptions opts)
    : dms_(dms),
      preparer_(std::move(opts)),
      iter_(dms->tree_.NewIterator()),
      seeked_(false) {}
// The scan spec is ignored; the iterator only records that it has been
// initialized (checked later in PrepareBatch()).
Status DMSIterator::Init(ScanSpec* /*spec*/) {
  initted_ = true;
  return Status::OK();
}
// Positions the tree iterator and the preparer at the first delta whose row
// index is >= 'row_idx'.
Status DMSIterator::SeekToOrdinal(rowid_t row_idx) {
  faststring buf;
  // Timestamp(0) sorts at or before any real delta for the row, so
  // SeekAtOrAfter lands on the row's first entry (or the next row's).
  DeltaKey key(row_idx, Timestamp(0));
  key.EncodeTo(&buf);
  bool exact; /* unused */
  iter_->SeekAtOrAfter(Slice(buf), &exact);
  preparer_.Seek(row_idx);
  seeked_ = true;
  return Status::OK();
}
// Copies the deltas for the next 'nrows' rows (starting at the preparer's
// current position) out of the tree and into the preparer. Must be called
// after Init() and a seek.
Status DMSIterator::PrepareBatch(size_t nrows, int prepare_flags) {
  // This current implementation copies the whole batch worth of deltas
  // into a buffer local to this iterator, after filtering out deltas which
  // aren't yet committed in the current MVCC snapshot. The theory behind
  // this approach is the following:

  // Each batch needs to be processed once per column, meaning that unless
  // we make a local copy, we'd have to reset the CBTree iterator back to the
  // start of the batch and re-iterate for each column. CBTree iterators make
  // local copies as they progress in order to shield from concurrent mutation,
  // so with N columns, we'd end up making N copies of the data. Making a local
  // copy here is instead a single copy of the data, so is likely faster.
  CHECK(seeked_);
  DCHECK(initted_) << "must init";
  rowid_t start_row = preparer_.cur_prepared_idx();
  rowid_t stop_row = start_row + nrows - 1;  // inclusive upper bound

  preparer_.Start(nrows, prepare_flags);
  bool finished_row = false;
  while (iter_->IsValid()) {
    Slice key_slice, val;
    iter_->GetCurrentEntry(&key_slice, &val);
    DeltaKey key;
    RETURN_NOT_OK(key.DecodeFrom(&key_slice));
    rowid_t cur_row = key.row_idx();
    DCHECK_GE(cur_row, start_row);

    // If this delta is for the same row as before, skip it if the previous
    // AddDelta() call told us that we're done with this row.
    if (preparer_.last_added_idx() &&
        preparer_.last_added_idx() == cur_row &&
        finished_row) {
      iter_->Next();
      continue;
    }
    finished_row = false;

    if (cur_row > stop_row) {
      // Delta is for a row which comes after the block we're processing.
      break;
    }

    // Note: if AddDelta() sets 'finished_row' to true, we could skip the
    // remaining deltas for this row by seeking the tree iterator. This trades
    // off the cost of a seek against the cost of decoding some irrelevant delta
    // keys. Experimentation with a microbenchmark revealed that only when ~50
    // deltas were skipped was the seek cheaper than the decoding.
    //
    // Given that updates are expected to be uncommon and that most scans are
    // _not_ historical, the current implementation eschews seeking in favor of
    // skipping irrelevant deltas one by one.
    RETURN_NOT_OK(preparer_.AddDelta(key, val, &finished_row));
    iter_->Next();
  }
  preparer_.Finish(nrows);
  return Status::OK();
}
// The methods below forward to the 'preparer_' member, which holds the
// deltas copied out of the tree during PrepareBatch().

Status DMSIterator::ApplyUpdates(size_t col_to_apply, ColumnBlock* dst,
                                 const SelectionVector& filter) {
  return preparer_.ApplyUpdates(col_to_apply, dst, filter);
}

Status DMSIterator::ApplyDeletes(SelectionVector* sel_vec) {
  return preparer_.ApplyDeletes(sel_vec);
}

Status DMSIterator::SelectUpdates(SelectionVector* sel_vec) {
  return preparer_.SelectUpdates(sel_vec);
}

Status DMSIterator::CollectMutations(vector<Mutation*>*dst, Arena* arena) {
  return preparer_.CollectMutations(dst, arena);
}

Status DMSIterator::FilterColumnIdsAndCollectDeltas(const vector<ColumnId>& col_ids,
                                                    vector<DeltaKeyAndUpdate>* out,
                                                    Arena* arena) {
  return preparer_.FilterColumnIdsAndCollectDeltas(col_ids, out, arena);
}

// True while the underlying tree iterator has remaining entries.
bool DMSIterator::HasNext() {
  return iter_->IsValid();
}

bool DMSIterator::MayHaveDeltas() const {
  return preparer_.MayHaveDeltas();
}

string DMSIterator::ToString() const {
  return "DMSIterator";
}
} // namespace tablet
} // namespace kudu
|
{"hexsha": "6f658109200a26d473c78a3214a5a49688446320", "size": 10564, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/kudu/tablet/deltamemstore.cc", "max_stars_repo_name": "sdreynolds/kudu", "max_stars_repo_head_hexsha": "13642f60f9a6ba6dd77f97a6736467b8ab5849af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-07-17T19:08:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-17T19:13:25.000Z", "max_issues_repo_path": "src/kudu/tablet/deltamemstore.cc", "max_issues_repo_name": "sdreynolds/kudu", "max_issues_repo_head_hexsha": "13642f60f9a6ba6dd77f97a6736467b8ab5849af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kudu/tablet/deltamemstore.cc", "max_forks_repo_name": "sdreynolds/kudu", "max_forks_repo_head_hexsha": "13642f60f9a6ba6dd77f97a6736467b8ab5849af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3249211356, "max_line_length": 86, "alphanum_fraction": 0.6592199924, "num_tokens": 2456}
|
"""
Inexact augmented Lagrange multiplier (IALM)
"""
import numpy as np
from numpy import linalg
from md_utils import shrinking
def jay_func(y_mat, lambd):
    """
    implements
        J(D) = max(norm_{2}(D), lambda^(-1)*norm_{inf}(D))
    where norm_{2} is the spectral norm and norm_{inf} the matrix
    infinity-norm (maximum absolute row sum).

    Parameters
    ----------
    y_mat : ndarray
        (m x n) data matrix D.
    lambd : float or int
        Weight of the sparse error term; must be non-zero.

    Returns
    -------
    float
        The value of J(D), used to initialize the IALM multiplier.
    """
    # Plain division replaces np.dot(np.reciprocal(lambd), ...):
    # np.reciprocal truncates on integer inputs (np.reciprocal(2) == 0),
    # which silently dropped the infinity-norm term for integer lambdas.
    return max(linalg.norm(y_mat, 2),
               linalg.norm(y_mat, np.inf) / lambd)
def rpca_ialm(
        data_mat, lmbda,
        max_iter, tol):
    """Robust PCA via principal component pursuit, solved with IALM.

    Alternates a singular-value shrinkage step for the low-rank part L
    and an entrywise shrinkage step for the sparse part S, increasing the
    penalty parameter mu geometrically each iteration.

    Required input:
        D - (m x n) data matrix
        lambda - weight of sparse error
    Adjustable parameters:
        tol - tolerance for stopping criterion (DEFAULT=1e-2)
        max_iter - maximum number of iterations (DEFAULT=1000)
    Return:
        s_hat - estimate of S
    """
    d_norm = linalg.norm(data_mat)
    l_k = np.zeros(data_mat.shape)
    s_k = np.zeros(data_mat.shape)
    # Multiplier initialization: Y_0 = D / J(D).
    y_k = data_mat/jay_func(data_mat, lmbda)
    mu_k = 1.25/linalg.norm(data_mat, 2)
    mu_bar = mu_k*1e7  # cap on the penalty parameter
    rho = 1.6          # geometric growth factor for mu

    # Solving RPCA-PCP via IALM
    # NOTE(review): y_k is never updated inside the loop; standard IALM
    # performs Y_{k+1} = Y_k + mu_k * (D - L - S) each iteration — confirm
    # whether the omission is intentional.
    converged = k = 0
    while converged == 0:
        # L-step: singular-value shrinkage of (D - S + Y/mu).
        U, sigm, v = linalg.svd(data_mat - s_k + np.reciprocal(mu_k)*y_k,
                                full_matrices=False)  # economy SVD
        sigm = np.diag(sigm)
        l_kp1 = np.dot(U, shrinking(sigm, np.reciprocal(mu_k)))
        l_kp1 = np.dot(l_kp1, v)

        # S-step: entrywise shrinkage of (D - L + Y/mu).
        shr = data_mat - l_kp1 + np.dot(np.reciprocal(mu_k), y_k)
        s_kp1 = shrinking(shr, lmbda*np.reciprocal(mu_k))

        mu_k = min(mu_k*rho, mu_bar)
        k = k+1
        l_k = l_kp1
        s_k = s_kp1

        # Relative reconstruction residual drives convergence.
        stop_criterion = linalg.norm(data_mat - l_k - s_k, 'fro')/d_norm
        if (converged == 0 and k >= max_iter) or stop_criterion < tol:
            converged = 1
    s_hat = s_k
    return s_hat
|
{"hexsha": "29e50b556a04fc992e8e2b139c2cda4086477329", "size": 1747, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_driven_method/ialm.py", "max_stars_repo_name": "vjhansen/IRSTD", "max_stars_repo_head_hexsha": "0470b6bd14701bfc12737f774686b84b03d48e1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-23T13:16:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T13:25:02.000Z", "max_issues_repo_path": "model_driven_method/ialm.py", "max_issues_repo_name": "vjhansen/IRSTD", "max_issues_repo_head_hexsha": "0470b6bd14701bfc12737f774686b84b03d48e1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_driven_method/ialm.py", "max_forks_repo_name": "vjhansen/IRSTD", "max_forks_repo_head_hexsha": "0470b6bd14701bfc12737f774686b84b03d48e1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-14T13:25:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T03:29:26.000Z", "avg_line_length": 27.296875, "max_line_length": 73, "alphanum_fraction": 0.5809959931, "include": true, "reason": "import numpy,from numpy", "num_tokens": 538}
|
/*
* Copyright 2010-2012 Esrille Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HTMLScriptElementImp.h"
#include <iostream>
#include <boost/bind.hpp>
#include <boost/version.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/device/file_descriptor.hpp>
#include "ECMAScript.h"
#include "utf.h"
#include "DocumentImp.h"
#include "DocumentWindow.h"
#include "U16InputStream.h"
#include "HTMLBindingElementImp.h"
namespace org { namespace w3c { namespace dom { namespace bootstrap {
// Constructs a fresh script element with all "prepare a script" state flags
// cleared and no pending fetch ('request' owned, deleted in the destructor).
HTMLScriptElementImp::HTMLScriptElementImp(DocumentImp* ownerDocument, const std::u16string& localName) :
    ObjectMixin(ownerDocument, localName),
    alreadyStarted(false),
    parserInserted(false),
    wasParserInserted(false),
    forceAsync(true),
    readyToBeParserExecuted(false),
    request(0)
{
}
// Copy constructor used by cloneNode(): the clone gets fresh preparation
// state (flags reset, no fetch) rather than copying the original's.
HTMLScriptElementImp::HTMLScriptElementImp(HTMLScriptElementImp* org, bool deep) :
    ObjectMixin(org, deep),
    alreadyStarted(false),
    parserInserted(false),
    wasParserInserted(false),
    forceAsync(true),
    readyToBeParserExecuted(false),
    request(0)
{
}
// Releases the owned fetch request, if any was started by prepare().
HTMLScriptElementImp::~HTMLScriptElementImp()
{
    delete request;
}
// cf. http://www.whatwg.org/specs/web-apps/current-work/multipage/scripting-1.html#prepare-a-script
// Implements (a subset of) the spec's "prepare a script" algorithm; see the
// link above. Returns true when a fetch was started or the inline script
// executed; false when preparation aborts early.
bool HTMLScriptElementImp::prepare()
{
    bool hasAsync = getAsync();
    bool hasDefer = getDefer();

    // A script element may only be prepared once.
    if (alreadyStarted)
        return false;
    // Move the parser-inserted flag aside for the remaining steps.
    if (parserInserted) {
        wasParserInserted = true;
        parserInserted = false;
    } else
        wasParserInserted = false;
    if (wasParserInserted && !hasAsync)
        forceAsync = true;

    // TODO 4. - 7.

    if (wasParserInserted) {
        parserInserted = true;
        forceAsync = false;
    }
    alreadyStarted = true;

    // TODO 10. - 13.

    DocumentImp* document = getOwnerDocumentImp();
    if (hasAttribute(u"src")) {
        // External script: start an asynchronous GET and register the script
        // with the document under the appropriate scheduling type.
        std::u16string src = getSrc();
        if (src.empty()) {
            // TODO: fire the error event
            return false;
        }
        request = new(std::nothrow) HttpRequest(document->getDocumentURI());
        if (request) {
            request->open(u"GET", src);
            request->setHandler(boost::bind(&HTMLScriptElementImp::notify, this));
            // Delay the document's load event until notify() runs.
            document->incrementLoadEventDelayCount();
            if (hasDefer && parserInserted && !hasAsync) {
                type = Defer;       // runs after parsing completes
                document->addDeferScript(this);
            } else if (parserInserted && !hasAsync) {
                type = Blocking;    // parser waits on this script
                document->setPendingParsingBlockingScript(this);
            } else if (!hasAsync && !forceAsync) {
                type = Ordered;     // async fetch, executed in insertion order
                document->addOrderedScript(this);
            } else {
                type = Async;       // executed as soon as fetched
                document->addAsyncScript(this);
            }
            request->send();
        }
        return true;
    }

    // TODO: Check style sheets that is blocking scripts
    // Inline script: execute immediately.
    return execute();
}
// Fetch-completion handler for external scripts (registered in prepare()).
// On HTTP 200 the script is marked ready and dispatched by scheduling type;
// on any other status the script is dropped from the document's lists.
void HTMLScriptElementImp::notify()
{
    DocumentImp* document = getOwnerDocumentImp();
    if (request->getStatus() == 200) {
        readyToBeParserExecuted = true;
        switch (type) {
        case Blocking:
            // Presumably the parser polls readyToBeParserExecuted and runs
            // the script itself; only release the load-event delay here.
            document->decrementLoadEventDelayCount();
            break;
        case Async:
            // Async scripts run as soon as their fetch completes.
            execute();
            document->removeAsyncScript(this);
            document->decrementLoadEventDelayCount();
            break;
        case Ordered:
            // An in-order script may now be runnable; let the document walk
            // its ordered-script list.
            document->processOrderedScripts();
            break;
        default:
            // Defer scripts wait until parsing finishes.
            // NOTE(review): the load-event delay is not released on this
            // success path — presumably done when the deferred script runs;
            // confirm.
            break;
        }
    } else {
        // Fetch failed: unregister the script and release the load-event
        // delay taken in prepare().
        switch (type) {
        case Blocking:
            assert(document->getPendingParsingBlockingScript() == this);
            document->setPendingParsingBlockingScript(0);
            break;
        case Defer:
            document->removeDeferScript(this);
            break;
        case Async:
            document->removeAsyncScript(this);
            break;
        case Ordered:
            document->removeOrderedScript(this);
            document->processOrderedScripts();
            break;
        default:
            break;
        }
        document->decrementLoadEventDelayCount();
    }
}
// Evaluates the script source in the owner document's script context.
// External scripts are read from the completed request; inline scripts come
// from the element's text content, with leading "<!--" and trailing "-->"
// comment guards stripped. Returns false only for an inline script whose
// text content is null.
bool HTMLScriptElementImp::execute()
{
    std::u16string script;
    if (request) {
        assert(request->getStatus() == 200);
        // Stream the response body from its file descriptor without taking
        // ownership of the descriptor.
        boost::iostreams::stream<boost::iostreams::file_descriptor_source> stream(request->getContentDescriptor(), boost::iostreams::never_close_handle);
        U16InputStream u16stream(stream, "utf-8");  // TODO detect encode
        script = u16stream;
    } else {
        Nullable<std::u16string> content = getTextContent();
        if (!content.hasValue())
            return false;
        script = content.value();
        stripLeadingAndTrailingWhitespace(script);
        if (script.compare(0, 4, u"<!--") == 0)
            script.erase(0, 4);
        if (3 <= script.length() && script.compare(script.length() - 3, 3, u"-->") == 0)
            script.erase(script.length() - 3);
    }
    DocumentWindowPtr window = getOwnerDocumentImp()->activate();
    Any result = window->getContext()->evaluate(script);
    // If the parent is a binding element without an implementation yet, the
    // evaluation result object becomes that binding's implementation.
    if (auto binding = dynamic_cast<HTMLBindingElementImp*>(getParentElement().self())) {
        if (result.isObject() && !binding->getImplementation())
            binding->setImplementation(result.toObject());
    }
    return true;
}
// Node
// Creates a copy of this element; 'deep' is forwarded to the copy
// constructor.
Node HTMLScriptElementImp::cloneNode(bool deep)
{
    return new(std::nothrow) HTMLScriptElementImp(this, deep);
}

// HTMLScriptElement

// Reflects the 'src' content attribute.
std::u16string HTMLScriptElementImp::getSrc()
{
    return getAttribute(u"src");
}

void HTMLScriptElementImp::setSrc(const std::u16string& src)
{
    setAttribute(u"src", src);
}

// Effective async flag: true while forceAsync is set; otherwise true when
// the 'async' attribute is present with an empty value or the value "async"
// (compareIgnoreCase appears to return 0 on a match — confirm).
bool HTMLScriptElementImp::getAsync()
{
    if (forceAsync)
        return true;
    Nullable<std::u16string> value = getAttribute(u"async");
    if (!value.hasValue())
        return false;
    return value.value().empty() || !compareIgnoreCase(value.value(), u"async");
}

// Setting the IDL attribute clears forceAsync and reflects to the content
// attribute.
void HTMLScriptElementImp::setAsync(bool async)
{
    forceAsync = false;
    if (async)
        setAttribute(u"async", u"");
    else
        removeAttribute(u"async");
}

// True when the 'defer' attribute is present with an empty value or the
// value "defer" (same comparison convention as getAsync()).
bool HTMLScriptElementImp::getDefer()
{
    Nullable<std::u16string> value = getAttribute(u"defer");
    if (!value.hasValue())
        return false;
    return value.value().empty() || !compareIgnoreCase(value.value(), u"defer");
}

void HTMLScriptElementImp::setDefer(bool defer)
{
    if (defer)
        setAttribute(u"defer", u"");
    else
        removeAttribute(u"defer");
}

// Reflects the 'type' content attribute.
std::u16string HTMLScriptElementImp::getType()
{
    return getAttribute(u"type");
}

void HTMLScriptElementImp::setType(const std::u16string& type)
{
    setAttribute(u"type", type);
}

// Reflects the 'charset' content attribute.
std::u16string HTMLScriptElementImp::getCharset()
{
    return getAttribute(u"charset");
}

void HTMLScriptElementImp::setCharset(const std::u16string& charset)
{
    setAttribute(u"charset", charset);
}

// The 'text' IDL attribute maps to the element's text content.
std::u16string HTMLScriptElementImp::getText()
{
    return getTextContent();
}

void HTMLScriptElementImp::setText(const std::u16string& text)
{
    setTextContent(text);
}

// Legacy 'event' / 'htmlFor' attributes: intentionally inert (getters
// return the empty string, setters ignore their argument).
std::u16string HTMLScriptElementImp::getEvent()
{
    return u"";
}

void HTMLScriptElementImp::setEvent(const std::u16string& event)
{
}

std::u16string HTMLScriptElementImp::getHtmlFor()
{
    return u"";
}

void HTMLScriptElementImp::setHtmlFor(const std::u16string& htmlFor)
{
}
}}}} // org::w3c::dom::bootstrap
|
{"hexsha": "082225066d4ba56cde8d848efe6edc98a6ddd152", "size": 7941, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "escort/src/html/HTMLScriptElementImp.cpp", "max_stars_repo_name": "rvedam/es-operating-system", "max_stars_repo_head_hexsha": "32d3e4791c28a5623744800f108d029c40c745fc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-11-30T18:38:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T07:44:03.000Z", "max_issues_repo_path": "escort/src/html/HTMLScriptElementImp.cpp", "max_issues_repo_name": "LambdaLord/es-operating-system", "max_issues_repo_head_hexsha": "32d3e4791c28a5623744800f108d029c40c745fc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-01-14T03:09:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-14T03:09:45.000Z", "max_forks_repo_path": "escort/src/html/HTMLScriptElementImp.cpp", "max_forks_repo_name": "LambdaLord/es-operating-system", "max_forks_repo_head_hexsha": "32d3e4791c28a5623744800f108d029c40c745fc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9186440678, "max_line_length": 153, "alphanum_fraction": 0.638206775, "num_tokens": 1825}
|
import pytest
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from hulearn.datasets import load_titanic
from hulearn.classification.functionclassifier import FunctionClassifier
from hulearn.common import flatten
from tests.conftest import (
select_tests,
general_checks,
classifier_checks,
nonmeta_checks,
)
def predict(X):
    """Deterministic pseudo-random binary labels, one per sample in X."""
    np.random.seed(42)
    draws = np.random.normal(0, 1, len(X))
    return (draws > 0.5).astype(int)
def predict_variant(X):
    """Deterministic pseudo-random binary labels with a 0.0 threshold."""
    np.random.seed(42)
    draws = np.random.normal(0, 1, len(X))
    return (draws > 0.0).astype(int)
def class_based(dataf, sex="male", pclass=1):
    """Flag rows matching the given sex AND pclass: 1 for a match, else 0."""
    matches = (dataf["sex"] == sex) & (dataf["pclass"] == pclass)
    return np.array(matches).astype(int)
@pytest.mark.parametrize(
    "test_fn",
    select_tests(
        include=flatten([general_checks, classifier_checks, nonmeta_checks]),
        # Checks excluded from the run (presumably not applicable to a
        # classifier whose behavior is fully delegated to a user function).
        exclude=[
            "check_methods_subset_invariance",
            "check_fit2d_1sample",
            "check_fit2d_1feature",
            "check_classifier_data_not_an_array",
            "check_classifiers_one_label",
            "check_classifiers_classes",
            "check_classifiers_train",
            "check_supervised_y_2d",
            "check_estimators_pickle",
            "check_pipeline_consistency",
            "check_fit2d_predict1d",
            "check_fit1d",
            "check_dtype_object",
            "check_complex_data",
            "check_estimators_empty_data_messages",
            "check_estimators_nan_inf",
            "check_estimator_sparse_data",
            "check_supervised_y_no_nan",
            "check_estimators_partial_fit_n_features",
            "check_sample_weights_list",
            "check_sample_weights_pandas_series",
        ],
    ),
)
def test_estimator_checks(test_fn):
    """Run each selected scikit-learn estimator check against a
    FunctionClassifier wrapping the deterministic `predict` helper."""
    clf = FunctionClassifier(func=predict)
    test_fn(FunctionClassifier.__name__ + "_fallback", clf)
def test_works_with_gridsearch(random_xy_dataset_clf):
    """FunctionClassifier is usable inside GridSearchCV, with the wrapped
    function itself treated as a searchable hyperparameter."""
    X, y = random_xy_dataset_clf
    clf = FunctionClassifier(func=predict)
    grid = GridSearchCV(clf, cv=5, param_grid={"func": [predict, predict_variant]})
    # Smoke test: fit + predict must complete without raising.
    grid.fit(X, y).predict(X)
def test_smoke_with_pandas():
    """Grid-searching over the rule's keyword arguments works on the Titanic
    dataframe, and cv_results_ can be rendered as a DataFrame."""
    df = load_titanic(as_frame=True)
    X, y = df.drop(columns=["survived"]), df["survived"]
    # pclass=10 is deliberately out of range; the grid overrides it below.
    mod = FunctionClassifier(class_based, pclass=10)
    params = {"pclass": [1, 2, 3], "sex": ["male", "female"]}
    grid = GridSearchCV(mod, cv=3, param_grid=params).fit(X, y)
    pd.DataFrame(grid.cv_results_)
def test_smoke_partial_fit():
    """partial_fit accepts the `classes` keyword and predicts one label per
    input row."""
    df = load_titanic(as_frame=True)
    X, y = df.drop(columns=["survived"]), df["survived"]
    mod = FunctionClassifier(class_based, pclass=10)
    assert mod.partial_fit(X, y, classes=np.unique(y)).predict(X).shape[0] == y.shape[0]
|
{"hexsha": "c3872bc622322fde7896f809e54ca279ab9449e2", "size": 2818, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_classification/test_functionclassifier.py", "max_stars_repo_name": "ParikhKadam/human-learn", "max_stars_repo_head_hexsha": "f3cb41aa4f18bd079aefe6e843d24530c15ddb3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 575, "max_stars_repo_stars_event_min_datetime": "2020-09-12T05:24:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:11:04.000Z", "max_issues_repo_path": "tests/test_classification/test_functionclassifier.py", "max_issues_repo_name": "ParikhKadam/human-learn", "max_issues_repo_head_hexsha": "f3cb41aa4f18bd079aefe6e843d24530c15ddb3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2020-09-13T15:33:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T15:45:02.000Z", "max_forks_repo_path": "tests/test_classification/test_functionclassifier.py", "max_forks_repo_name": "ParikhKadam/human-learn", "max_forks_repo_head_hexsha": "f3cb41aa4f18bd079aefe6e843d24530c15ddb3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 48, "max_forks_repo_forks_event_min_datetime": "2020-09-13T15:29:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T20:07:59.000Z", "avg_line_length": 30.967032967, "max_line_length": 88, "alphanum_fraction": 0.6689141235, "include": true, "reason": "import numpy", "num_tokens": 696}
|
from unittest import TestCase
import numpy as np
import graph_matching_tools.metrics.matching as matching
class TestMatching(TestCase):
    """Unit tests for the matching metrics module."""

    def test_compute_f1score(self):
        """Check precision/recall-derived values on two near-identical binary matrices."""
        truth = np.array(
            [
                [1, 0, 0, 1],
                [0, 1, 0, 0],
                [0, 0, 1, 0],
                [1, 0, 0, 1],
            ]
        )
        prediction = np.array(
            [
                [1, 0, 0, 1],
                [0, 1, 0, 0],
                [0, 0, 1, 0],
                [0, 0, 0, 1],
            ]
        )
        scores = matching.compute_f1score(truth, prediction)
        self.assertEqual(scores[1], 0.5)
        self.assertEqual(scores[2], 1.0)
|
{"hexsha": "540b067406bf830340c7268a5ee6b5e40c92abda", "size": 547, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/metrics/test_matching.py", "max_stars_repo_name": "fxdupe/graphmatchingtools", "max_stars_repo_head_hexsha": "4503a04c4a0822315535e6ab3cd698417859908d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/metrics/test_matching.py", "max_issues_repo_name": "fxdupe/graphmatchingtools", "max_issues_repo_head_hexsha": "4503a04c4a0822315535e6ab3cd698417859908d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/metrics/test_matching.py", "max_forks_repo_name": "fxdupe/graphmatchingtools", "max_forks_repo_head_hexsha": "4503a04c4a0822315535e6ab3cd698417859908d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-25T09:11:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T09:11:19.000Z", "avg_line_length": 21.88, "max_line_length": 66, "alphanum_fraction": 0.4862888483, "include": true, "reason": "import numpy", "num_tokens": 186}
|
[STATEMENT]
lemma (in \<Z>) smc_SemiCAT_obj_initialD:
assumes "obj_initial (smc_SemiCAT \<alpha>) \<AA>"
shows "\<AA> = smc_0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<AA> = smc_0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
obj_initial (smc_SemiCAT \<alpha>) \<AA>
goal (1 subgoal):
1. \<AA> = smc_0
[PROOF STEP]
unfolding obj_initial_def
[PROOF STATE]
proof (prove)
using this:
obj_terminal (op_smc (smc_SemiCAT \<alpha>)) \<AA>
goal (1 subgoal):
1. \<AA> = smc_0
[PROOF STEP]
proof
(
elim obj_terminalE,
unfold smc_op_simps smc_SemiCAT_is_arr_iff smc_SemiCAT_Obj_iff
)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
assume prems:
"semicategory \<alpha> \<AA>"
"semicategory \<alpha> \<BB> \<Longrightarrow> \<exists>!\<FF>. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> \<BB>"
for \<BB>
[PROOF STATE]
proof (state)
this:
semicategory \<alpha> \<AA>
semicategory \<alpha> ?\<BB> \<Longrightarrow> \<exists>!\<FF>. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> ?\<BB>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
from prems(2)[OF semicategory_smc_0]
[PROOF STATE]
proof (chain)
picking this:
\<exists>!\<FF>. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
[PROOF STEP]
obtain \<FF> where "\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0"
[PROOF STATE]
proof (prove)
using this:
\<exists>!\<FF>. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
goal (1 subgoal):
1. (\<And>\<FF>. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by meson
[PROOF STATE]
proof (state)
this:
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
[PROOF STEP]
interpret \<FF>: is_semifunctor \<alpha> \<AA> smc_0 \<FF>
[PROOF STATE]
proof (prove)
using this:
\<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
goal (1 subgoal):
1. \<FF> : \<AA> \<mapsto>\<mapsto>\<^sub>S\<^sub>M\<^sub>C\<^bsub>\<alpha>\<^esub> smc_0
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
have "\<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
[PROOF STEP]
unfolding smc_0_components(1)[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> smc_0\<lparr>Obj\<rparr>
[PROOF STEP]
by (simp add: \<FF>.smcf_ObjMap_vrange)
[PROOF STATE]
proof (state)
this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
[PROOF STEP]
have "\<FF>\<lparr>ObjMap\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
goal (1 subgoal):
1. \<FF>\<lparr>ObjMap\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by (auto intro: \<FF>.ObjMap.vsv_vrange_vempty)
[PROOF STATE]
proof (state)
this:
\<FF>\<lparr>ObjMap\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
with \<FF>.smcf_ObjMap_vdomain
[PROOF STATE]
proof (chain)
picking this:
\<D>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) = \<AA>\<lparr>Obj\<rparr>
\<FF>\<lparr>ObjMap\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
have Obj[simp]: "\<AA>\<lparr>Obj\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
\<D>\<^sub>\<circ> (\<FF>\<lparr>ObjMap\<rparr>) = \<AA>\<lparr>Obj\<rparr>
\<FF>\<lparr>ObjMap\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<AA>\<lparr>Obj\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Obj\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
have "\<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
[PROOF STEP]
unfolding smc_0_components(2)[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> smc_0\<lparr>Arr\<rparr>
[PROOF STEP]
by (simp add: \<FF>.smcf_ArrMap_vrange)
[PROOF STATE]
proof (state)
this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
[PROOF STEP]
have "\<FF>\<lparr>ArrMap\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
\<R>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) \<subseteq>\<^sub>\<circ> []\<^sub>\<circ>
goal (1 subgoal):
1. \<FF>\<lparr>ArrMap\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by (auto intro: \<FF>.ArrMap.vsv_vrange_vempty)
[PROOF STATE]
proof (state)
this:
\<FF>\<lparr>ArrMap\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
with \<FF>.smcf_ArrMap_vdomain
[PROOF STATE]
proof (chain)
picking this:
\<D>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) = \<AA>\<lparr>Arr\<rparr>
\<FF>\<lparr>ArrMap\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
have Arr[simp]: "\<AA>\<lparr>Arr\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
\<D>\<^sub>\<circ> (\<FF>\<lparr>ArrMap\<rparr>) = \<AA>\<lparr>Arr\<rparr>
\<FF>\<lparr>ArrMap\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<AA>\<lparr>Arr\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Arr\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
from \<FF>.HomDom.Dom.vdomain_vrange_is_vempty
[PROOF STATE]
proof (chain)
picking this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Dom\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Dom\<rparr>) = []\<^sub>\<circ>)
[PROOF STEP]
have [simp]: "\<AA>\<lparr>Dom\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Dom\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Dom\<rparr>) = []\<^sub>\<circ>)
goal (1 subgoal):
1. \<AA>\<lparr>Dom\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by (auto simp: smc_cs_simps intro: \<FF>.HomDom.Dom.vsv_vrange_vempty)
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Dom\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
from \<FF>.HomDom.Cod.vdomain_vrange_is_vempty
[PROOF STATE]
proof (chain)
picking this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Cod\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Cod\<rparr>) = []\<^sub>\<circ>)
[PROOF STEP]
have [simp]: "\<AA>\<lparr>Cod\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Cod\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Cod\<rparr>) = []\<^sub>\<circ>)
goal (1 subgoal):
1. \<AA>\<lparr>Cod\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by (auto simp: smc_cs_simps intro: \<FF>.HomDom.Cod.vsv_vrange_vempty)
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Cod\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
from Arr
[PROOF STATE]
proof (chain)
picking this:
\<AA>\<lparr>Arr\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
have "\<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat> = 0"
[PROOF STATE]
proof (prove)
using this:
\<AA>\<lparr>Arr\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat> = []\<^sub>\<circ>
[PROOF STEP]
by (simp add: vcpower_of_vempty)
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
with \<FF>.HomDom.Comp.pnop_vdomain
[PROOF STATE]
proof (chain)
picking this:
\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) \<subseteq>\<^sub>\<circ> \<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat>
\<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat> = []\<^sub>\<circ>
[PROOF STEP]
have "\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = 0"
[PROOF STATE]
proof (prove)
using this:
\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) \<subseteq>\<^sub>\<circ> \<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat>
\<AA>\<lparr>Arr\<rparr> ^\<^sub>\<times> 2\<^sub>\<nat> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
with \<FF>.HomDom.Comp.vdomain_vrange_is_vempty
[PROOF STATE]
proof (chain)
picking this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>)
\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>
[PROOF STEP]
have [simp]: "\<AA>\<lparr>Comp\<rparr> = 0"
[PROOF STATE]
proof (prove)
using this:
(\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>) = (\<R>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>)
\<D>\<^sub>\<circ> (\<AA>\<lparr>Comp\<rparr>) = []\<^sub>\<circ>
goal (1 subgoal):
1. \<AA>\<lparr>Comp\<rparr> = []\<^sub>\<circ>
[PROOF STEP]
by (auto intro: \<FF>.HomDom.Comp.vsv_vrange_vempty)
[PROOF STATE]
proof (state)
this:
\<AA>\<lparr>Comp\<rparr> = []\<^sub>\<circ>
goal (1 subgoal):
1. \<lbrakk>semicategory \<alpha> \<AA>; \<And>a. semicategory \<alpha> a \<Longrightarrow> Ex1 (is_semifunctor \<alpha> \<AA> a); obj_initial (smc_SemiCAT \<alpha>) \<AA>\<rbrakk> \<Longrightarrow> \<AA> = smc_0
[PROOF STEP]
show "\<AA> = smc_0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<AA> = smc_0
[PROOF STEP]
by (rule smc_eqI[of \<alpha>])
(simp_all add: prems(1) smc_0_components semicategory_smc_0)
[PROOF STATE]
proof (state)
this:
\<AA> = smc_0
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5695, "file": "CZH_Foundations_czh_semicategories_CZH_SMC_SemiCAT", "length": 46}
|
[STATEMENT]
lemma knows_Spy_Inputs_secureM_srb_Spy:
"evs \<in>srb \<Longrightarrow> knows Spy (Inputs Spy C X # evs) = insert X (knows Spy evs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. evs \<in> srb \<Longrightarrow> knows Spy (Inputs Spy C X # evs) = insert X (knows Spy evs)
[PROOF STEP]
apply (simp (no_asm_simp))
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 172, "file": null, "length": 2}
|
% Copyright 2019 by Christian Feuersaenger
%
% This file may be distributed and/or modified
%
% 1. under the LaTeX Project Public License and/or
% 2. under the GNU Free Documentation License.
%
% See the file doc/generic/pgf/licenses/LICENSE for more details.
\section{Floating Point Unit Library}
\label{pgfmath-floatunit}
\label{section-library-fpu}
{\noindent {\emph{by Christian Feuersänger}}}
\begingroup
\pgfqkeys{/pgf/number format}{sci}
\pgfkeys{/pgf/fpu}
\begin{pgflibrary}{fpu}
The floating point unit (fpu) allows the full data range of scientific
computing for use in \pgfname. Its core is the \pgfname\ math routines for
mantissa operations, leading to a reasonable trade-of between speed and
accuracy. It does not require any third-party packages or external
programs.
\end{pgflibrary}
\subsection{Overview}
The fpu provides a replacement set of math commands which can be installed in
isolated placed to achieve large data ranges at reasonable accuracy. It
provides at least%
\footnote{To be more precise, the FPU's exponent is currently a 32-bit
integer. That means it supports a significantly larger data range than an
IEEE double precision number -- but if a future \TeX\ version may provide
low-level access to doubles, this may change.}%
the IEEE double precision data range, $\pgfmathprintnumber{-1e+324}, \dotsc,
\pgfmathprintnumber{+1e324}$. The absolute smallest number bigger than zero is
$\pgfmathprintnumber{1e-324}$. The FPU's relative precision is at least
$\pgfmathprintnumber{1e-4}$ although operations like addition have a relative
precision of $\pgfmathprintnumber{1e-6}$.
Note that the library has not really been tested together with any drawing
operations. It should be used to work with arbitrary input data which is then
transformed somehow into \pgfname\ precision. This, in turn, can be processed
by \pgfname.
\subsection{Usage}
\begin{key}{/pgf/fpu=\marg{boolean} (default true)}
This key installs or uninstalls the FPU. The installation exchanges any
routines of the standard math parser with those of the FPU: |\pgfmathadd|
will be replaced with |\pgfmathfloatadd| and so on. Furthermore, any number
will be parsed with |\pgfmathfloatparsenumber|.
%
\begin{codeexample}[preamble={\usepgflibrary{fpu}}]
\pgfkeys{/pgf/fpu}
\pgfmathparse{1+1}\pgfmathresult
\end{codeexample}
%
\noindent The FPU uses a low-level number representation consisting of
flags, mantissa and exponent%
\footnote{Users should \emph{always} use high
level routines to manipulate floating point numbers as the format may
change in a future release.}.%
To avoid unnecessary format conversions, |\pgfmathresult| will usually
contain such a cryptic number. Depending on the context, the result may
need to be converted into something which is suitable for \pgfname\
processing (like coordinates) or may need to be typeset. The FPU provides
such methods as well.
%--------------------------------------------------
% TODOsp: codeexamples: Why is this example commented?
% \begin{codeexample}[preamble={\usepgflibrary{fpu}}]
% \begin{tikzpicture}
% \fill[red,fpu,/pgf/fpu/scale results=1e-10] (*1.234e10,*1e10) -- (*2e10,*2e10);
% \end{tikzpicture}
% \end{codeexample}
%--------------------------------------------------
Use |fpu=false| to deactivate the FPU. This will restore any change. Please
note that this is not necessary if the FPU is used inside of a \TeX\ group
-- it will be deactivated afterwards anyway.
It does not hurt to call |fpu=true| or |fpu=false| multiple times.
Please note that if the |fixedpointarithmetic| library of \pgfname\ will
be activated after the FPU, the FPU will be deactivated automatically.
\end{key}
\begin{key}{/pgf/fpu/output format=\mchoice{float,sci,fixed} (initially float)}
This key allows changing the number format in which the FPU assigns
|\pgfmathresult|.
The predefined choice |float| uses the low-level format used by the FPU.
This is useful for further processing inside of any library.
%
\begin{codeexample}[preamble={\usepgflibrary{fpu}}]
\pgfkeys{/pgf/fpu,/pgf/fpu/output format=float}
\pgfmathparse{exp(50)*42}\pgfmathresult
\end{codeexample}
The choice |sci| returns numbers in the format
\meta{mantissa}|e|\meta{exponent}. It provides almost no computational
overhead.
%
\begin{codeexample}[preamble={\usepgflibrary{fpu}}]
\pgfkeys{/pgf/fpu,/pgf/fpu/output format=sci}
\pgfmathparse{4.22e-8^-2}\pgfmathresult
\end{codeexample}
The choice |fixed| returns normal fixed point numbers and provides the
highest compatibility with the \pgfname\ engine. It is activated
automatically in case the FPU scales results.
%
\begin{codeexample}[preamble={\usepgflibrary{fpu}}]
\pgfkeys{/pgf/fpu,/pgf/fpu/output format=fixed}
\pgfmathparse{sqrt(1e-12)}\pgfmathresult
\end{codeexample}
%
\end{key}
\begin{key}{/pgf/fpu/scale results=\marg{scale}}
A feature which allows semi-automatic result scaling. Setting this key has
two effects: first, the output format for \emph{any} computation will be
set to |fixed| (assuming results will be processed by \pgfname's kernel).
Second, any expression which starts with a star, |*|, will be multiplied
with \meta{scale}.
\end{key}
\begin{keylist}{
/pgf/fpu/scale file plot x=\marg{scale},%
/pgf/fpu/scale file plot y=\marg{scale},%
/pgf/fpu/scale file plot z=\marg{scale}%
}
These keys will patch \pgfname's |plot file| command to automatically scale
single coordinates by \meta{scale}.
The initial setting does not scale |plot file|.
\end{keylist}
\begin{command}{\pgflibraryfpuifactive\marg{true-code}\marg{false-code}}
This command can be used to execute either \meta{true-code} or
\meta{false-code}, depending on whether the FPU has been activated or not.
\end{command}
\begin{key}{/pgf/fpu/install only=\marg{list of names}}
\label{fpu-install-only}
Unfortunately, the FPU is currently incompatible with drawing operations.
However, it can still be useful to replace single definitions with FPU
counterparts to avoid errors of the kind |Dimension too large| which tend
to happen when transformation matrices are inverted.
This key allows specifying a list of definitions to be pulled into the
current scope. \emph{Note that there is no reverse operation to uninstall
these definitions at the moment}, so it is advisable to do this in a group.
Conveniently, \tikzname{} paths form an implicit group, so you can use this
key on a path as well.
You have to be aware of the limitations that the FPU imposes. It will not
magically give \TeX{} better precision, but it will avoid overflow or
underflow situations for large or small operands by rescaling them. In the
following example, in the first case the FPU variant performs much better
than the normal variant, however, in the second case where a rescaling
would not in fact be needed the rescaling introduces a small round-off
error.
%
\begin{codeexample}[
preamble={\usepgflibrary{fpu}},
pre={\pgfkeys{/pgf/fpu=false}},
]
\begingroup
\pgfkeys{/pgf/fpu/install only={divide}}
\pgfmathparse{12.34/0.001234}\pgfmathresult (good)
\pgfmathparse{12/4}\pgfmathresult (bad)
\endgroup
\end{codeexample}
%
\emph{This key is experimental and can change or disappear at any time!}
\end{key}
\subsection{Comparison to the fixed point arithmetics library}
There are other ways to increase the data range and/or the precision of
\pgfname's math parser. One of them is the |fp| package, preferable combined
with \pgfname's |fixedpointarithmetic| library. The differences between the FPU
and |fp| are:
%
\begin{itemize}
\item The FPU supports at least the complete IEEE double precision number
range, while |fp| covers only numbers of magnitude
$\pm\pgfmathprintnumber{1e17}$.
\item The FPU has a uniform relative precision of about 4--5 correct
digits. The fixed point library has an absolute precision which may
perform good in many cases -- but will fail at the ends of the data
range (as every fixed point routines does).
\item The FPU has potential to be faster than |fp| as it has access to fast
mantissa operations using \pgfname's math capabilities (which use \TeX\
registers).
\end{itemize}
\subsection{Command Reference and Programmer's Manual}
\subsubsection{Creating and Converting Floats}
\begin{command}{\pgfmathfloatparsenumber\marg{x}}
Reads a number of arbitrary magnitude and precision and stores its result
into |\pgfmathresult| as floating point number $m \cdot 10^e$ with mantissa
and exponent base~$10$.
The algorithm and the storage format is purely text-based. The number is
stored as a triple of flags, a positive mantissa and an exponent, such as
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{2}
\pgfmathresult
\end{codeexample}
%
Please do not rely on the low-level representation here, use
|\pgfmathfloattomacro| (and its variants) and |\pgfmathfloatcreate| if you
want to work with these components.
The flags encoded in |\pgfmathresult| are represented as a digit where
`$0$' stands for the number $\pm 0\cdot 10^0$, `$1$' stands for a positive
sign, `$2$' means a negative sign, `$3$' stands for `not a number', `$4$'
means $+\infty$ and `$5$' stands for $-\infty$.
The mantissa is a normalized real number $m \in \mathbb{R}$, $1 \le m <
10$. It always contains a period and at least one digit after the period.
The exponent is an integer.
Examples:
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{0}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{0.2}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{42}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{20.5E+2}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{1e6}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{5.21513e-11}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E.
\end{codeexample}
%
The argument \meta{x} may be given in fixed point format or the scientific
``e'' (or ``E'') notation. The scientific notation does not necessarily
need to be normalized. The supported exponent range is (currently) only
limited by the \TeX-integer range (which uses 31 bit integer numbers).
\end{command}
\begin{key}{/pgf/fpu/handlers/empty number=\marg{input}\marg{unreadable part}}
This command key is invoked in case an empty string is parsed inside of
|\pgfmathfloatparsenumber|. You can overwrite it to assign a replacement
|\pgfmathresult| (in float!).
The initial setting is to invoke |invalid number|, see below.
\end{key}
\begin{key}{/pgf/fpu/handlers/invalid number=\marg{input}\marg{unreadable part}}
This command key is invoked in case an invalid string is parsed inside of
|\pgfmathfloatparsenumber|. You can overwrite it to assign a replacement
|\pgfmathresult| (in float!).
The initial setting is to generate an error message.
\end{key}
\begin{key}{/pgf/fpu/handlers/wrong lowlevel format=\marg{input}\marg{unreadable part}}
This command key is invoked whenever |\pgfmathfloattoregisters| or its
variants encounter something which is not a properly formatted low-level
floating point number. As for |invalid number|, this key may assign a new
|\pgfmathresult| (in floating point) which will be used instead of the
offending \meta{input}.
The initial setting is to generate an error message.
\end{key}
\begin{command}{\pgfmathfloatqparsenumber\marg{x}}
The same as |\pgfmathfloatparsenumber|, but does not perform sanity checking.
\end{command}
\begin{command}{\pgfmathfloattofixed{\marg{x}}}
Converts a number in floating point representation to a fixed point number.
It is a counterpart to |\pgfmathfloatparsenumber|. The algorithm is purely
text based and defines |\pgfmathresult| as a string sequence which
represents the floating point number \meta{x} as a fixed point number (of
arbitrary precision).
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{0.00052}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E
$\to$
\pgfmathfloattofixed{\pgfmathresult}
\pgfmathresult
\end{codeexample}
\begin{codeexample}[]
\pgfmathfloatparsenumber{123.456e4}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E
$\to$
\pgfmathfloattofixed{\pgfmathresult}
\pgfmathresult
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathfloattoint\marg{x}}
Converts a number from low-level floating point representation to an
integer (by truncating the fractional part).
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{123456}
\pgfmathfloattoint{\pgfmathresult}
\pgfmathresult
\end{codeexample}
See also |\pgfmathfloatint| which returns the result as float.
\end{command}
\begin{command}{\pgfmathfloattosci\marg{float}}
Converts a number from low-level floating point representation to
scientific format, $1.234e4$. The result will be assigned to the macro
|\pgfmathresult|.
\end{command}
\begin{command}{\pgfmathfloatvalueof\marg{float}}
Expands a number from low-level floating point representation to scientific
format, $1.234e4$.
Use |\pgfmathfloatvalueof| in contexts where only expandable macros are
allowed.
\end{command}
\begin{command}{\pgfmathfloatcreate{\marg{flags}}{\marg{mantissa}}{\marg{exponent}}}
Defines |\pgfmathresult| as the floating point number encoded by
\meta{flags}, \meta{mantissa} and \meta{exponent}.
All arguments are characters and will be expanded using |\edef|.
%
\begin{codeexample}[]
\pgfmathfloatcreate{1}{1.0}{327}
\pgfmathfloattomacro{\pgfmathresult}{\F}{\M}{\E}
Flags: \F; Mantissa \M; Exponent \E
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathfloatifflags\marg{floating point number}\marg{flag}\marg{true-code}\marg{false-code}}
Invokes \meta{true-code} if the flag of \meta{floating point number} equals
\meta{flag} and \meta{false-code} otherwise.
The argument \meta{flag} can be one of
%
\begin{description}
\item[0] to test for zero,
\item[1] to test for positive numbers,
\item[+] to test for positive numbers,
\item[2] to test for negative numbers,
\item[-] to test for negative numbers,
\item[3] for ``not-a-number'',
\item[4] for $+\infty$,
\item[5] for $-\infty$.
\end{description}
%
\begin{codeexample}[preamble={\usetikzlibrary{fpu}}]
\pgfmathfloatparsenumber{42}
\pgfmathfloatifflags{\pgfmathresult}{0}{It's zero!}{It's not zero!}
\pgfmathfloatifflags{\pgfmathresult}{1}{It's positive!}{It's not positive!}
\pgfmathfloatifflags{\pgfmathresult}{2}{It's negative!}{It's not negative!}
% or, equivalently
\pgfmathfloatifflags{\pgfmathresult}{+}{It's positive!}{It's not positive!}
\pgfmathfloatifflags{\pgfmathresult}{-}{It's negative!}{It's not negative!}
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathfloattomacro{\marg{x}}{\marg{flagsmacro}}{\marg{mantissamacro}}{\marg{exponentmacro}}}
Extracts the flags of a floating point number \meta{x} to
\meta{flagsmacro}, the mantissa to \meta{mantissamacro} and the exponent to
\meta{exponentmacro}.
\end{command}
\begin{command}{\pgfmathfloattoregisters{\marg{x}}{\marg{flagscount}}{\marg{mantissadimen}}{\marg{exponentcount}}}
Takes a floating point number \meta{x} as input and writes flags to count
register \meta{flagscount}, mantissa to dimen register \meta{mantissadimen}
and exponent to count register \meta{exponentcount}.
Please note that this method rounds the mantissa to \TeX-precision.
\end{command}
\begin{command}{\pgfmathfloattoregisterstok{\marg{x}}{\marg{flagscount}}{\marg{mantissatoks}}{\marg{exponentcount}}}
A variant of |\pgfmathfloattoregisters| which writes the mantissa into a
token register. It maintains the full input precision.
\end{command}
\begin{command}{\pgfmathfloatgetflags{\marg{x}}{\marg{flagscount}}}
Extracts the flags of \meta{x} into the count register \meta{flagscount}.
\end{command}
\begin{command}{\pgfmathfloatgetflagstomacro{\marg{x}}{\marg{macro}}}
Extracts the flags of \meta{x} into the macro \meta{macro}.
\end{command}
\begin{command}{\pgfmathfloatgetmantissa{\marg{x}}{\marg{mantissadimen}}}
Extracts the mantissa of \meta{x} into the dimen register
\meta{mantissadimen}.
\end{command}
\begin{command}{\pgfmathfloatgetmantissatok{\marg{x}}{\marg{mantissatoks}}}
Extracts the mantissa of \meta{x} into the token register
\meta{mantissatoks}.
\end{command}
\begin{command}{\pgfmathfloatgetexponent{\marg{x}}{\marg{exponentcount}}}
Extracts the exponent of \meta{x} into the count register
\meta{exponentcount}.
\end{command}
\subsubsection{Symbolic Rounding Operations}
Commands in this section constitute the basic level implementations of the
rounding routines. They work symbolically, i.e.\ they operate on text, not on
numbers and allow arbitrarily large numbers.
\begin{command}{\pgfmathroundto{\marg{x}}}
Rounds a fixed point number to prescribed precision and writes the result
to |\pgfmathresult|.
The desired precision can be configured with
|/pgf/number format/precision|, see section~\ref{pgfmath-numberprinting}.
    This section also contains application examples.
Any trailing zeros after the period are discarded. The algorithm is purely
text based and allows to deal with precisions beyond \TeX's fixed point
support.
As a side effect, the global boolean |\ifpgfmathfloatroundhasperiod| will
be set to true if and only if the resulting mantissa has a period.
Furthermore, |\ifpgfmathfloatroundmayneedrenormalize| will be set to true
if and only if the rounding result's floating point representation would
have a larger exponent than \meta{x}.
%
\begin{codeexample}[]
\pgfmathroundto{1}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathroundto{4.685}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathroundto{19999.9996}
\pgfmathresult
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathroundtozerofill{\marg{x}}}
A variant of |\pgfmathroundto| which always uses a fixed number of digits
behind the period. It fills missing digits with zeros.
%
\begin{codeexample}[]
\pgfmathroundtozerofill{1}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathroundtozerofill{4.685}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathroundtozerofill{19999.9996}
\pgfmathresult
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathfloatround{\marg{x}}}
Rounds a normalized floating point number to a prescribed precision and
writes the result to |\pgfmathresult|.
The desired precision can be configured with
|/pgf/number format/precision|, see section~\ref{pgfmath-numberprinting}.
This method employs |\pgfmathroundto| to round the mantissa and applies
renormalization if necessary.
As a side effect, the global boolean |\ifpgfmathfloatroundhasperiod| will
be set to true if and only if the resulting mantissa has a period.
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{52.5864}
\pgfmathfloatround{\pgfmathresult}
\pgfmathfloattosci{\pgfmathresult}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{9.995}
\pgfmathfloatround{\pgfmathresult}
\pgfmathfloattosci{\pgfmathresult}
\pgfmathresult
\end{codeexample}
%
\end{command}
\begin{command}{\pgfmathfloatroundzerofill{\marg{x}}}
    A variant of |\pgfmathfloatround| which always produces the same number of
    digits after the period (it includes zeros if necessary).
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{52.5864}
\pgfmathfloatroundzerofill{\pgfmathresult}
\pgfmathfloattosci{\pgfmathresult}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[]
\pgfmathfloatparsenumber{9.995}
\pgfmathfloatroundzerofill{\pgfmathresult}
\pgfmathfloattosci{\pgfmathresult}
\pgfmathresult
\end{codeexample}
%
\end{command}
\subsubsection{Math Operations Commands}
This section describes some of the replacement commands in more detail.
Please note that these commands can be used even if the |fpu| as such has not
been activated -- it is sufficient to load the library.
\begin{command}{\pgfmathfloat\meta{op}}
Methods of this form constitute the replacement operations where \meta{op}
can be any of the well-known math operations.
Thus, \declareandlabel{\pgfmathfloatadd} is the counterpart for
|\pgfmathadd| and so on. The semantics and number of arguments is the same,
but all input and output arguments are \emph{expected} to be floating point
numbers.
\end{command}
\begin{command}{\pgfmathfloattoextentedprecision{\marg{x}}}
Renormalizes \meta{x} to extended precision mantissa, meaning $100 \le m <
1000$ instead of $1 \le m < 10$.
The ``extended precision'' means we have higher accuracy when we apply
pgfmath operations to mantissas.
The input argument is expected to be a normalized floating point number;
the output argument is a non-normalized floating point number (well,
normalized to extended precision).
The operation is supposed to be very fast.
\end{command}
\begin{command}{\pgfmathfloatsetextprecision\marg{shift}}
Sets the precision used inside of |\pgfmathfloattoextentedprecision| to
\meta{shift}.
The different choices are
\begin{tabular}{llrll}
        0 & normalization to & $1$ & $\le m < 10$ & (disable extended precision) \\
1 & normalization to & $10$ & $\le m < 100$ & \\
2 & normalization to & $100$ & $\le m < 1000$ & (default of |\pgfmathfloattoextentedprecision|) \\
3 & normalization to & $1000$ & $\le m < 10000$ & \\
\end{tabular}
\end{command}
\begin{command}{\pgfmathfloatlessthan{\marg{x}}{\marg{y}}}
Defines |\pgfmathresult| as $1.0$ if $\meta{x} < \meta{y}$, but $0.0$
otherwise. It also sets the global \TeX-boolean |\pgfmathfloatcomparison|
accordingly. The arguments \meta{x} and \meta{y} are expected to be numbers
which have already been processed by |\pgfmathfloatparsenumber|. Arithmetic
is carried out using \TeX-registers for exponent- and mantissa comparison.
\end{command}
\begin{command}{\pgfmathfloatmultiplyfixed\marg{float}\marg{fixed}}
Defines |\pgfmathresult| to be $\meta{float} \cdot \meta{fixed}$ where
\meta{float} is a floating point number and \meta{fixed} is a fixed point
number. The computation is performed in floating point arithmetics, that
means we compute $m \cdot \meta{fixed}$ and renormalize the result where
$m$ is the mantissa of \meta{float}.
This operation renormalizes \meta{float} with
|\pgfmathfloattoextentedprecision| before the operation, that means it is
intended for relatively small arguments of \meta{fixed}. The result is a
floating point number.
\end{command}
\begin{command}{\pgfmathfloatifapproxequalrel\marg{a}\marg{b}\marg{true-code}\marg{false-code}}
Computes the relative error between \meta{a} and \meta{b} (assuming
\meta{b}$\neq 0$) and invokes \meta{true-code} if the relative error is
below |/pgf/fpu/rel thresh| and \meta{false-code} if that is not the case.
The input arguments will be parsed with |\pgfmathfloatparsenumber|.
\begin{key}{/pgf/fpu/rel thresh=\marg{number} (initially 1e-4)}
A threshold used by |\pgfmathfloatifapproxequalrel| to decide whether
numbers are approximately equal.
\end{key}
\end{command}
\begin{command}{\pgfmathfloatshift{\marg{x}}{\marg{num}}}
Defines |\pgfmathresult| to be $\meta{x} \cdot 10^{\meta{num}}$. The
operation is an arithmetic shift base ten and modifies only the exponent of
\meta{x}. The argument \meta{num} is expected to be a (positive or
negative) integer.
\end{command}
\begin{command}{\pgfmathfloatabserror\marg{x}\marg{y}}
Defines |\pgfmathresult| to be the absolute error between two floating
point numbers $x$ and $y$, $\lvert x - y\rvert $ and returns the result as
floating point number.
\end{command}
\begin{command}{\pgfmathfloatrelerror\marg{x}\marg{y}}
Defines |\pgfmathresult| to be the relative error between two floating
point numbers $x$ and $y$, $\lvert x - y\rvert / \lvert y \rvert$ and
returns the result as floating point number.
\end{command}
\begin{command}{\pgfmathfloatint\marg{x}}
Returns the integer part of the floating point number \meta{x}, by
    truncating any digits after the period. This method truncates the absolute
    value $\lvert x \rvert$ to the next smaller integer and restores the
    original sign afterwards.
The result is returned as floating point number as well.
See also |\pgfmathfloattoint| which returns the number in integer format.
\end{command}
\begin{command}{\pgfmathlog{\marg{x}}}
Defines |\pgfmathresult| to be the natural logarithm of \meta{x},
$\ln(\meta{x})$. This method is logically the same as |\pgfmathln|, but it
applies floating point arithmetics to read number \meta{x} and employs the
logarithm identity \[ \ln(m \cdot 10^e) = \ln(m) + e \cdot \ln(10) \] to
get the result. The factor $\ln(10)$ is a constant, so only $\ln(m)$ with
$1 \le m < 10$ needs to be computed. This is done using standard pgf math
operations.
Please note that \meta{x} needs to be a number, expression parsing is not
possible here.
If \meta{x} is \emph{not} a bounded positive real number (for example
$\meta{x} \le 0$), |\pgfmathresult| will be \emph{empty}, no error message
will be generated.
%
\begin{codeexample}[preamble={\usetikzlibrary{fpu}}]
\pgfmathlog{1.452e-7}
\pgfmathresult
\end{codeexample}
%
\begin{codeexample}[preamble={\usetikzlibrary{fpu}}]
\pgfmathlog{6.426e+8}
\pgfmathresult
\end{codeexample}
%
\end{command}
\subsubsection{Accessing the Original Math Routines for Programmers}
As soon as the library is loaded, every private math routine will be copied to
a new name. This allows library and package authors to access the \TeX-register
based math routines even if the FPU is activated. And, of course, it allows the
FPU as such to perform its own mantissa computations.
The private implementations of \pgfname\ math commands, which are of the form
|\pgfmath|\meta{name}|@|, will be available as |\pgfmath@basic@|\meta{name}|@|
as soon as the library is loaded.
\endgroup
|
{"hexsha": "04989eef44d8f1182da62e225e72abf276357694", "size": 27352, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-library-fpu.tex", "max_stars_repo_name": "waqas4afzal/LatexUrduBooksTools", "max_stars_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-library-fpu.tex", "max_issues_repo_name": "waqas4afzal/LatexUrduBooksTools", "max_issues_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Texlive_Windows_x32/2020/texmf-dist/doc/generic/pgf/text-en/pgfmanual-en-library-fpu.tex", "max_forks_repo_name": "waqas4afzal/LatexUrduBooksTools", "max_forks_repo_head_hexsha": "52fe6e0cd5af6b4610fd344a7392cca11bc5a72e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3081232493, "max_line_length": 116, "alphanum_fraction": 0.7260894999, "num_tokens": 7831}
|
#pragma once
#include <new>
#include <map>
#include <mutex>
#include <vector>
#include <string>
#include <utility>
#include <cstdlib>
#define BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED
#include <boost/stacktrace.hpp>
namespace
{
// Allocator that bypasses the replaced ::operator new by going straight to
// std::malloc/std::free: the tracking map below must not allocate through the
// very operators it instruments, or every insertion would recurse.
template<typename U>
struct malloc_allocator_t : std::allocator<U>
{
    U* allocate(std::size_t count)
    {
        void* raw = std::malloc(count * sizeof(U));
        if(raw == nullptr)
            throw std::bad_alloc();
        return static_cast<U*>(raw);
    }
    void deallocate(U* raw, std::size_t) { std::free(raw); }
    template<typename V>
    struct rebind { typedef malloc_allocator_t<V> other; };
};
using char_t = char;
using string_t = const char_t*;
using stack_trace_t = std::string;
// One record per live allocation: size, call site and captured stack trace.
using new_entry_t = struct { std::size_t bytes; string_t file; string_t proc; int line; stack_trace_t stack; };
using ptr_t = void*;
using new_ptr_map_t = std::map<ptr_t, new_entry_t, std::less<ptr_t>, malloc_allocator_t<std::pair<ptr_t const, new_entry_t>>>;
using leak_list_t = std::vector<new_entry_t>;
// Registry of allocations made via the tracking operator new (Meyers singleton,
// so it is constructed on first use regardless of static-init order).
inline static auto& get_ptr_map()
{
    static new_ptr_map_t new_ptr_map;
    return new_ptr_map;
}
// Snapshot of every entry still present in the registry, i.e. the current leaks.
inline static auto get_leaks()
{
    leak_list_t pending;
    pending.reserve(get_ptr_map().size());
    for(const auto& [address, entry] : get_ptr_map())
        pending.push_back(entry);
    return pending;
}
}
// Replacement global allocation function; backs every 'new' expression.
// Allocates with std::malloc so the matching replacement operator delete can
// release with std::free.
void* operator new(std::size_t n)
{
    // std::malloc(0) may legally return nullptr, but operator new is required
    // to return a unique non-null pointer even for zero-sized requests.
    if(n == 0) n = 1;
    void* ptr = std::malloc(n);
    if(!ptr) throw std::bad_alloc();
    return ptr;
}
// Tracking overload reached through the 'new' macro below: allocates via the
// replaced ::operator new and records the call site plus a stack trace.
void* operator new (std::size_t n, const char* file, const char* func, int line)
{
    void* ptr = ::operator new(n);
    try {
        namespace st = boost::stacktrace;
        auto stack = st::to_string(st::stacktrace());
        // recursive_mutex: map/string machinery below could re-enter this
        // overload if it is itself compiled under the tracking macro.
        // NOTE(review): this lock is distinct from operator_delete_lock, so a
        // concurrent new/delete pair would race on the map — presumably
        // acceptable for this single-threaded (.st) variant; confirm.
        static std::recursive_mutex operator_new_lock;
        std::scoped_lock guard{ operator_new_lock };
        get_ptr_map().emplace(ptr, new_entry_t{ n, file, func, line, std::move(stack) });
    } catch(...) { } // best effort: a tracking failure must not break allocation
    return ptr;
}
// Matching placement delete: the compiler invokes this when a constructor
// throws inside a 'new(file, func, line) T(...)' expression; without it the
// raw storage (and its tracking entry) would leak.
void operator delete (void* ptr, const char*, const char*, int) noexcept
{
    ::operator delete(ptr);
}
// Array form simply forwards to the tracking scalar overload above.
void* operator new [] (std::size_t n, const char* file, const char* func, int line)
{
    return ::operator new(n, file, func, line);
}
// Matching placement delete[]: invoked when an array element constructor
// throws inside a tracked 'new[]' expression; prevents leaking the storage.
void operator delete [] (void* ptr, const char*, const char*, int) noexcept
{
    ::operator delete(ptr);
}
// Replacement global deallocation function: drops the pointer's tracking entry
// (if one exists) and releases the malloc-backed storage.
void operator delete (void* ptr) noexcept
{
    static std::recursive_mutex operator_delete_lock;
    std::scoped_lock guard{ operator_delete_lock };
    // Erasing by key is a no-op for untracked pointers (e.g. ones that came
    // from the plain, non-macro operator new).
    get_ptr_map().erase(ptr);
    std::free(ptr);
}
// Route every subsequent 'new' expression in including code through the
// tracking placement overload. __FILE_NAME__ (basename of __FILE__) and
// __FUNCTION__ are GCC/Clang extensions.
#if defined(new)
#error Macro 'new' is already defined!
#else
#define new new(__FILE_NAME__, __FUNCTION__, __LINE__)
#endif
|
{"hexsha": "d2cc479d535a8dfa4a4d47bd830eb4a8708efd1e", "size": 2319, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/newtrace.st.hpp", "max_stars_repo_name": "SammyEnigma/blog", "max_stars_repo_head_hexsha": "f0f3ef44ea4fd622befae81d2f4e5e6a607acfd1", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 94.0, "max_stars_repo_stars_event_min_datetime": "2019-02-17T09:25:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:25:14.000Z", "max_issues_repo_path": "src/newtrace.st.hpp", "max_issues_repo_name": "SammyEnigma/blog", "max_issues_repo_head_hexsha": "f0f3ef44ea4fd622befae81d2f4e5e6a607acfd1", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2020-09-05T09:38:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T15:38:57.000Z", "max_forks_repo_path": "src/newtrace.st.hpp", "max_forks_repo_name": "SammyEnigma/blog", "max_forks_repo_head_hexsha": "f0f3ef44ea4fd622befae81d2f4e5e6a607acfd1", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": 29.0, "max_forks_repo_forks_event_min_datetime": "2019-02-17T09:25:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T08:53:38.000Z", "avg_line_length": 24.4105263158, "max_line_length": 127, "alphanum_fraction": 0.701164295, "num_tokens": 634}
|
\documentclass[hyp]{socreport}
\usepackage{mathtools}
\usepackage{mathtools}
\usepackage{fullpage}
\usepackage{float}
\usepackage{hyperref}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{caption}
\usepackage[shortlabels]{enumitem}
\usepackage[utf8]{inputenc}
\graphicspath{{./figs/}}
\DeclarePairedDelimiter{\abs}{\lvert}{\rvert}
%%% Begin document
\begin{document}
\pagenumbering{roman}
\title{Underwater Real-Time Object Recognition and Tracking for Autonomous Underwater Vehicle}
\author{Tan Soon Jin}
\projyear{2016/17}
\projnumber{H021400}
\advisor{Prof. Terrence Sim Mong Cheng}
\deliverables{
\item Report: 1 Volume}
\maketitle
\include{chap0_abstract}
\listoffigures
\listoftables
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 1
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Introduction}
\section{Background on Robosub}
\subsection{Information about the competition}
Robosub is an international AUV competition where students from around
the world build their own customized AUV to complete a series of
underwater missions that involve both visual tasks and acoustics task.
The competition is held annually in TRANSDEC (Transducer Evaluation
Center) man-made pool.
\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{transdec_aerial.png}
\caption{Aerial view of TRANSDEC. Operational depth of 16 ft for most vision tasks}
\label{fig:transdec_aerial}
\end{figure}
\subsection{Description of vision tasks}
Vision tasks in Robosub can divided into forward-facing tasks and
bottom-facing tasks which poses different sets of challenges. Since the
tasks do not vary significantly every year, we can use datasets
collected from this year's competition as testbed for our vision
algorithms.
\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{robosub_vision_tasks.png}
\caption{Robosub 2016 Vision Tasks. a) Scuttle Ship b) Navigate
Channel c) Weigh Anchor d) Set Course e) Bury Treasure (Coins)
f) Bury Treasure (Island)}
\label{fig:robosub2016_tasks}
\end{figure}
\begin{enumerate}
\item \textbf{Scuttle Ship (Buoy)}
A recurring task where the AUV has identify the correct color
buoy and touch it. There are two major challenges with this
task:
\begin{enumerate}[a.]
\item Red buoy tends to exhibit color distortion as red
wavelength attenuates the fastest \cite{Galdran2015}.
\item Non-uniform illumination on top-half of buoys make it hard
to distinguish the buoys.
\end{enumerate}
\item \textbf{Navigate Channel} \\
The AUV is required to move in between and over the PVC pipes.
\item \textbf{Weight Anchor} \\
Classic object classification task where the AUV is required to
drop a marker into the correct bin to obtain maximum points
after removing the cover using a manipulator.
\item \textbf{Set Course} \\
Identification of covered square (orange panel) and remove it.
Fire two markers over 2 smaller holes. As yellow and orange are
really close on the colour spectrum, this forces us to use other
visual cues such as edge for better detection.
\item \textbf{Bury Treasure} \\
For this task, one has to identify the small cylinders (red and
green) and drop them onto their respective colored circles (on
the Island). Identifying and distinguishing small objects afar
(4 m) underwater is the biggest challenge in this task. Besides
that, the dropped cylinders may potentially occlude the circles.
\end{enumerate}
\section{Challenges in Underwater Image Processing}
Much of the literature, such as \outcite{M2016}, which investigates various
underwater image restoration methods, cites haze formation: as light
propagating from an object undergoes attenuation and scattering, the
resulting image has low contrast. In addition, the Beer-Lambert law
\cite{gevers2012color} relates the attenuation of light to the properties of
the water medium; therefore, light components with short wavelengths (green
and blue) are not as easily absorbed as the red wavelength. This
causes underwater images to tend to have a greenish or bluish color cast.
\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth, height=0.2\textheight]{underwater_beerlambert.png}
\caption{Absorption of light at the surface}
\label{fig:water_surface_effect}
\end{figure}
\section{Project Requirements Analysis}
Though it is the objective of the project to design a vision framework
for the Robosub missions, the vision framework should also be easily
extended to work for more complex real world applications.
\subsection{Nature of tasks}
\begin{enumerate}
\item Vision algorithms perform with acceptable accuracy under the following conditions:
\begin{figure}[ht]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{task_challenges.png}
\caption{Different vision challenges. a) Haze formation b)
Partial occlusion c) Non-uniform illumination d) Sunlight
flickers e) Shadow}
\label{fig:vision_challenges}
\end{figure}
\item Low detection latency (near real-time) \\
AUV needs to make swift decision based on sensor inputs to
complete task under time constraints (same for real world time
critical mission i.e underwater mine detection)
\item Geometric properties of objects are made known in advance
\item Short-period single target tracking for task (unlike video surveillance application)
\item Able to detect objects from far away (5m) and near distance (for manipulation task)
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Literature Review}
This review is conducted with the purpose to investigate and select most
suitable algorithms that generate the best result on the Robosub
datasets. Since every team that participates in Robosub is required to
submit a journal paper, vision algorithms deployed by top-performing
schools such as Cornell University, University of Florida and École de
technologie supérieure provide valuable insights on image processing
techniques that are effective in underwater environments. Besides that, we
review popular image processing techniques, in particular on topics like
object detection, object tracking, color constancy, saliency mechanisms,
detection proposals and adaptation of algorithms.
\section{Preprocessing}
\subsection{Underwater Image Enhancement}
The paper by \outcite{garcia2002way} compared methods such as
homomorphic filtering and local adaptive histogram equalization
(Contrast Limited Adaptive Histogram) which considers that image is a
product of illumination and reflectance properties. However, homomorphic
filter has the benefit of preserving sharp edges while attenuating
non-uniform illumination. On the other hand, by only redistributing
pixels exceeding a clipping level to increase contrast of an image,
CLAHE manages to reduce noise amplification in normal local histogram
equalization.
Instead of relying on a single image, \outcite{Gracias2008} recover
corrupted underwater image by finding the difference between the current
frame with temporal median of a registered set of N frames. Image
dehazing is equally important to ensure good performance of further
image processing operations such as feature detection. \outcite{Kaiming2011}
proposed a single image dehazing method using the dark channel prior
which states that haze-free image contains local region with low
intensities in at least one color channel. \outcite{Galdran2015} propose
a variant of dark channel prior for underwater environment, the Red
Channel method as red color shows most degradation in turbid water
medium. From another perspective, \outcite{Ancuti2011} takes a
fusion-approach to recover the original image by generating a few weight
maps that correlates with intrinsic properties of the image itself. A
color corrected and contrast enhanced of the input image are used to
generate different weight maps that are fused using a Laplacian
multi-scale strategy to generate a smoothed output image. This method
has the benefit of using a single image but the weight maps must be
combined with different weightage to achieve an ideal result.
\subsection{Color Constancy}
Color cue plays an important role to distinguish different objects such
as the small cylinders in Robosub that requires sorting by color. The
ability to account for color of the light source is called color
constancy. The work of \outcite{Gijsenij2011} analyzes various color
constancy algorithms. Attention is paid especially on low-level
statistics methods that are computationally inexpensive compared to
learning-based methods. The Grey-World \cite{buchsbaum1980spatial}
estimate the color of the light source by estimating the average color
in the image assuming that any deviation from average color (Grey) is
caused by illuminants. The White-Patch method \cite{land1977retinex}
estimates the color of light source by computing the maximum response in
individual RGB color channels. \outcite{finlayson2004shades} shows that
both Grey-World and White-Patch algorithms are special instantiation of
a more general color constancy algorithm based on Minkowski norm called
Shades of Grey. Their investigation of best illumination estimation
suggests using Minkowski norm, p = 6 to obtain optimal performance.
Though we see new method such as the Color Rabbit \cite{Bani??2014}
which combine multiple local illumination estimations to a global one,
these class of methods are more computationally expensive which is not
suitable for real-time application. Inspired by primary visual cortex
(V1) of human visual system (HVS), \outcite{Gao2013} estimate the true
illuminant color of a scene by computing the maximum response in
separate RGB channels of the responses of double-opponent cells. This
method is shown to perform better on outdoor scenes from Gehler-Shi
dataset where the mean reflectance is not achromatic which is assumed by
Grey-World based methods.
\section{Saliency Region Detection}
Ability of human visual system (HVS) to selectively process only the
salient visual stimuli, specifically salient object detection helps to
reduce computation time of object recognition that traditionally relies
of sliding-window approach to detect object of interest.
\outcite{achanta2009frequency} estimate centre-surround contrast using
color and luminance features using a frequency-tuned approach to
generate a high-resolution saliency map. In contrast, the biologically
inspired method of \cite{Itti1998} computes centre-surround contrast using
Difference of Gaussians (DoG), which generates a low-resolution map with
ill-defined boundaries because of down-sampling of the original image.
Because saliency detection often works poorly in low-contrast
environments, e.g.\ the underwater environment, the work of
\outcite{VanDeWeijer2005} boosts local color information by analyzing
isosalient colour derivatives. \outcite{Cao2010} extended work of Van de
Weijer as Gaussian derivatives of each opponent color to get a better
iso-salient transformation.
\section{Detection Proposals}
Relying on saliency mechanism is insufficient in perturbed underwater
condition; therefore, different detection proposals algorithms are
investigated. \outcite{Hosang2015} cited that "detection proposals"
which can be grouped into a) grouping proposal methods and b) Window
scoring proposals methods are used extensively by top performing object
detectors in PASCAL and ImageNet. On top of reduced computation cost by
avoiding exhaustive sliding window approach, detection proposals improve
recall by filtering out false positives. Recent work of
\outcite{Winschel2016} combines top performing detection proposals
methods, SelectiveSearch \cite{uijlings2013selective} and EdgeBox
\cite{zitnick2014edge}. Though detection proposals allow for faster
object recognition, it is important that it does not filter out the object
of interest or incur additional computation costs that outweigh the time
saved.
\section{Object Detection and Tracking}
An overall review of journal papers submitted by top-performing teams in
Robosub shows a general trend of combining surprisingly simple computer
vision techniques such as adaptive color thresholding, edge detection
i.e Canny Edge \cite{canny1986computational}, and contour analysis i.e
Hu moment \cite{hu1962visual}. Team CUAUV (Cornell AUV) proposes
adaptive color thresholding on different color spaces such as LAB, LUV
and YCrCb where the individual masks are combined to form final
binarized mask. This is a blob-based detection approach where contour
generated by OpenCV's implementation of \cite{suzuki1985topological}
will be matched against known geometric properties of desired object of
interest. \outcite{Walters2014} use particle filter approach to detect
and track object of interest. Known for its ability to deal with
non-linear noise and multi-modal hypotheses
\cite{isard1998condensation}, particle filter has the ability to recover
from wrongly tracked objects. Though more sophisticated techniques such
as neural-network classification is deployed, teams still generally rely
on low-level visual cues such as color and edge. This may be attributed
to simplicity and efficiency of mentioned algorithms.
\outcite{Benoit2014} focuses on developing sophisticated vision tuning
client that allows for rapid prototyping via "mix and match" approach to
design a suitable vision pipeline for each individual vision tasks.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 3: Design & Methodology
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Design \& Methodology}
\section{Proposed design}
Though many solutions to underwater vision challenges exist, many of them are
not designed to work with each other as they do not share a common interface. To
increase ease of use and productivity of developers, this paper proposes a
vision framework that consists of modular components tailored for underwater
application, and ease of integration to Robot Operating System (ROS) which are
commonly used by the robotics community.
The proposed vision framework is divided into \textit{offline} modules and
\textit{online} modules. \textit{Offline} modules refer to modules that will be
deployed prior to the object tracking mission, such as video annotation, visual data
analysis and model learning. In contrast, \textit{online} modules are deployed
during the mission, such as preprocessing, object detection and object tracking.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{framework.png}
\caption{Proposed vision framework}
\label{fig:proposed_vision_framework}
\end{figure}
\subsection{Offline modules}
\textbf{Video annotation} is extremely important for ground truth
generation that is essential for model learning. To achieve rapid ground truth
generation with limited manpower and time, model-free tracking methods such as
mean-shift \outcite{comaniciu2002mean} and correlation-filter based tracking
\outcite{bolme2010visual} are employed. Of course, this is under the assumption that some
degree of localization error is acceptable and human intervention is used to
redefine the target window if drift occurs. This enables a faster testing
iteration as data collected can be integrated more quickly to update our model.
\textbf{Data analysis} helps us discover patterns and statistical nature of
collected visual data which is important for feature engineering and model
learning. These tools include visualization of image under different color
spaces, estimation of illuminants, saliency map generation and image quality
assessments. This information is used as metadata to label and categorize
dataset to increase productivity of model training and validation. Again, this
is an attempt to automate trivial task that require human attention.
\textbf{Model Training} is divided into several stages such as feature
selection, model selection and hyperparameters optimization. To increase
usability of the software without machine learning knowledge, this paper adopts the trending
automatic machine learning approach by leveraging on available open-source
libraries such as \href{https://github.com/automl/auto-sklearn}{Auto-Sklearn},
\href{https://github.com/rhiever/tpot}{TPOT}, and \href{https://github.com/automl/HPOlib}{HPOlib}.
\subsection{Online modules}
\textbf{Preprocessing} has considerable effect on accuracy of underwater object
detection because of the challenges mentioned. Color normalization is performed
on image to remove effect of color cast because of light attenuation. Low-level
statistical methods have been explored because they are simple to implement and
fast while producing accuracy comparable to other methods such as gamut mapping
and learning methods \outcite{Gijsenij2011}. In addition, fusion-based
underwater image enhancement by \outcite{fang2013effective} is implemented to
remove haze effect because of back-scattering of light. Following that, various
illumination compensation methods are executed to reduce effect of flickering and
adjusting brightness of the image for more optimal object detection.
\textbf{Object Tracking} is separated into 3 components: a) \textit{object
proposal}, b) \textit{object classification} and c) \textit{online preprocessing}. An
adaptive object model and pre-learned object model are applied to achieve higher
tracking accuracy. \textit{Object proposals} based on superpixel, edge-detection
and saliency are exploited to produce candidates for classification instead of
the traditional sliding-window approach which is more computationally expensive.
For \textit{object classification} of candidate windows, Support Vector Machine
(SVM), Random Forest and Gaussian Process are the supported classifiers.
To improve generalization of the tracker to different conditions,
\textit{preprocessing} steps are taken to ensure invariance to non-uniform
illuminations and underwater challenges.
\newpage
\section{Methodology}
In this section we will explain how these modular components are used together
for underwater real-time object tracking.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{method.png}
\caption{Main methodology}
\label{fig:main_methodology}
\end{figure}
\subsection{Generating datasets}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{dataset_method.png}
\caption{Dataset generation methodology}
\label{fig:dataset_methodology}
\end{figure}
\textbf{Data analysis} is performed on all images to further categorize them into
different datasets according to various criteria such as degree of haze, existence
of shadow, illuminations and color cast. Each dataset will be tagged with
metadata generated from the analysis. Next, \textbf{video annotation} is
conducted using \textit{Mean-Shift} tracker on preprocessed images to generate
ground truth that will be used for training and validation. To prevent
overfitting and help the model generalize better, data augmentation via
horizontal flipping, scaling, rotating, shifting and color jittering is
performed with the aid of \href{https://keras.io/preprocessing/image/}{Keras
preprocessing module}.
\subsection{Model Learning}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{training_method.png}
\caption{Model learning methodology}
\label{fig:training_methodology}
\end{figure}
To improve discriminability of objects from background in an underwater setting,
\textbf{preprocessing} steps are taken such as color normalization, illumination
compensation and image enhancement. Moving on, different type of features are
extracted from various color spaces that will be used in object classification.
Using the validation set, \textbf{feature selection}, \textbf{model selection}
and \textbf{hyperparameters optimization} are executed to determine the most
optimal combination of algorithm-parameters pair for a particular object class.
\subsection{Online object detection and tracking}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{tracking_method.png}
\caption{Object tracking methodology}
\label{fig:tracking_methodology}
\end{figure}
The next phase involves real-time \textbf{object proposals} adopting a)
superpixel-based clustering and b) edge detection. \textbf{Object classifier}
will rank these candidates according to classification score. Tracking is
performed using a simple nearest neighbour approach and a new tracker will be
initialized after losing track of the target for 10 frames.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 4: Preprocessing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Preprocessing}
In this section, we will look into detail the preprocessing steps that are
applied to each image.
\section{Color Normalization}
For underwater vision challenges, color cues are very important features because
other features like edge, texture and corner have poor visibility underwater
because of low contrast. In Robosub, there are several vision tasks that
require classification of different objects based on their colors. Though color
cue is a simple and discriminative feature for underwater object
detection, color feature shows very poor repeatability under varying light
source. To achieve consistent feature extraction, this paper takes a
\textit{static} approach (fixed-parameters) because of simplicity and our
application does not require high degree of accuracy. With the static approach,
there are less parameters needed to be optimized.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{color_constancy.png}
\caption{Color normalization results (left to right): \newline Top row:
a) Raw input, b) Finlayson's comprehensive normalization, c) Grey-world \newline Bottom
row: d) IACE, e) Finlayson's non-iterative normalization f) Shade of Gray}
\label{fig:colorconstancy_results}
\end{figure}
This paper only considers color normalization methods that require single image
without prior information about the camera used. There are 2 main steps to any
color normalization method: a) illuminant estimation and b) image correction.
The aim of image correction is to achieve chromatic adaptation which can be
modelled with a diagonal transformation \outcite{von1970influence} with certain assumptions.
The mapping of an image under unknown light source to an image under canonical
light source is performed using a diagonal matrix as shown below:
\[
\begin{pmatrix}
R_c \\
G_c \\
B_c \\
\end{pmatrix}
=
\begin{pmatrix}
d_1 & 0 & 0 \\
0 & d_2 & 0 \\
0 & 0 & d_3\\
\end{pmatrix}
\begin{pmatrix}
R_u \\
G_u \\
B_u \\
\end{pmatrix}
\]
Results from \outcite{gijsenij2011computational} suggest that different
algorithms show their strengths and weaknesses on different datasets. Therefore,
this paper proposes some color normalization strategies that can be chosen
based on performance of object detection on the validation datasets.
\subsection{Algorithm Implementation}
\subsubsection{Grey-World based}
\begin{enumerate}
\item \textbf{Grey-World} \\
With the assumption that: \textit{the average reflectance
in a scene under a neutral light source is achromatic} \outcite{buchsbaum1980spatial}, the colour of the light
source is estimated by computing the average color in the image.
\item \textbf{White patch} \\
With the assumption that: \textit{the maximum response of RGB channels is caused
by the perfect reflectance} \outcite{land1977retinex}, the colour of the light
source is estimated by computing the maximum pixel value of each channel separately.
\item \textbf{Grey-Edge} \\
Instead of using raw pixel value, \outcite{van2005color} makes the assumption
that: \textit{the average of the reflectance differences in a scene is
achromatic}. The illuminant is estimated by calculating the average color
derivative of an image.
\item \textbf{Shade of Gray} \\
The maximum operation (max RGB) and the average operation (Grey-World) are both
specific instantiations of the Minkowski norm \outcite{finlayson2004shades}:
Grey-World when $p=1$ and Max RGB when $p=\infty$.
\[
(\frac{\int (\abs{f_x(x)})^p dx}{\int dx})^{\frac{1}{p}} = ke
\]
\end{enumerate}
\subsubsection{Finlayson's approach}
\begin{enumerate}
\item \textbf{Comprehensive image normalization} \\
To remove dependency on lighting geometry, (r, g, b) is normalized to ($s_r$,
$s_g$, $s_b$). Effect of illuminant is removed using grey-world normalization.
These two processes are performed successively for 2 iterations (derived from
empirical results) \outcite{finlayson1998comprehensive}.
\item \textbf{Non-iterative comprehensive image normalization}
Operating on the log RGB space, normalization is performed by subtracting mean
of each row and mean of each column each element \outcite{finlayson2002non}.
\end{enumerate}
\subsection{Improvements}
\begin{enumerate}
\item \textbf{Gamma correction} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{color_gamma.png}
\caption{Effect of applying gamma correction: (top row) no gamma correction,
(bottom row) with gamma correction}
\label{fig:color_gamma}
\end{figure}
To improve the result of color correction \outcite{cepeda2012combining}, gamma
correction is applied after color correction to illuminate dark areas in the
image (often effect of color normalization) which subsequently increase dynamic
range of the image.
\item \textbf{LAB color space} \\
Based on the evaluation of \outcite{kloss2009colour}, the CIE LAB color space
which reflects linearity of human colour perception is able to better represent
transformations for more subtle colours. Color normalization in LAB space will
rarely overcompensate or result in transformed image that looks unnatural.
\item \textbf{Grey pixel} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth, height=0.2\textheight]{greypixel.png}
\caption{Applying novel grey pixel illumination estimation: a) Raw input, b)
Color corrected}
\label{fig:grey_pixel}
\end{figure}
\outcite{yang2015efficient} estimates the illuminant of the scene from
information of grey pixels detected in a color image. It assumes that
\textit{most of the natural images include some detectable pixels that are at
least approximately grey}. Firstly, the color image is converted to logarithm
space, followed by calculating the illumination-invariant measure (IIM) which is
calculated from local contrast of each logarithm channels. Then the mean of
selected grey pixels ranked by the Grey-Index will give us the estimated illumination.
\item \textbf{PCA based} \\
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth, height=0.2\textheight]{spatialcolorconstancy.png}
\caption{Spatial domain based illumination estimation: a) Raw input, b)
Color corrected}
\label{fig:spatial_colorconstancy}
\end{figure}
\end{enumerate}
The work of \outcite{cheng2014illuminant} estimates the illuminant by finding
bright pixels and dark pixels in a color image. The paper selects colours by
choosing n pixels with largest and smallest projected distance to the mean
vector. Then PCA is performed on the selected pixels to generate estimated
illumination direction.
\section{Underwater Image Enhancement}
Color normalization alone is insufficient to restore the original appearance of
an underwater obstacles. Underwater images also suffer from poor contrast,
overexposed or underexposed and flickering caused by refraction of sun light.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{enhancement_results.png}
\caption{Underwater image enhancement results (left to right): \newline Top row:
a) Raw input, b) Dark channel prior, c) Single image fusion \newline Bottom
row: d) CLAHE, e) Red channel prior}
\label{fig:underwater_image_enhancement_results}
\end{figure}
\subsection{Fusion-based image restoration}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{fusion_pipeline.png}
\caption{Single underwater image enhancement by fusion}
\label{fig:fusion_pipeline}
\end{figure}
The work of \outcite{fang2013effective} suggests enhancement of underwater
images using a) white-balance image and b) contrast equalized image as inputs to
generate weight maps (chromatic, luminance, and saliency). The weight maps are
then normalized and fused using image pyramid approach to produce a smoother
enhanced image. The final output can be gamma corrected to adjust overall
brightness of the image.
\textbf{Chromatic map} controls the saturation gain of the enhanced image.
Higher saturation values yield more vivid color. \textbf{Luminance map} helps to
balance the brightness of the enhanced image while \textbf{Saliency map}
indicates areas of high conspicuity. In other words, the saliency map highlights the area
that captures attention of the human visual system.
This approach has the added benefit of being extremely computationally fast and
simple to implement compared to other approach such as dark channel prior \outcite{he2011single}.
\subsection{Denoising \& Illumination Compensation}
According to the survey by \outcite{padmavathi2010comparison}, filters like
homomorphic filter, anisotropic filter and wavelet denoising filter are
necessary to suppress noise, preserve edge and smoothen underwater image. The
vision framework includes the following filters:
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{illumination_compensation.png}
\caption{Illumination compensation results (left to right): \newline Top row:
a) Underexposed input, b) Chih's light compensation, c) Chen's light compensation \newline Bottom
row: d) Flicker input, e) Homomorphic filter f) Gamma corrected}
\label{fig:illumination_compensation_results}
\end{figure}
\begin{enumerate}
\item \textbf{Homomorphic filter} \\
Underwater vision tasks in shallow water are prone to suffer from spatial
temporal illumination patterns. In this case, a homomorphic filter can help to
correct non-uniform illumination and sharpen the image. With the assumption that
the high frequency components of an image is associated with reflectance of the
image, a high pass filter is applied on the frequency domain (removing
multiplicative noise) removing the low frequency (flickers).
\item \textbf{Anisotropic filter} \\
Used in conjunction with the homomorphic filter is the anisotropic filter, which
smoothens homogeneous area while preserving edges. The work of
\outcite{perona1990scale} helps to reduce small edges generated by homomorphic filter.
\item \textbf{Illumination compensation} \\
When executing underwater vision tasks, the AUV has to constantly deal with
fluctuation in illumination because of various factors such as position of the
sun and clouds. Instead of relying on manual tuning of camera parameters, some
automated light compensation is performed on captured image sequence. This is
extremely important as an overexposed or underexposed image lose most of its
chromatic information.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{lightcompensation.png}
\caption{Comparison between logarithm curve and gamma curve}
\label{fig:light_compensation}
\end{figure}
This paper refers to the work of \outcite{changlight} for 2 different
brightness adjustment algorithms. Firstly, a logarithm curve is used which obeys
the Weber-Fechner law of JND (Just noticeable difference) response instead of a
gamma curve which tends to enhance noise in dark regions.
\end{enumerate}
\section{Conclusion}
The preprocessing stage is one of the most important component for effective
underwater object tracking. Color normalization ensures repeatability of feature
extraction for different datasets. This allows for extracting domain invariant
features which can be used for object tracking in different water environments
such as public swimming pool, beach or a man-made lake (venue of Robosub competition).
However, it is necessary to keep the selection of preprocessing algorithms small
to reduce any overhead on real-time object tracking. Therefore, this paper
favors preprocessing algorithms that are less complex, effectively trading off
some degree of accuracy for lower detection latency.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 5: Object Proposals
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Object Proposals}
Recently we have seen more state-of-the-art trackers incorporate object proposals
as part of their pipeline \outcite{Kristan2016a}, \outcite{kristan2015visual}.
The rise of object proposals which is a segmentation-based candidates generation
slowly replaces the more traditional sliding window approach which can be slow
when multiscale detection is required.
In addition, object proposals can be thought as a generic object detector which
generate candidate window based on some measure of objectness. Choosing the
criteria to measure the presence of an object is very important and is unique
for each domain of application. Object proposals according to
\outcite{hosang2016makes} fall into 2 large categories: a) grouping and b)
window ranking.
\textbf{Grouping proposals} leverage on hierarchical segmentation approach to
generate overlapping segments with techniques such as a) superpixel grouping
(SP), b) solving multiple graph cut problem (GC) with random seeds or from c)
edge contour (EC). On the other hand, \textbf{Window scoring} proposals only
score each candidate window on likelihood of containing an object. This approach
is faster at the cost of lower localization accuracy.
From the mentioned paper, methods that are based on superpixel are not robust
towards illumination change while \textbf{BING} \outcite{cheng2014bing} and
\textbf{Edge-box} \outcite{zitnick2014edge} show promising result because of its
machine learning component (random forest).
\section{Algorithm Implementation}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{proposals_results.png}
\caption{Different object proposal paradigms}
\label{fig:proposal_results}
\end{figure}
This paper utilizes 4 different object proposals for different type of
underwater vision tasks. Referring to Figure \ref{fig:proposal_results},
detection of color buoys which are largely homogeneous with little edge
information are much better handled with \textbf{SelectiveSearch}
\outcite{uijlings2013selective} approach. In general, \textit{grouping
proposals} show more promising result than \textit{window scoring} approach
for all the underwater vision tasks. The additional speed gained from
\textit{window scoring} approach is almost nullified with the need to sample a
large number of windows to achieve decent localization accuracy.
Besides the \textit{SelectiveSearch} and \textit{Edge-box}, the following
section will discuss custom implementation of 2 other proposal methods.
\subsection{Maximally Stable Extremal Regions (MSER)}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{mserproposal.png}
\caption{Object proposals using MSER: a) Buoy task, b) Coin task, c) Set date task}
\label{fig:mser_proposal}
\end{figure}
Since most of the underwater obstacles are blob-like, this paper uses the
implementation of \outcite{forssen2007maximally} by OpenCV to extract candidate
windows for object detection. Firstly, the image is converted to HSV color
space. The \textit{Saturation} channel is then used for blob-detection as most
underwater obstacles have more vivid color compared to the background.
Alternatively, a combination of different color channels are explored to
generate more segments such as L*a*b and YUV.
\subsection{Saliency-based}
Object proposals based on salient cues are also explored as they mimic closely
how human visual system works. Without any preprocessing, the results of salient
object proposal is mediocre at best compared to the other proposal methods.
However with appropriate color normalization and enhancement, this method can
produce results that can rival with \textit{SelectiveSearch} and
\textit{Edge-box} in this domain of application. This paper use an open-source implementation of:
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{saliency.png}
\caption{Object proposals using saliency approach}
\label{fig:salient_proposal}
\end{figure}
\begin{enumerate}
\item \textbf{Saliency optimization from robust background subtraction} \outcite{zhu2014saliency}
\item \textbf{Minimum Barrier Salient Object Detection at 80 FPS} \outcite{zhang2015minimum}
\item \textbf{Frequency-tuned Salient Region Detection} \outcite{achanta2009frequency}
\end{enumerate}
\section{Conclusion}
In this section we have explored different proposal methods that are
state-of-the-art and others (MSER and saliency) that are slightly different from
available literatures. Though \textit{MSER} managed to generate a lot of
segments, this method does not measure the quality of each segment unlike
\textit{Edge-box}. \textit{Salient object proposals} on the other hand produce
candidate windows that are highly accurate but with very few segments. In
general, not missing any possible object candidate is more important than
generating highly accurate candidate window. Therefore, this paper propose to
lower the threshold for \textit{saliency-based} proposals in order to generate
more candidate windows.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 7: Feature Design
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Feature Design}
Large amount of feature detector and descriptors used for this projects are available in
\href{http://opencv.org/}{OpenCV} or
\href{http://scikit-image.org/}{Scikit-image}. This paper has come to this list
of features based on the benchmarks by \outcite{lee2016recent} and \outcite{pieropan2016feature}.
Below is a summary of features available in this vision framework:
\begin{enumerate}
\item \textbf{SURF} \outcite{bay2006surf}
\item \textbf{SIFT} \outcite{lowe1999object}
\item \textbf{BRISK} \outcite{leutenegger2011brisk}
\item \textbf{ORB} \outcite{rublee2011orb}
\item \textbf{FREAK} \outcite{alahi2012freak}
\item \textbf{MSER} \outcite{forssen2007maximally}
\item \textbf{DAISY} \outcite{tola2010daisy}
\item \textbf{CenSure} \outcite{agrawal2008censure}
\item \textbf{LBP} \outcite{ojala2002multiresolution}
\item \textbf{AKAZE} \outcite{alcantarilla2011fast}
\item \textbf{Inner Shape Context} \outcite{ling2007shape}
\item \textbf{Elliptic Fourier Feature of Closed Contour} \outcite{kuhl1982elliptic}
\item \textbf{Histogram of Oriented Gradient} \outcite{dalal2005histograms}
\item \textbf{Hu moment and Zernike moment} \outcite{sabhara2013comparative}
\end{enumerate}
\section{Requirements}
There are various desired properties of features for underwater object tracking
in particular ones that are highly applicable in the competition setting of
Robosub competition. The 2 main properties are: a) \textit{repeatability} and b)
\textit{discriminability}. Firstly, it is important that we are able to
consistently extract the same set of features for the same object in order to
achieve consistent detection. However, there is always a trade-off with
\textit{discriminability} as features that are highly repeatable tend to
describe a more general representation of the object.
\subsection{Illumination invariance}
It is preferable to have features that are highly invariant to sudden changes in
illumination as the operational depth of the AUV during the competition is still
susceptible to external factors such as position of the sun and clouds. To
achieve this, this paper proposes the use of multiple illumination-invariant
color spaces to describe the appearance of the object.
\subsection{Scale \& Rotation invariance}
Secondly, the feature must also be scale and rotation invariant because the AUV
will be constantly navigating around its surrounding to identify object of
interest. The easiest way to achieve this is to rely on purely color-based
feature that will be mentioned in section \ref{sec:color_descriptors}. Color-based features are easier to
compute and are less computationally expensive compared to features like SURF,
SIFT and HOG.
\subsection{Shape discriminability}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{similarcolor.png}
\caption{Objects with similar colors}
\label{fig:similar_color}
\end{figure}
Color-based features alone are not sufficient for our application as there
exists objects of similar color appearance. In this case, we will need shape
descriptors that will be elaborated in section \ref{sec:shape_descriptors}.
\section{Color space: Implementation}
\label{sec:color_descriptors}
Besides the usual color spaces, this section will look at some implementation
of color spaces based on these papers on the subject:
\begin{enumerate}
\item Detecting salient cue through color-ratio \outcite{todt2004detecting}
\item Color Invariants for Person Re-identification \outcite{kviatkovsky2013color}
\item Evaluation of Color Descriptors for Object and Scene Recognition \outcite{van2010evaluating}
\item Invariant color descriptors for efficient object recognition \outcite{sande2011invariant}
\item A Perception-based Color Space for Illumination-invariant Image
Processing \outcite{chong2008perception}
\item Illumination invariant color model robot soccer \outcite{luan2010illumination}
\item Illumination invariant imaging \outcite{maddern2014illumination}
\item Color Model Double Opponency \outcite{gao2013color}
\end{enumerate}
\subsection{rg chromaticity}
\[
r = \frac{R}{R + G + B}, g = \frac{G}{R + G + B}, b = \frac{B}{R + G + B}
\]
\subsection{Normalized RGB}
\[
r = \frac{R - \mu(R)}{\sigma(R)}, g = \frac{G - \mu(G)}{\sigma(G)}, b =
\frac{B - \mu(B)}{\sigma(B)}
\]
\subsection{Opponent color space}
\[
\begin{aligned}
O1 = \frac{R - G}{\sqrt{2}}, O2 = \frac{R + G - 2B}{\sqrt{6}}, O3 = \frac{R +
G + B}{\sqrt{3}} \\
W_{o1} = \frac{O1}{O3}, W_{o2} = \frac{O2}{O3}
\end{aligned}
\]
\subsection{Log color ratio}
\[
L1 = \log{\frac{R}{G}}, L2 = \log{\frac{R}{B}}, L3 = \log{\frac{G}{B}}
\]
\subsection{RGBY opponent space}
\[
R_o = R - \frac{G + B}{2}, G_o = G - \frac{R + B}{2}, B_o = B - \frac{R +
G}{2}, Y_o = \frac{R + G}{2} - \abs{R - G} - B
\]
\subsection{DCD: Dominant color descriptor}
Convert to the LUV color space or any other perceptually uniform color space.
Perform K-mean clustering and return the percentage of pixels and variance of
each color centers.
\section{Shape descriptors}
\label{sec:shape_descriptors}
\subsection{Inner shape context}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth, height=0.1\textheight]{innerdistance.png}
\caption{Dashed lines denote the shortest path within the shape boundary}
\label{fig:inner_shapecontext}
\end{figure}
\textit{Inner-distance} is defined as the length of the shortest path between
landmark points within the shape silhouette. Inner distance is used instead of
Euclidean distance when building the shape context. This is an improvement over
the traditional shape context as it is able to describe more complicated shapes.
\subsection{Elliptic Fourier Feature of Closed Contour}
A chain-encoded closed contour is first obtained using OpenCV's
\textit{findContour}. Normalization of Fourier's coefficients using various
elliptical properties of the coefficients. This descriptors obtained are
invariant to \textit{rotation}, \textit{dilation} and \textit{translation} of
the contour.
\subsection{Moment-based descriptors}
The vision framework includes 2 common moment-based descriptors: a) \textit{Hu Moment}
and b) \textit{Zernike Moment}. According to the evaluation by
\outcite{sabhara2013comparative}, Zernike's moment is more robust and flexible
as one can vary the order of the polynomial to describe more complex shapes.
Furthermore, the Pseudo-Zernike moment, which is more robust to noise, is also part
of the supported shape descriptors.
In addition, simpler contour properties can also be used if the target of
interests consist of basic shapes that are largely different from each other.
These properties include:
\begin{enumerate}
\item \textbf{Eccentricity} \\
Fit a bounding box over the closed contour to obtain the length of the major axis
and length of the minor axis.
\[
Eccentricity = \frac{L_{major axis}}{L_{minor axis}}
\]
\item \textbf{Circularity ratio} \\
Calculates the ratio between the area of original shape and area of its enclosing circle.
\[
Circularity = \frac{Area_{shape}}{Area_{circle}}
\]
\item \textbf{Rectangularity} \\
Similar to above, this calculates the ratio between area of the original shape
and area of its enclosing rectangle.
\[
Rectangularity = \frac{Area_{shape}}{Area_{rectangle}}
\]
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 8: Model Learning
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Model Learning}
The primary classifiers for object classification include: a) \textit{SVM}, b)
\textit{Random Forest}, and \textit{Gaussian Process}. These classifiers are
selected primarily because we have small amount of data as availability of
underwater data are quite limited and can be very expensive to collect. Neural
network and its more popular sibling: Deep Neural Network is largely ignored
because of a) scarcity of data and b) many parameters tuning are needed. In
addition, this vision framework hopes to achieve comparable accuracy in
underwater object tracking while relying on simpler features and less
parameter-intensive tuning from human experts.
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth, height=0.1\textheight]{gp.png}
\caption{Gaussian process}
\label{fig:gp}
\end{figure}
\textit{Gaussian process (GP)} \outcite{rasmussen2006gaussian} is introduced
because of its unique ability to perform feature selection using a covariance
function that implements \textit{automatic relevance determination}. A GP model
also provides uncertainty scores of each classification and a prior knowledge
can be integrated easily into its prior function. One major downside of GP is
definitely its $O(n^3)$ complexity which makes it a poor choice for large amount
of data.
The following section will focus more on the effort to apply the
\textit{automatic machine learning} principle which aims to automate trivial
machine learning tasks such as \textit{feature selection}, \textit{model
selection} and \textit{hyperparameters optimization}. Most of these algorithms
are open-source and are readily available, this paper merely tries to integrate it
as part of the framework to remove the dependency on machine learning experts
for trivial tasks.
\section{Feature Selection}
Having multiple features is advantageous to allow for greater adaptation to
different challenging environments. For instance, detection of a textureless
object can be challenging using the popular HOG feature while a simple color
histogram can produce a better result. This paper would like to highlight that
choosing the right feature can improve accuracy and reduce needless
computational cost from using complex feature descriptors like SIFT. In
addition, choosing best features also reduce dimension of feature which is a
problem on small AUV equipped with less powerful computing unit.
Besides using the \textit{Automatic relevance determination} of a GP model, this
framework leverages on the widely used machine learning library,
\textit{Sklearn's feature selection module}. Below is the list of feature
selection functions used in the vision framework.
\begin{enumerate}
\item \textbf{Removing feature with low variance} \\
This approach removes features that are below certain variance threshold
labelling it as redundant.
\item \textbf{Univariate feature selection}
Univariate test such as F-test and chi-test are used to select the best
features before training the model.
\item \textbf{Tree-based}
Uses an ensemble model (forest of trees) to calculate feature importances
which is used to score input features.
\end{enumerate}
\section{Hyperparameters optimization: Implementation}
Uses \href{https://github.com/automl/HPOlib}{HPOlib} which contains 3 libraries for hyperparameter optimizations:
\begin{enumerate}
\item Sequential model-based optimization \outcite{hutter2011sequential}
\item Spearmint Bayesian optimization codebase \outcite{snoek2012practical},
\outcite{swersky2013multi}, \outcite{snoek2014input},
\outcite{snoek2013bayesian}, \outcite{gelbart2014bayesian}
\item Hyperopt
\end{enumerate}
Ideally, there are very few hyperparameters optimization needed as the paper
actively tries to use non-parametric methods with the exception of SVM (choice
of kernel and misclassification penalty).
\section{Model Selection}
For model selection, again \textit{Sklearn's model selection and evaluation} is
used to select the best model for a particular tasks through cross-validation.
From our observation, the performance of each classifier does not vary very
much. In addition, this paper also experimented with \textit{Auto sklearn}
\outcite{feurer2015efficient}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 9: Object Tracking
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Object Tracking}
This section will explore different tracking paradigm
\outcite{stalder2012paradigms} and analyze various surveys conducted to determine the best trackers
for underwater visual tracking. Because of the tracking strategy, only single
object tracking algorithms are evaluated. There are few reasons why the paper
proposed a single object tracking approach:
\begin{enumerate}
\item Simpler implementation
\item The AUV has limited number of manipulators which make it possible of
manipulating only a single obstacle at a time
\item Less computationally expensive
\end{enumerate}
\section{Benchmarks}
As for the benchmarks, the paper looks into the papers listed below:
\begin{enumerate}
\item Visual object tracking performance measures revisited \outcite{Cehovin2016a}
\item VOT 2016 Challenge Results \outcite{Kristan2016a}
\item VOT 2015 Challenge Results \outcite{kristan2015visual}
\item Is my new tracker really better than yours ? \outcite{Cehovin2014a}
\end{enumerate}
The top trackers almost always combine an adaptive tracking and a fixed-model
tracking approach. Online model update techniques such as \textit{Adaboost} and
\textit{Multiple Instance Learning} are capable of adapting to different
conditions as positive samples are sampled around vicinity of tracked object
while negative samples are extracted from background of the image. However,
adaptive model update comes at the cost of computational cycle and also more
complicated model.
\section{Tracking by detection: Implementation}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{tracker.png}
\caption{Tracking pipeline}
\label{fig:tracker}
\end{figure}
This paper utilizes a tracking by detection approach as detection is performed
on each frame and associated with previously tracked objects. A tracker is
\textbf{terminated} when the tracker loses track of its target for at least
10 frames. This value is determined through empirical evaluation of applying the
tracker on existing image sequence. To handle \textbf{multiple instance} of the
object, the object with the shortest Euclidean distance will be selected. In
addition, association of object with previously tracked object is bounded on a
specific radius to reduce false positives with the assumption that the AUV is
perfectly stable and does not move randomly over a short period of time.
Prior knowledge such as geometric property of the target can be included through
a weighted summation of classification score and prior score. With more context,
a more accurate detection can be achieved.
\section{Model-free tracking: Implementation}
In addition to the main tracking strategy mentioned above, model-free tracking algorithms
based on correlation-filter are also included for: a) rapid data collection and
b) tracking generic object. These algorithms include:
\begin{enumerate}
\item High-speed kernelized correlation filter \outcite{henriques2015high}
\item Visual Object Tracking using Adaptive Correlation Filters \outcite{Bolme2010}
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Chapter 10: Experimental results
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Experimental results}
\section{Datasets}
The datasets are generated and categorized using the sequence annotator, AIBU
which is used by the Visual Object Tracking (VOT) committee. There are total of
6 datasets with different set of challenges. At the same time, 6 object classes
will be tested.
\subsection{Challenges}
Figure \ref{fig:dataset1} and \ref{fig:dataset2} are the datasets labelled with
bounding box ground truth used for evaluation of the proposed tracker.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{data1.png}
\caption{Dataset 1}
\label{fig:dataset1}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth, height=0.3\textheight]{data2.png}
\caption{Dataset 2}
\label{fig:dataset2}
\end{figure}
\subsection{Object classes}
The objects to be tracked composed of:
\begin{enumerate}
\item Red buoy
\item Green buoy
\item Yellow buoy
\item Set date cover
\item Red Coin
\item Bin cover
\end{enumerate}
\section{Results}
\subsection{Evaluation methodology}
Using Visual Object Tracking (VOT15) \outcite{kristan2015visual} as guideline,
following are the performance measures used:
\begin{enumerate}
\item \textbf{Accuracy, A} \\
Accuracy is measured the average overlap of predicted bounding box with the
ground truth bounding box. Accuracy for a sequence is obtained by averaging
per-frame accuracies.
\item \textbf{Robustness, R} \\
Robustness measures how many times the tracker loses the target (overlap is
zero). The tracker is reinitialized 10 frames after the failure. Again,
robustness of a sequence is calculated using the average failure rate.
\item \textbf{Frame per-second, FPS}
This is a naive measure of speed by calculating the average of FPS of the
tracker over different datasets.
\end{enumerate}
\subsection{Trackers}
The competing trackers can be categorized into 2 big categories: a) variations
of proposed tracker and b) open-source trackers. The baseline tracker is our
proposed tracker without any preprocessing, using only color thresholding along
with contour properties. Below is the list of trackers:
\begin{table}[H]
\centering
\begin{tabular}{|l|}
\hline
Trackers \\ \hline
Baseline \\ \hline
Baseline + preprocessing \\ \hline
Baseline + preprocessing + automl \\ \hline
MOSSE \\ \hline
KCF \\ \hline
EBT (Edge Box Tracker) \\ \hline
\end{tabular}
\caption{Competing trackers}
\label{table:competing_trackers}
\end{table}
It is to be made known that only minimal preprocessing such as smoothing and
denoising are performed when using open-source trackers. This is to ensure that
the inputs are not too perturbed with noise. This makes sure that the
state-of-the-art trackers are not at a big disadvantage compared to our proposed trackers.
\subsection{Raw results}
\begin{table}[H]
\centering
\begin{tabular}{|l|l|l|l|}
\hline
Trackers & Accuracy & Robustness & Speed \\ \hline
Baseline & 0.21 & 7.23 & 200 \\ \hline
Baseline + preprocessing & 0.34 & 6.11 & 50 \\ \hline
Baseline + preprocessing + automl & 0.53 & 2.53 & 50 \\ \hline
MOSSE & 0.30 & 8.10 & 100 \\ \hline
KCF & 0.35 & 4.91 & 70 \\ \hline
EBT (Edge Box Tracker) & 0.41 & 3.11 & 43 \\ \hline
\end{tabular}
\caption{Raw results across all datasets}
\label{table:raw_results}
\end{table}
\section{Discussion}
\subsection{Preprocessing}
Both correlation filter based trackers, KCF and MOSSE performed poorly for the
\textit{illumination-dataset} while our proposed tracker managed to consistently
track the object of interest. EBT on the other hand showed poor performance in
both \textit{overexposed} and \textit{low contrast} datasets as the edge
information is barely visible. For the \textit{size change} dataset, KCF and EBT in
particular shows the best performance. This is to be expected as these trackers
did show promising results in VOT15. The baseline tracker without any
preprocessing performed miserably in almost all datasets. However, with added
preprocessing, the baseline tracker is able to achieve decent accuracies for
datasets without any complex shapes.
\subsection{Automatic machine learning}
The result for performing feature selection showed promising result as it is
able to perform up to par with some of the state of the art trackers such as EBT
and KCF. However, it has to be mentioned that these trackers with preprocessing
are able to outperform the proposed tracker. This goes to show the importance of
preprocessing when performing object detection in underwater environment.
\subsection{Conclusion}
Looking at the Table \ref{table:raw_results}, one can conclude the importance of
preprocessing for underwater object tracking because the accuracies achieved by
state-of-the-art trackers do not justify their ranking in VOT15. Our proposed
tracker which combines both preprocessing and automatic machine learning
approach is able to improve the baseline accuracies by leaps and bounds without
needing really complex feature representations.
\bibliographystyle{socreport}
\bibliography{fyp}{}
%%% End document
\end{document}
|
{"hexsha": "faf2f2c39c490f3310a4efc2cad33eaee80ff943", "size": 59343, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/report/main.tex", "max_stars_repo_name": "tsoonjin/selam", "max_stars_repo_head_hexsha": "fbbb355490271bf09056e05b23245be1b75ae24d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-14T06:05:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T15:43:54.000Z", "max_issues_repo_path": "docs/report/main.tex", "max_issues_repo_name": "tsoonjin/selam", "max_issues_repo_head_hexsha": "fbbb355490271bf09056e05b23245be1b75ae24d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/report/main.tex", "max_forks_repo_name": "tsoonjin/selam", "max_forks_repo_head_hexsha": "fbbb355490271bf09056e05b23245be1b75ae24d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-08T12:59:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-08T12:59:35.000Z", "avg_line_length": 44.6859939759, "max_line_length": 113, "alphanum_fraction": 0.7729808065, "num_tokens": 13776}
|
# Different ways to simulate molecules
export
accelerations,
VelocityVerlet,
simulate!,
VelocityFreeVerlet
"""
accelerations(simulation, neighbours; parallel=true)
Calculate the accelerations of all atoms using the general and specific
interactions and Newton's second law.
"""
function accelerations(s::Simulation, neighbours; parallel::Bool=true)
n_atoms = length(s.coords)
if parallel && nthreads() > 1 && n_atoms >= 100
forces_threads = [zero(s.coords) for i in 1:nthreads()]
# Loop over interactions and calculate the acceleration due to each
for inter in values(s.general_inters)
if inter.nl_only
@threads for ni in 1:length(neighbours)
i, j = neighbours[ni]
force!(forces_threads[threadid()], inter, s, i, j)
end
else
@threads for i in 1:n_atoms
for j in 1:i
force!(forces_threads[threadid()], inter, s, i, j)
end
end
end
end
forces = sum(forces_threads)
else
forces = zero(s.coords)
for inter in values(s.general_inters)
if inter.nl_only
for ni in 1:length(neighbours)
i, j = neighbours[ni]
force!(forces, inter, s, i, j)
end
else
for i in 1:n_atoms
for j in 1:i
force!(forces, inter, s, i, j)
end
end
end
end
end
for inter_list in values(s.specific_inter_lists)
for inter in inter_list
force!(forces, inter, s)
end
end
for i in 1:n_atoms
forces[i] /= s.atoms[i].mass
end
return forces
end
"""
VelocityVerlet()
The velocity Verlet integrator.
"""
struct VelocityVerlet <: Simulator end
"""
simulate!(simulation; parallel=true)
simulate!(simulation, n_steps; parallel=true)
simulate!(simulation, simulator, n_steps; parallel=true)
Run a simulation according to the rules of the given simulator.
Custom simulators should implement this function.
"""
function simulate!(s::Simulation,
::VelocityVerlet,
n_steps::Integer;
parallel::Bool=true)
# See https://www.saylor.org/site/wp-content/uploads/2011/06/MA221-6.1.pdf for
# integration algorithm - used shorter second version
n_atoms = length(s.coords)
neighbours = find_neighbours(s, nothing, s.neighbour_finder, 0,
parallel=parallel)
accels_t = accelerations(s, neighbours, parallel=parallel)
accels_t_dt = zero(s.coords)
@showprogress for step_n in 1:n_steps
for logger in values(s.loggers)
log_property!(logger, s, step_n)
end
# Update coordinates
for i in 1:length(s.coords)
s.coords[i] += s.velocities[i] * s.timestep + accels_t[i] * (s.timestep ^ 2) / 2
s.coords[i] = adjust_bounds.(s.coords[i], s.box_size)
end
accels_t_dt = accelerations(s, neighbours, parallel=parallel)
# Update velocities
for i in 1:length(s.velocities)
s.velocities[i] += (accels_t[i] + accels_t_dt[i]) * s.timestep / 2
end
apply_thermostat!(s, s.thermostat)
neighbours = find_neighbours(s, neighbours, s.neighbour_finder, step_n,
parallel=parallel)
accels_t = accels_t_dt
s.n_steps_made[1] += 1
end
return s
end
"""
VelocityFreeVerlet()
The velocity-free Verlet integrator, also known as the Störmer method.
In this case the `velocities` given to the `Simulator` act as the previous step
coordinates for the first step.
"""
struct VelocityFreeVerlet <: Simulator end
function simulate!(s::Simulation,
                   ::VelocityFreeVerlet,
                   n_steps::Integer;
                   parallel::Bool=true)
    # Störmer–Verlet position update: x(t+dt) = 2x(t) - x(t-dt) + a(t)dt^2.
    # `s.velocities` supplies the previous-step coordinates for step one.
    n_atoms = length(s.coords)
    neighbours = find_neighbours(s, nothing, s.neighbour_finder, 0,
                                 parallel=parallel)
    coords_last = s.velocities
    @showprogress for step_n in 1:n_steps
        for logger in values(s.loggers)
            log_property!(logger, s, step_n)
        end
        accels_t = accelerations(s, neighbours, parallel=parallel)
        # Snapshot the coordinates BEFORE the in-place update below.
        # Bug fix: the previous code did `coords_copy = s.coords`, which
        # only aliases the same array — the element writes then corrupted
        # the snapshot, degenerating the update to x += a*dt^2.
        coords_copy = copy(s.coords)
        # Update coordinates in place, wrapping into the periodic box.
        for i in 1:length(s.coords)
            s.coords[i] = 2 * s.coords[i] - coords_last[i] + accels_t[i] * s.timestep ^ 2
            s.coords[i] = adjust_bounds.(s.coords[i], s.box_size)
        end
        coords_last = coords_copy
        apply_thermostat!(s, s.thermostat)
        neighbours = find_neighbours(s, neighbours, s.neighbour_finder, step_n,
                                     parallel=parallel)
        s.n_steps_made[1] += 1
    end
    return s
end
"""
    simulate!(simulation, n_steps; parallel=true)

Run `n_steps` steps using the simulator stored in the simulation itself.
"""
function simulate!(sim::Simulation, n_steps::Integer; parallel::Bool=true)
    return simulate!(sim, sim.simulator, n_steps, parallel=parallel)
end
"""
    simulate!(simulation; parallel=true)

Run the simulation for however many of its configured steps remain.
"""
function simulate!(sim::Simulation; parallel::Bool=true)
    remaining = sim.n_steps - first(sim.n_steps_made)
    return simulate!(sim, remaining, parallel=parallel)
end
|
{"hexsha": "10b918081776a2e25506213cffb259330083ecc9", "size": 5462, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/simulators.jl", "max_stars_repo_name": "longemen3000/Molly.jl", "max_stars_repo_head_hexsha": "346bd5bd7bce3f7ff3169a01b414091cc7eb35a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/simulators.jl", "max_issues_repo_name": "longemen3000/Molly.jl", "max_issues_repo_head_hexsha": "346bd5bd7bce3f7ff3169a01b414091cc7eb35a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/simulators.jl", "max_forks_repo_name": "longemen3000/Molly.jl", "max_forks_repo_head_hexsha": "346bd5bd7bce3f7ff3169a01b414091cc7eb35a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3908045977, "max_line_length": 93, "alphanum_fraction": 0.5684730868, "num_tokens": 1236}
|
#!/usr/bin/env python3
import os
import time
import numpy as np
import cereal.messaging as messaging
from selfdrive.manager.process_config import managed_processes
# Number of repeated runs and the sampling window per run, both overridable
# via environment variables.
N = int(os.getenv("N", "5"))
TIME = int(os.getenv("TIME", "30"))

if __name__ == "__main__":
    sock = messaging.sub_sock('modelV2', conflate=False, timeout=1000)

    execution_times = []

    for _ in range(N):
        os.environ['LOGPRINT'] = 'debug'
        managed_processes['modeld'].start()
        time.sleep(5)  # let modeld warm up before sampling

        frame_times = []
        t_start = time.monotonic()
        while time.monotonic() - t_start < TIME:
            for msg in messaging.drain_sock(sock, wait_for_one=True):
                frame_times.append(msg.modelV2.modelExecutionTime)
        # Drop the first 10 frames (startup noise) and convert s -> ms.
        execution_times.append(np.array(frame_times[10:]) * 1000)
        managed_processes['modeld'].stop()

    print("\n\n")
    print(f"ran modeld {N} times for {TIME}s each")
    for t in execution_times:
        print(f"\tavg: {sum(t)/len(t):0.2f}ms, min: {min(t):0.2f}ms, max: {max(t):0.2f}ms")
    print("\n\n")
|
{"hexsha": "2ea56d97ed187a2f8e0003689dea962b85897f1a", "size": 986, "ext": "py", "lang": "Python", "max_stars_repo_path": "selfdrive/modeld/test/timing/benchmark.py", "max_stars_repo_name": "TMORI135/openpilot", "max_stars_repo_head_hexsha": "bc986477eb34f554933caafeac71538c57fb6838", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2020-12-29T13:00:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-04T12:09:01.000Z", "max_issues_repo_path": "selfdrive/modeld/test/timing/benchmark.py", "max_issues_repo_name": "TMORI135/openpilot", "max_issues_repo_head_hexsha": "bc986477eb34f554933caafeac71538c57fb6838", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-04-01T00:28:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-22T15:37:07.000Z", "max_forks_repo_path": "selfdrive/modeld/test/timing/benchmark.py", "max_forks_repo_name": "TMORI135/openpilot", "max_forks_repo_head_hexsha": "bc986477eb34f554933caafeac71538c57fb6838", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-08-24T00:34:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-19T11:57:15.000Z", "avg_line_length": 25.9473684211, "max_line_length": 87, "alphanum_fraction": 0.6582150101, "include": true, "reason": "import numpy", "num_tokens": 288}
|
import torch
import torch.nn as nn
import numpy as np
class BBoxTransform(nn.Module):
def __init__(self, mean=None, std=None, gpu=False):
super(BBoxTransform, self).__init__()
if mean is None:
self.mean = torch.from_numpy(np.array([0, 0, 0, 0]).astype(np.float32))
else:
self.mean = mean
if std is None:
self.std = torch.from_numpy(np.array([0.1, 0.1, 0.2, 0.2]).astype(np.float32))
else:
self.std = std
if gpu and torch.cuda.is_available():
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def forward(self, boxes, deltas):
widths = boxes[:, :, 2] - boxes[:, :, 0]
heights = boxes[:, :, 3] - boxes[:, :, 1]
ctrx = boxes[:, :, 0] + 0.5 * widths
ctry = boxes[:, :, 1] + 0.5 * heights
dx = deltas[:, :, 0] * self.std[0] + self.mean[0]
dy = deltas[:, :, 1] * self.std[1] + self.mean[1]
dw = deltas[:, :, 2] * self.std[2] + self.mean[2]
dh = deltas[:, :, 3] * self.std[3] + self.mean[3]
pred_x = ctrx + dx * widths
pred_y = ctry + dy * heights
pred_w = torch.exp(dw) * widths
pred_h = torch.exp(dh) * heights
pred_boxes_x1 = pred_x - 0.5 * pred_w
pred_boxes_y1 = pred_y - 0.5 * pred_h
pred_boxes_x2 = pred_x + 0.5 * pred_w
pred_boxes_y2 = pred_y + 0.5 * pred_h
pred_boxes = torch.stack([
pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2
], dim=2)
return pred_boxes
class ClipBoxes(nn.Module):
    """Clip boxes in place to the image extent.

    x1/y1 are floored at 0; x2/y2 are capped at the image width/height.
    The input tensor is modified and also returned.
    """

    def __init__(self):
        super(ClipBoxes, self).__init__()

    def forward(self, boxes, img):
        # img: (batch, channels, height, width)
        _, _, height, width = img.shape

        boxes[:, :, 0] = boxes[:, :, 0].clamp(min=0)
        boxes[:, :, 1] = boxes[:, :, 1].clamp(min=0)
        boxes[:, :, 2] = boxes[:, :, 2].clamp(max=width)
        boxes[:, :, 3] = boxes[:, :, 3].clamp(max=height)

        return boxes
class Anchors(nn.Module):
    """Generate anchor boxes for every pyramid level of an input image.

    Defaults follow the RetinaNet convention: levels P3-P7, strides and base
    sizes derived from the level, 3 aspect ratios x 3 scales per location.
    """

    def __init__(self, pyramid_levels=None, strides=None, sizes=None, ratios=None, scales=None):
        super(Anchors, self).__init__()
        # Bug fix: the original only assigned each attribute when the
        # argument was None, so explicitly passed values were silently
        # discarded and later attribute access raised AttributeError.
        self.pyramid_levels = [3, 4, 5, 6, 7] if pyramid_levels is None else pyramid_levels
        # Stride/size defaults derive from the resolved pyramid levels.
        self.strides = [2 ** x for x in self.pyramid_levels] if strides is None else strides
        self.sizes = [2 ** (x + 2) for x in self.pyramid_levels] if sizes is None else sizes
        self.ratios = np.array([0.5, 1, 2]) if ratios is None else ratios
        self.scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]) if scales is None else scales

    def forward(self, image):
        """Return all anchors for `image` as a (1, K, 4) float32 tensor."""
        image_shape = image.shape[2:]
        image_shape = np.array(image_shape)
        # Feature-map size at each level (ceil division by the stride).
        image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]

        all_anchors = np.zeros((0, 4)).astype(np.float32)
        for idx, p in enumerate(self.pyramid_levels):
            anchors = generate_anchors(base_size=self.sizes[idx], ratios=self.ratios, scales=self.scales)
            shifted_anchors = shift(image_shapes[idx], self.strides[idx], anchors)
            all_anchors = np.append(all_anchors, shifted_anchors, axis=0)

        all_anchors = np.expand_dims(all_anchors, axis=0)
        anchors = torch.from_numpy(all_anchors.astype(np.float32))
        if torch.cuda.is_available():
            anchors = anchors.cuda()
        return anchors
def generate_anchors(base_size=16, ratios=None, scales=None):
    """Build the (len(ratios) * len(scales), 4) base anchors centred at 0.

    Each anchor is (x1, y1, x2, y2); its area equals (base_size * scale)^2
    and its height/width ratio equals the corresponding entry of `ratios`.
    """
    ratios = np.array([0.5, 1, 2]) if ratios is None else ratios
    scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]) if scales is None else scales

    n_anchors = len(ratios) * len(scales)
    out = np.zeros((n_anchors, 4))

    # Start with square boxes of side base_size * scale.
    out[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T

    # Reshape to the requested aspect ratios while preserving area.
    areas = out[:, 2] * out[:, 3]
    ratio_rep = np.repeat(ratios, len(scales))
    out[:, 2] = np.sqrt(areas / ratio_rep)
    out[:, 3] = out[:, 2] * ratio_rep

    # Shift from (0, 0, w, h) to a box centred on the origin.
    out[:, 0::2] -= np.tile(out[:, 2] * 0.5, (2, 1)).T
    out[:, 1::2] -= np.tile(out[:, 3] * 0.5, (2, 1)).T

    return out
def compute_shape(image_shape, pyramid_levels):
    """Return the (rows, cols) of each pyramid feature map.

    Uses ceil-division of the first two entries of `image_shape` by the
    stride 2**level for every level in `pyramid_levels`.
    """
    base = np.array(image_shape[:2])
    return [(base + 2 ** level - 1) // (2 ** level) for level in pyramid_levels]
def shift(shape, stride, anchors):
    """Replicate `anchors` at every cell centre of a feature map.

    `shape` is the (rows, cols) of the feature map, `stride` the input-pixel
    distance between adjacent cells. Returns (rows * cols * A, 4) anchors.
    """
    # Cell-centre coordinates in input-image pixels.
    cx = (np.arange(0, shape[1]) + 0.5) * stride
    cy = (np.arange(0, shape[0]) + 0.5) * stride
    grid_x, grid_y = np.meshgrid(cx, cy)

    # One (x, y, x, y) offset row per cell, applied to both box corners.
    offsets = np.vstack((
        grid_x.ravel(), grid_y.ravel(),
        grid_x.ravel(), grid_y.ravel()
    )).transpose()

    num_anchors = anchors.shape[0]
    num_cells = offsets.shape[0]
    shifted = (anchors.reshape((1, num_anchors, 4)) +
               offsets.reshape((1, num_cells, 4)).transpose((1, 0, 2)))
    return shifted.reshape((num_cells * num_anchors, 4))
def colors(label):
    """Map an integer class label (or its string form) to a fixed RGB tuple.

    The palette is a hard-coded list of 100 distinct colours used for
    drawing detection boxes; `label` indexes the list directly, so it must
    lie in range(100). Raises IndexError otherwise.
    """
    if isinstance(label, str):
        label = int(label)
    # Fixed palette; rebuilt on every call, which is fine for drawing a few
    # boxes per frame.
    box_colors = [(39, 129, 113), (164, 80, 133), (83, 122, 114), (99, 81, 172), (95, 56, 104), (37, 84, 86), (14, 89, 122),
                  (80, 7, 65), (10, 102, 25), (90, 185, 109), (106, 110, 132), (169, 158, 85), (188, 185, 26), (103, 1, 17),
                  (82, 144, 81), (92, 7, 184), (49, 81, 155), (179, 177, 69), (93, 187, 158), (13, 39, 73), (12, 50, 60),
                  (16, 179, 33), (112, 69, 165), (15, 139, 63), (33, 191, 159), (182, 173, 32), (34, 113, 133), (90, 135, 34),
                  (53, 34, 86), (141, 35, 190), (6, 171, 8), (118, 76, 112), (89, 60, 55), (15, 54, 88), (112, 75, 181),
                  (42, 147, 38), (138, 52, 63), (128, 65, 149), (106, 103, 24), (168, 33, 45), (28, 136, 135), (86, 91, 108),
                  (52, 11, 76), (142, 6, 189), (57, 81, 168), (55, 19, 148), (182, 101, 89), (44, 65, 179), (1, 33, 26),
                  (122, 164, 26), (70, 63, 134), (137, 106, 82), (120, 118, 52), (129, 74, 42), (182, 147, 112), (22, 157, 50),
                  (56, 50, 20), (2, 22, 177), (156, 100, 106), (21, 35, 42), (13, 8, 121), (142, 92, 28), (45, 118, 33),
                  (105, 118, 30), (7, 185, 124), (46, 34, 146), (105, 184, 169), (22, 18, 5), (147, 71, 73), (181, 64, 91),
                  (31, 39, 184), (164, 179, 33), (96, 50, 18), (95, 15, 106), (113, 68, 54), (136, 116, 112), (119, 139, 130),
                  (31, 139, 34), (66, 6, 127), (62, 39, 2), (49, 99, 180), (49, 119, 155), (153, 50, 183), (125, 38, 3),
                  (129, 87, 143), (49, 87, 40), (128, 62, 120), (73, 85, 148), (28, 144, 118), (29, 9, 24), (175, 45, 108),
                  (81, 175, 64), (178, 19, 157), (74, 188, 190), (18, 114, 2), (62, 128, 96), (21, 3, 150), (0, 6, 95),
                  (2, 20, 184), (122, 37, 185)]
    return box_colors[label]
|
{"hexsha": "47684238b5bbc036128164ba3eff6359bc6c8e57", "size": 6757, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvision/detection/efficientdet/lib/utils.py", "max_stars_repo_name": "indiradutta/PyVision", "max_stars_repo_head_hexsha": "cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2020-05-03T07:03:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T15:29:22.000Z", "max_issues_repo_path": "pyvision/detection/efficientdet/lib/utils.py", "max_issues_repo_name": "indiradutta/PyVision", "max_issues_repo_head_hexsha": "cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-05-25T14:23:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-04T10:38:02.000Z", "max_forks_repo_path": "pyvision/detection/efficientdet/lib/utils.py", "max_forks_repo_name": "indiradutta/PyVision", "max_forks_repo_head_hexsha": "cf74da32a3469ddcce9917ac1f2fcaaeefdeacdf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-05-24T22:26:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-03T18:30:51.000Z", "avg_line_length": 38.8333333333, "max_line_length": 124, "alphanum_fraction": 0.5311528785, "include": true, "reason": "import numpy", "num_tokens": 2428}
|
#File: fStd.jl
#Author:
#Date: 29-June-2020
#STANDARD DEVIATION
function fStd(x, flag, f)
    #--------------------------------------------------------------------------
    # Frequency-scaled (annualised) standard deviation of a return series.
    # x:    returns vector
    # flag: 0 = sample (N-1 denominator), 1 = population (N denominator)
    # f:    reporting frequency e.g. 12 (monthly)
    # sd:   standard deviation (sample or population) scaled by sqrt(f)
    # Requires `using Statistics` at module level.
    #--------------------------------------------------------------------------
    # Bug fix: `std(x, flag)` is MATLAB syntax — Julia's Statistics.std takes
    # a `corrected` keyword (corrected=true gives the sample estimator).
    # The original also assigned to the function's own name, which Julia
    # rejects.
    sd = std(x; corrected = (flag == 0)) * sqrt(f)
    return sd
end
|
{"hexsha": "09029397031d0fa5f226ae961696dc0b84f4b7bf", "size": 462, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/optimisation/fStd.jl", "max_stars_repo_name": "tranquilhero/Sonic.jl", "max_stars_repo_head_hexsha": "ea3cd85956d90ee6a1bd18bbbf834688a6117d29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/optimisation/fStd.jl", "max_issues_repo_name": "tranquilhero/Sonic.jl", "max_issues_repo_head_hexsha": "ea3cd85956d90ee6a1bd18bbbf834688a6117d29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/optimisation/fStd.jl", "max_forks_repo_name": "tranquilhero/Sonic.jl", "max_forks_repo_head_hexsha": "ea3cd85956d90ee6a1bd18bbbf834688a6117d29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6666666667, "max_line_length": 75, "alphanum_fraction": 0.4372294372, "num_tokens": 111}
|
import numpy as np
import numexpr as ne
from .letter import letter
def __segment(points, n=100):
    """
    Yield each straight segment between consecutive control points.

    Each letter is represented by complex control points a, b, c, d, ...;
    the straight segment joining two adjacent points is emitted as an array
    of n linearly interpolated points in the complex plane.
    """
    for start, stop in zip(points[:-1], points[1:]):
        reals = np.linspace(start.real, stop.real, n)
        imags = np.linspace(start.imag, stop.imag, n)
        yield reals + 1j * imags
def scribble(axes, text, f, title="Lydia & Thomas, August 17, Rhodes House", **kwargs):
axes.set_axis_off()
for n, l in enumerate(text):
# move a letter into the complex plane
points = letter(l) + n
# move the points to the unit-square
points = 2 * points / len(text) - 1
# each segment is a straight line of n points
for z in __segment(points, n=100):
t = ne.evaluate(f)
# plot the name in the complex plane
axes.plot(t.real, t.imag, **kwargs)
axes.set_title(title)
|
{"hexsha": "597ae5412692e1d6b1a6af0460ffc8c0511158c6", "size": 1015, "ext": "py", "lang": "Python", "max_stars_repo_path": "work/pyscribble/scribble.py", "max_stars_repo_name": "tschm/scribble", "max_stars_repo_head_hexsha": "31870e7b7d9bde3f706d1821f02fec21d4f093de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "work/pyscribble/scribble.py", "max_issues_repo_name": "tschm/scribble", "max_issues_repo_head_hexsha": "31870e7b7d9bde3f706d1821f02fec21d4f093de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "work/pyscribble/scribble.py", "max_forks_repo_name": "tschm/scribble", "max_forks_repo_head_hexsha": "31870e7b7d9bde3f706d1821f02fec21d4f093de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8529411765, "max_line_length": 87, "alphanum_fraction": 0.6216748768, "include": true, "reason": "import numpy,import numexpr", "num_tokens": 262}
|
__precompile__(true)
module VertexModels
export RandGenerator,
Orientation,
maxorientation,
twentyvertex,
picture,
height,
issink,
issource,
pushdown!,
pushup!
mutable struct RandGenerator
    # Memoised stream of random updates keyed by (negative) time step, so
    # that repeated sweeps replay identical randomness (see `sample`).
    # Each value is (row in 2:m-1, column in 3:n-2, up/down flag).
    D::Dict{Int64,Tuple{Int64,Int64,Bool}}
    m::Int64  # number of rows of the lattice
    n::Int64  # number of columns of the lattice
end
# Sample an orientation of the m-by-n grid by a monotone
# coupling-from-the-past style iteration: two chains started from the
# maximal and minimal orientations are driven by the *same* replayed
# random moves (memoised in `U`) over times -T:-1; once they coalesce,
# the common state is returned.  T doubles each round and `maxiter`
# bounds it, raising an error if coalescence is not reached.
function twentyvertex(m,n,maxiter=2^20)
    U = RandGenerator(Dict(),m,n)
    T = 1
    upper0, lower0 = maxorientation(m,n), minorientation(m,n)
    upper,lower = upper0, lower0
    while upper != lower
        # restart both chains from fresh copies of the extremal states
        upper,lower = copy(upper0), copy(lower0)
        for t = -T:-1
            # identical (i, j, up-flag) move is applied to both chains
            pushdown!(upper,sample(U,t)...)
            pushdown!(lower,sample(U,t)...)
        end
        T = 2T
        if T >= maxiter
            error("Did not converge")
        end
    end
    return upper
end
# Return the memoised random move for time index `t`, drawing and caching a
# fresh (row, column, coin) triple on first request.  Rows are drawn from
# 2:m-1 and columns from 3:n-2, i.e. interior vertices only.
function sample(R::RandGenerator,t::Integer)
    get!(R.D, t) do
        (rand(2:R.m-1), rand(3:R.n-2), rand(Bool))
    end
end
# An orientation of the grid graph: D assigns each stored directed edge
# ((i1,j1),(i2,j2)) a value that push moves flip in sign (edges are created
# with value 1 in `maxorientation`); m and n record the grid dimensions.
mutable struct Orientation
    D::Dict{Tuple{Tuple{Int64,Int64},Tuple{Int64,Int64}},Int64}   # edge => ±1
    m::Int64   # rows
    n::Int64   # columns
end
import Base: size, keys, getindex, setindex!, copy, ==, !=, hash
# Equality compares only the edge map D.  `hash` must therefore be derived
# from D alone (the original also mixed in m and n, so two == orientations
# could hash differently), and it must be defined in the two-argument form
# that Dict/Set actually call; the one-argument Base.hash(x) falls through
# to this method automatically.
hash(o::Orientation, h::UInt) = hash(o.D, h)
==(o::Orientation,p::Orientation) = o.D == p.D
!=(o::Orientation,p::Orientation) = !(o == p)
size(o::Orientation) = (o.m,o.n)     # grid dimensions (rows, columns)
keys(o::Orientation) = keys(o.D)     # the stored directed edges
getindex(o::Orientation,I::Tuple{Tuple{Int64,Int64},Tuple{Int64,Int64}}) = o.D[I]
setindex!(o::Orientation, i::Int64, k::Tuple{Tuple{Int64,Int64},Tuple{Int64,Int64}}) = setindex!(o.D,i,k)
copy(o::Orientation) = Orientation(copy(o.D),o.m,o.n)   # fresh D, shared keys
# All sink vertices of `h`, listed in column-major order.
function sinks(h::Orientation)
    nrows, ncols = size(h)
    return [(i, j) for j in 1:ncols for i in 1:nrows if issink(h, i, j)]
end
# All source vertices of `h`, listed in column-major order.
function sources(h::Orientation)
    nrows, ncols = size(h)
    return [(i, j) for j in 1:ncols for i in 1:nrows if issource(h, i, j)]
end
# Planar coordinates [x, y] of grid vertex (i, j) for drawing: the x
# coordinate is just the column j, while the y coordinate advances by 3h
# per row (h = sqrt(3)/3) with an offset of -h, +2h or +3h chosen by the
# parities of j and of the row distance m - i.
function position(i::Integer,j::Integer,m::Integer,n::Integer)
    h = sqrt(3)/3
    r = m - i
    if iseven(r)
        y = isodd(j) ? r*3h : r*3h - h
    else
        y = isodd(j) ? 2h + (r-1)*3h : 2h + (r-1)*3h + h
    end
    return [j, y]
end
# In-grid neighbours of vertex (i, j): the horizontal neighbours (right,
# then left) where they exist, plus a single vertical neighbour whose
# direction depends on the parities of j and of m - i.
function neighbors(i::Integer,j::Integer,m::Integer,n::Integer)
    # vertical step is +1 when the parities of j and m - i agree, else -1
    vert = iseven(j) == iseven(m - i) ? 1 : -1
    result = Array{Int64,1}[]
    for (di, dj) in [(0, 1), (0, -1), (vert, 0)]
        r, c = i + di, j + dj
        if 1 <= r <= m && 1 <= c <= n
            push!(result, [r, c])
        end
    end
    return result
end
"""
function picture(filename::AbstractString,h::Orientation;kwargs...)
m,n = size(h)
return Graphics2D.showgraphics(filename,
[h[e]*Graphics2D.Arrow([position(e[1]...,m,n)'; position(e[2]...,m,n)'];
arrowsize=0.2,arrowloc=0.6) for e in keys(h)])
end
function picture(h::Orientation;show=true,kwargs...)
m,n = size(h)
if show
return Graphics2D.showgraphics(
[h[e]*Graphics2D.Arrow([position(e[1]...,m,n)';
position(e[2]...,m,n)'];
arrowsize=0.2,arrowloc=0.6,kwargs...) for e in keys(h)])
else
return [h[e]*Graphics2D.Arrow(
[position(e[1]...,m,n)'; position(e[2]...,m,n)'];
arrowsize=0.2,arrowloc=0.6,kwargs...) for e in keys(h)]
end
end
"""
# True iff no stored edge incident to (i, j) points *into* it: an outgoing
# key with value -1, or an incoming key with value 1, disqualifies it.
function issource(O::Orientation,i::Integer,j::Integer)
    nrows, ncols = size(O)
    for nb in neighbors(i, j, nrows, ncols)
        v = (nb[1], nb[2])
        outgoing = ((i, j), v)
        incoming = (v, (i, j))
        if outgoing in keys(O)
            O[outgoing] == -1 && return false
        elseif incoming in keys(O)
            O[incoming] == 1 && return false
        end
    end
    return true
end
# True iff no stored edge incident to (i, j) points *out of* it: an outgoing
# key with value 1, or an incoming key with value -1, disqualifies it.
function issink(O::Orientation,i::Integer,j::Integer)
    nrows, ncols = size(O)
    for nb in neighbors(i, j, nrows, ncols)
        v = (nb[1], nb[2])
        outgoing = ((i, j), v)
        incoming = (v, (i, j))
        if outgoing in keys(O)
            O[outgoing] == 1 && return false
        elseif incoming in keys(O)
            O[incoming] == -1 && return false
        end
    end
    return true
end
# Integer height function of orientation O as an m-by-n matrix: heights are
# pinned by D[(1,1)] = 0 and propagated along edges using the rule
# D[head] = D[tail] + O.D[edge].  Raises an error if edge values are
# inconsistent with any height function.
# NOTE(review): the outer while loop assumes every vertex is reachable from
# (1,1) through stored edges; on a disconnected edge set it would never
# terminate — confirm callers only pass full-grid orientations.
function height(O::Orientation)
    m,n = size(O)
    D = Dict{Tuple{Int64,Int64},Int64}()
    D[(1,1)] = 0
    while length(D) < m*n
        for t in keys(O.D)
            if t[1] in keys(D)
                # both endpoints known: verify consistency before (re)writing
                if t[2] in keys(D)
                    if D[t[2]] - D[t[1]] != O.D[t]
                        error("height function inconsistent")
                    end
                end
                D[t[2]] = D[t[1]] + O.D[t]
            elseif t[2] in keys(D)
                # t[1] is necessarily unknown in this branch (the original
                # re-checked `t[1] in keys(D)` here, which was dead code),
                # so propagate backwards along the edge without a check
                D[t[1]] = D[t[2]] - O.D[t]
            end
        end
    end
    return [D[i,j] for i=1:m,j=1:n]
end
"""
function picture(D::Dict{Tuple{Int64,Int64},Int64})
m = maximum([m for (m,n) in keys(D)])
n = maximum([n for (m,n) in keys(D)])
grlist = Graphics2D.GraphicElement[]
for k in keys(D)
push!(grlist,Graphics2D.GraphicText(position(k...,m,n),string(D[k]);textsize=1.0))
end
return grlist
end
"""
# If (i, j) is a source, reverse every incident edge in place; return
# whether a flip happened.  Delegates to `pushdown!` with up=true.
function pushup!(h::Orientation,i::Integer,j::Integer)::Bool
    pushdown!(h,i,j,true)
end
# If (i, j) is a sink (or a source, when `up` is true), flip the sign of
# every edge incident to it — reversing all those edges in place — and
# return true; otherwise leave `h` unchanged and return false.
function pushdown!(h::Orientation,i::Integer,j::Integer,up=false)::Bool
    m,n = size(h)
    pushed = false
    if up ? issource(h,i,j) : issink(h,i,j)
        pushed = true
        for v in neighbors(i,j,m,n)
            v = tuple(v...)
            # the incident edge is keyed in exactly one of its two directions
            if ((i,j),v) in keys(h)
                h[((i,j),v)] *= -1
            else
                h[(v,(i,j))] *= -1
            end
        end
    end
    return pushed
end
# The minimal orientation: identical construction to `maxorientation`,
# but relaxing by pushing interior vertices down instead of up.
function minorientation(m::Integer,n::Integer)::Orientation
    maxorientation(m, n; down=true)
end
# Construct the maximal orientation of the m-by-n grid (or the minimal one
# when down=true).  Boundary edges are laid out with fixed alternating
# directions; interior edges get a parity-determined initial direction; all
# edges start with value 1.  Every interior vertex is then pushed up (resp.
# down) repeatedly — 10*(m+n) sweeps, presumably enough for the extremal
# fixed point to be reached (TODO confirm this bound).
function maxorientation(m::Integer,n::Integer;down=false)::Orientation
    # top boundary, left to right, every other edge reversed
    topedges = [((1,j),(1,j+1)) for j=1:n-1]
    topedges = [isodd(i) ? (e[2],e[1]) : e for (i,e) in enumerate(topedges)]
    # bottom boundary, traversed right to left, every other edge reversed
    bottomedges = [((m,j),(m,j+1)) for j=1:n-1][end:-1:1]
    bottomedges = [isodd(i) ? (e[2],e[1]) : e for (i,e) in enumerate(bottomedges)]
    # left boundary zig-zags between columns 1 and 2 (truncated, reversed)
    leftedges = vcat([[((i,1),(i+1,1)), ((i+1,1),(i+1,2)),
        ((i+1,2),(i+2,2)), ((i+2,2),(i+2,1))] for i=1:2:m-1]...)[2m-3:-1:1]
    leftedges = [isodd(i) ? (e[2],e[1]) : e for (i,e) in enumerate(leftedges)]
    # right boundary zig-zags between columns n and n-1 (truncated)
    rightedges = vcat([[((i,n),(i+1,n)), ((i+1,n),(i+1,n-1)),
        ((i+1,n-1),(i+2,n-1)), ((i+2,n-1),(i+2,n))] for i=1:2:m-1]...)[1:2m-3]
    rightedges = [isodd(i) ? (e[2],e[1]) : e for (i,e) in enumerate(rightedges)]
    boundaryedges = [topedges; rightedges; bottomedges; leftedges]
    # horizontal interior edges, direction alternating with vertex parity
    interioredges = vcat([isodd(i+j) ? ((i,j),(i,j+1)) : ((i,j+1),(i,j)) for j=2:n-2,i=2:m-1]...)
    # vertical interior edges on the two parity sublattices
    interioredges = [interioredges;
        vcat([((i+1,j),(i,j)) for i=1:2:m-1,j=3:2:n-2]...);
        vcat([((i+1,j),(i,j)) for i=2:2:m-1,j=4:2:n-2]...);
        ];
    alledges = [boundaryedges; interioredges];
    O = Orientation(Dict(e=>1 for e in alledges),m,n)
    # relax every interior vertex many times toward the extremal orientation
    for i=1:10*(m+n)
        for j=2:m-1
            for k=3:n-2
                if down
                    pushdown!(O,j,k)
                else
                    pushup!(O,j,k)
                end
            end
        end
    end
    return O
end
end # module
|
{"hexsha": "2fcb8b9c1cf6da7ed717759c5c012be6057742af", "size": 7255, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/VertexModels.jl", "max_stars_repo_name": "dgoekmen/VertexModels.jl", "max_stars_repo_head_hexsha": "d94c838fccd374e0b0d693e38b72a9a21c235d11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/VertexModels.jl", "max_issues_repo_name": "dgoekmen/VertexModels.jl", "max_issues_repo_head_hexsha": "d94c838fccd374e0b0d693e38b72a9a21c235d11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/VertexModels.jl", "max_forks_repo_name": "dgoekmen/VertexModels.jl", "max_forks_repo_head_hexsha": "d94c838fccd374e0b0d693e38b72a9a21c235d11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9038461538, "max_line_length": 105, "alphanum_fraction": 0.5141281875, "num_tokens": 2440}
|
```python
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
```
```python
solve(x+3-4,x)
```
[1]
La orden Matrix() es una función de SymPy para crear matrices: entre los paréntesis va el argumento de la función y, para definir las filas de la matriz, usamos corchetes.
```python
A = Matrix([[3,8],[7,-2]])
```
```python
A
```
Matrix([
[3, 8],
[7, -2]])
```python
A.transpose()
```
Matrix([
[3, 7],
[8, -2]])
```python
A.T
```
Matrix([
[3, 7],
[8, -2]])
```python
A.inv()
```
Matrix([
[1/31, 4/31],
[7/62, -3/62]])
```python
X = Matrix([[1,0],[1,1],[1,2],[1,3]])
```
```python
X
```
Matrix([
[1, 0],
[1, 1],
[1, 2],
[1, 3]])
```python
X1 = X.T
```
```python
X1
```
Matrix([
[1, 1, 1, 1],
[0, 1, 2, 3]])
```python
X2 = X1*X
```
```python
X2
```
Matrix([
[4, 6],
[6, 14]])
```python
X3 = X2.inv()
```
```python
X3
```
Matrix([
[ 7/10, -3/10],
[-3/10, 1/5]])
```python
y = Matrix([[1],[1],[2],[2]])
```
```python
y
```
Matrix([
[1],
[1],
[2],
[2]])
```python
X4 = X1*y
```
```python
X4
```
Matrix([
[ 6],
[11]])
```python
beta = X3*X4
```
```python
beta
```
Matrix([
[9/10],
[ 2/5]])
```python
```
|
{"hexsha": "24004a4513600760584ce847d73f4d2343e145a0", "size": 6408, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "matrices.ipynb", "max_stars_repo_name": "CogniMath/regression_matrices", "max_stars_repo_head_hexsha": "1f8a384807dfc65f6e61462927f41cbcb500596c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matrices.ipynb", "max_issues_repo_name": "CogniMath/regression_matrices", "max_issues_repo_head_hexsha": "1f8a384807dfc65f6e61462927f41cbcb500596c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matrices.ipynb", "max_forks_repo_name": "CogniMath/regression_matrices", "max_forks_repo_head_hexsha": "1f8a384807dfc65f6e61462927f41cbcb500596c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.3887468031, "max_line_length": 171, "alphanum_fraction": 0.4132334582, "converted": true, "num_tokens": 569}
|
(* *********************************************************************)
(* *)
(* The Compcert verified compiler *)
(* *)
(* Xavier Leroy, INRIA Paris-Rocquencourt *)
(* *)
(* Copyright Institut National de Recherche en Informatique et en *)
(* Automatique. All rights reserved. This file is distributed *)
(* under the terms of the GNU Lesser General Public License as *)
(* published by the Free Software Foundation, either version 2.1 of *)
(* the License, or (at your option) any later version. *)
(* This file is also distributed under the terms of the *)
(* INRIA Non-Commercial License Agreement. *)
(* *)
(* *********************************************************************)
(** This file collects a number of definitions and theorems that are
used throughout the development. It complements the Coq standard
library. *)
Require Export String.
Require Export ZArith.
Require Export Znumtheory.
Require Export List.
Require Export Bool.
Require Export Lia.
Global Set Asymmetric Patterns.
(** * Useful tactics *)
(* [inv H]: invert hypothesis [H], clear it, and substitute the generated equalities. *)
Ltac inv H := inversion H; clear H; subst.
Ltac predSpec pred predspec x y :=
generalize (predspec x y); case (pred x y); intro.
Ltac caseEq name :=
generalize (eq_refl name); pattern name at -1 in |- *; case name.
Ltac destructEq name :=
destruct name eqn:?.
Ltac decEq :=
match goal with
| [ |- _ = _ ] => f_equal
| [ |- (?X ?A <> ?X ?B) ] =>
cut (A <> B); [intro; congruence | try discriminate]
end.
Ltac byContradiction := exfalso.
(* Cut rule: from [P] and [P -> Q], conclude [Q].  Used by the [exploit]
   tactic below to instantiate a lemma's premises one by one. *)
Lemma modusponens: forall (P Q: Prop), P -> (P -> Q) -> Q.
Proof. auto. Qed.
Ltac exploit x :=
refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _ _) _)
|| refine (modusponens _ _ (x _ _ _) _)
|| refine (modusponens _ _ (x _ _) _)
|| refine (modusponens _ _ (x _) _).
(** * Definitions and theorems over the type [positive] *)
Definition peq: forall (x y: positive), {x = y} + {x <> y} := Pos.eq_dec.
Global Opaque peq.
Lemma peq_true:
forall (A: Type) (x: positive) (a b: A), (if peq x x then a else b) = a.
Proof.
intros. case (peq x x); intros.
auto.
elim n; auto.
Qed.
Lemma peq_false:
forall (A: Type) (x y: positive) (a b: A), x <> y -> (if peq x y then a else b) = b.
Proof.
intros. case (peq x y); intros.
elim H; auto.
auto.
Qed.
Definition Plt: positive -> positive -> Prop := Pos.lt.
Lemma Plt_ne:
forall (x y: positive), Plt x y -> x <> y.
Proof.
unfold Plt; intros. red; intro. subst y. eelim Pos.lt_irrefl; eauto.
Qed.
Global Hint Resolve Plt_ne: coqlib.
Lemma Plt_trans:
forall (x y z: positive), Plt x y -> Plt y z -> Plt x z.
Proof (Pos.lt_trans).
Lemma Plt_succ:
forall (x: positive), Plt x (Pos.succ x).
Proof.
unfold Plt; intros. apply Pos.lt_succ_r. apply Pos.le_refl.
Qed.
Global Hint Resolve Plt_succ: coqlib.
Lemma Plt_trans_succ:
forall (x y: positive), Plt x y -> Plt x (Pos.succ y).
Proof.
intros. apply Plt_trans with y. assumption. apply Plt_succ.
Qed.
Global Hint Resolve Plt_succ: coqlib.
Lemma Plt_succ_inv:
forall (x y: positive), Plt x (Pos.succ y) -> Plt x y \/ x = y.
Proof.
unfold Plt; intros. rewrite Pos.lt_succ_r in H.
apply Pos.le_lteq; auto.
Qed.
Definition plt (x y: positive) : {Plt x y} + {~ Plt x y}.
Proof.
unfold Plt, Pos.lt; intros. destruct (Pos.compare x y).
- right; congruence.
- left; auto.
- right; congruence.
Defined.
Global Opaque plt.
Definition Ple: positive -> positive -> Prop := Pos.le.
Lemma Ple_refl: forall (p: positive), Ple p p.
Proof (Pos.le_refl).
Lemma Ple_trans: forall (p q r: positive), Ple p q -> Ple q r -> Ple p r.
Proof (Pos.le_trans).
Lemma Plt_Ple: forall (p q: positive), Plt p q -> Ple p q.
Proof (Pos.lt_le_incl).
Lemma Ple_succ: forall (p: positive), Ple p (Pos.succ p).
Proof.
intros. apply Plt_Ple. apply Plt_succ.
Qed.
Lemma Plt_Ple_trans:
forall (p q r: positive), Plt p q -> Ple q r -> Plt p r.
Proof (Pos.lt_le_trans).
Lemma Plt_strict: forall p, ~ Plt p p.
Proof (Pos.lt_irrefl).
Global Hint Resolve Ple_refl Plt_Ple Ple_succ Plt_strict: coqlib.
Ltac extlia := unfold Plt, Ple in *; lia.
(** Peano recursion over positive numbers. *)
Section POSITIVE_ITERATION.
Lemma Plt_wf: well_founded Plt.
Proof.
apply well_founded_lt_compat with Pos.to_nat.
intros. apply nat_of_P_lt_Lt_compare_morphism. exact H.
Qed.
Variable A: Type.
Variable v1: A.
Variable f: positive -> A -> A.
Lemma Ppred_Plt:
forall x, x <> xH -> Plt (Pos.pred x) x.
Proof.
intros. elim (Pos.succ_pred_or x); intro. contradiction.
set (y := Pos.pred x) in *. rewrite <- H0. apply Plt_succ.
Qed.
Let iter (x: positive) (P: forall y, Plt y x -> A) : A :=
match peq x xH with
| left EQ => v1
| right NOTEQ => f (Pos.pred x) (P (Pos.pred x) (Ppred_Plt x NOTEQ))
end.
Definition positive_rec : positive -> A :=
Fix Plt_wf (fun _ => A) iter.
Lemma unroll_positive_rec:
forall x,
positive_rec x = iter x (fun y _ => positive_rec y).
Proof.
unfold positive_rec. apply (Fix_eq Plt_wf (fun _ => A) iter).
intros. unfold iter. case (peq x 1); intro. auto. decEq. apply H.
Qed.
Lemma positive_rec_base:
positive_rec 1%positive = v1.
Proof.
rewrite unroll_positive_rec. unfold iter. case (peq 1 1); intro.
auto. elim n; auto.
Qed.
Lemma positive_rec_succ:
forall x, positive_rec (Pos.succ x) = f x (positive_rec x).
Proof.
intro. rewrite unroll_positive_rec. unfold iter.
case (peq (Pos.succ x) 1); intro.
destruct x; simpl in e; discriminate.
rewrite Pos.pred_succ. auto.
Qed.
Lemma positive_Peano_ind:
forall (P: positive -> Prop),
P xH ->
(forall x, P x -> P (Pos.succ x)) ->
forall x, P x.
Proof.
intros.
apply (well_founded_ind Plt_wf P).
intros.
case (peq x0 xH); intro.
subst x0; auto.
elim (Pos.succ_pred_or x0); intro. contradiction. rewrite <- H2.
apply H0. apply H1. apply Ppred_Plt. auto.
Qed.
End POSITIVE_ITERATION.
(** * Definitions and theorems over the type [Z] *)
Definition zeq: forall (x y: Z), {x = y} + {x <> y} := Z.eq_dec.
Lemma zeq_true:
forall (A: Type) (x: Z) (a b: A), (if zeq x x then a else b) = a.
Proof.
intros. case (zeq x x); intros.
auto.
elim n; auto.
Qed.
Lemma zeq_false:
forall (A: Type) (x y: Z) (a b: A), x <> y -> (if zeq x y then a else b) = b.
Proof.
intros. case (zeq x y); intros.
elim H; auto.
auto.
Qed.
Open Scope Z_scope.
Definition zlt: forall (x y: Z), {x < y} + {x >= y} := Z_lt_dec.
Lemma zlt_true:
forall (A: Type) (x y: Z) (a b: A),
x < y -> (if zlt x y then a else b) = a.
Proof.
intros. case (zlt x y); intros.
auto.
extlia.
Qed.
Lemma zlt_false:
forall (A: Type) (x y: Z) (a b: A),
x >= y -> (if zlt x y then a else b) = b.
Proof.
intros. case (zlt x y); intros.
extlia.
auto.
Qed.
Definition zle: forall (x y: Z), {x <= y} + {x > y} := Z_le_gt_dec.
Lemma zle_true:
forall (A: Type) (x y: Z) (a b: A),
x <= y -> (if zle x y then a else b) = a.
Proof.
intros. case (zle x y); intros.
auto.
extlia.
Qed.
Lemma zle_false:
forall (A: Type) (x y: Z) (a b: A),
x > y -> (if zle x y then a else b) = b.
Proof.
intros. case (zle x y); intros.
extlia.
auto.
Qed.
(** Properties of powers of two. *)
Lemma two_power_nat_O : two_power_nat O = 1.
Proof. reflexivity. Qed.
Lemma two_power_nat_pos : forall n : nat, two_power_nat n > 0.
Proof.
induction n. rewrite two_power_nat_O. lia.
rewrite two_power_nat_S. lia.
Qed.
Lemma two_power_nat_two_p:
forall x, two_power_nat x = two_p (Z.of_nat x).
Proof.
induction x. auto.
rewrite two_power_nat_S. rewrite Nat2Z.inj_succ. rewrite two_p_S. lia. lia.
Qed.
Lemma two_p_monotone:
forall x y, 0 <= x <= y -> two_p x <= two_p y.
Proof.
intros.
replace (two_p x) with (two_p x * 1) by lia.
replace y with (x + (y - x)) by lia.
rewrite two_p_is_exp; try lia.
apply Zmult_le_compat_l.
assert (two_p (y - x) > 0). apply two_p_gt_ZERO. lia. lia.
assert (two_p x > 0). apply two_p_gt_ZERO. lia. lia.
Qed.
Lemma two_p_monotone_strict:
forall x y, 0 <= x < y -> two_p x < two_p y.
Proof.
intros. assert (two_p x <= two_p (y - 1)). apply two_p_monotone; lia.
assert (two_p (y - 1) > 0). apply two_p_gt_ZERO. lia.
replace y with (Z.succ (y - 1)) by lia. rewrite two_p_S. lia. lia.
Qed.
Lemma two_p_strict:
forall x, x >= 0 -> x < two_p x.
Proof.
intros x0 GT. pattern x0. apply natlike_ind.
simpl. lia.
intros. rewrite two_p_S; auto. generalize (two_p_gt_ZERO x H). lia.
lia.
Qed.
Lemma two_p_strict_2:
forall x, x >= 0 -> 2 * x - 1 < two_p x.
Proof.
intros. assert (x = 0 \/ x - 1 >= 0) by lia. destruct H0.
subst. vm_compute. auto.
replace (two_p x) with (2 * two_p (x - 1)).
generalize (two_p_strict _ H0). lia.
rewrite <- two_p_S. decEq. lia. lia.
Qed.
(** Properties of [Zmin] and [Zmax] *)
Lemma Zmin_spec:
forall x y, Z.min x y = if zlt x y then x else y.
Proof.
intros. case (zlt x y); unfold Z.lt, Z.ge; intro z.
unfold Z.min. rewrite z. auto.
unfold Z.min. caseEq (x ?= y); intro.
apply Z.compare_eq. auto.
contradiction.
reflexivity.
Qed.
Lemma Zmax_spec:
forall x y, Z.max x y = if zlt y x then x else y.
Proof.
intros. case (zlt y x); unfold Z.lt, Z.ge; intro z.
unfold Z.max. rewrite <- (Zcompare_antisym y x).
rewrite z. simpl. auto.
unfold Z.max. rewrite <- (Zcompare_antisym y x).
caseEq (y ?= x); intro; simpl.
symmetry. apply Z.compare_eq. auto.
contradiction. reflexivity.
Qed.
Lemma Zmax_bound_l:
forall x y z, x <= y -> x <= Z.max y z.
Proof.
intros. generalize (Z.le_max_l y z). lia.
Qed.
Lemma Zmax_bound_r:
forall x y z, x <= z -> x <= Z.max y z.
Proof.
intros. generalize (Z.le_max_r y z). lia.
Qed.
(** Properties of Euclidean division and modulus. *)
Lemma Zmod_unique:
forall x y a b,
x = a * y + b -> 0 <= b < y -> x mod y = b.
Proof.
intros. subst x. rewrite Z.add_comm.
rewrite Z_mod_plus. apply Z.mod_small. auto. lia.
Qed.
Lemma Zdiv_unique:
forall x y a b,
x = a * y + b -> 0 <= b < y -> x / y = a.
Proof.
intros. subst x. rewrite Z.add_comm.
rewrite Z_div_plus. rewrite (Zdiv_small b y H0). lia. lia.
Qed.
Lemma Zdiv_Zdiv:
forall a b c,
b > 0 -> c > 0 -> (a / b) / c = a / (b * c).
Proof.
intros. apply Z.div_div; lia.
Qed.
Lemma Zdiv_interval_1:
forall lo hi a b,
lo <= 0 -> hi > 0 -> b > 0 ->
lo * b <= a < hi * b ->
lo <= a/b < hi.
Proof.
intros.
generalize (Z_div_mod_eq a b H1). generalize (Z_mod_lt a b H1). intros.
set (q := a/b) in *. set (r := a mod b) in *.
split.
assert (lo < (q + 1)).
apply Zmult_lt_reg_r with b. lia.
apply Z.le_lt_trans with a. lia.
replace ((q + 1) * b) with (b * q + b) by ring.
lia.
lia.
apply Zmult_lt_reg_r with b. lia.
replace (q * b) with (b * q) by ring.
lia.
Qed.
Lemma Zdiv_interval_2:
forall lo hi a b,
lo <= a <= hi -> lo <= 0 -> hi >= 0 -> b > 0 ->
lo <= a/b <= hi.
Proof.
intros.
assert (lo <= a / b < hi+1).
apply Zdiv_interval_1. lia. lia. auto.
assert (lo * b <= lo * 1) by (apply Z.mul_le_mono_nonpos_l; lia).
replace (lo * 1) with lo in H3 by ring.
assert ((hi + 1) * 1 <= (hi + 1) * b) by (apply Z.mul_le_mono_nonneg_l; lia).
replace ((hi + 1) * 1) with (hi + 1) in H4 by ring.
lia.
lia.
Qed.
Lemma Zmod_recombine:
forall x a b,
a > 0 -> b > 0 ->
x mod (a * b) = ((x/b) mod a) * b + (x mod b).
Proof.
intros. rewrite (Z.mul_comm a b). rewrite Z.rem_mul_r by lia. ring.
Qed.
(** Properties of divisibility. *)
Lemma Zdivide_interval:
forall a b c,
0 < c -> 0 <= a < b -> (c | a) -> (c | b) -> 0 <= a <= b - c.
Proof.
intros. destruct H1 as [x EQ1]. destruct H2 as [y EQ2]. subst. destruct H0.
split. lia. exploit Zmult_lt_reg_r; eauto. intros.
replace (y * c - c) with ((y - 1) * c) by ring.
apply Zmult_le_compat_r; lia.
Qed.
(** Conversion from [Z] to [nat]. *)
Lemma Z_to_nat_neg:
forall n, n <= 0 -> Z.to_nat n = O.
Proof.
destruct n; unfold Z.le; simpl; auto. congruence.
Qed.
Lemma Z_to_nat_max:
forall z, Z.of_nat (Z.to_nat z) = Z.max z 0.
Proof.
intros. destruct (zle 0 z).
- rewrite Z2Nat.id by auto. extlia.
- rewrite Z_to_nat_neg by lia. extlia.
Qed.
(** Alignment: [align n amount] returns the smallest multiple of [amount]
greater than or equal to [n]. *)
Definition align (n: Z) (amount: Z) :=
  (* round up: adding [amount - 1] before dividing yields the ceiling;
     only meaningful for [amount > 0], as the lemmas [align_le] and
     [align_divides] below assume *)
  ((n + amount - 1) / amount) * amount.
Lemma align_le: forall x y, y > 0 -> x <= align x y.
Proof.
intros. unfold align.
generalize (Z_div_mod_eq (x + y - 1) y H). intro.
replace ((x + y - 1) / y * y)
with ((x + y - 1) - (x + y - 1) mod y).
generalize (Z_mod_lt (x + y - 1) y H). lia.
rewrite Z.mul_comm. lia.
Qed.
Lemma align_divides: forall x y, y > 0 -> (y | align x y).
Proof.
intros. unfold align. apply Z.divide_factor_r.
Qed.
(** * Definitions and theorems on the data types [option], [sum] and [list] *)
Set Implicit Arguments.
(** Comparing option types. *)
Definition option_eq (A: Type) (eqA: forall (x y: A), {x=y} + {x<>y}):
forall (x y: option A), {x=y} + {x<>y}.
Proof. decide equality. Defined.
Global Opaque option_eq.
(** Lifting a relation to an option type. *)
Inductive option_rel (A B: Type) (R: A -> B -> Prop) : option A -> option B -> Prop :=
| option_rel_none: option_rel R None None
| option_rel_some: forall x y, R x y -> option_rel R (Some x) (Some y).
(** Mapping a function over an option type. *)
(* [option_map f x] applies [f] to the value under [Some]; [None] maps to [None]. *)
Definition option_map (A B: Type) (f: A -> B) (x: option A) : option B :=
  match x with
  | None => None
  | Some y => Some (f y)
  end.
(** Mapping a function over a sum type. *)
Definition sum_left_map (A B C: Type) (f: A -> B) (x: A + C) : B + C :=
match x with
| inl y => inl C (f y)
| inr z => inr B z
end.
(** Properties of [List.nth] (n-th element of a list). *)
Global Hint Resolve in_eq in_cons: coqlib.
Lemma nth_error_in:
forall (A: Type) (n: nat) (l: list A) (x: A),
List.nth_error l n = Some x -> In x l.
Proof.
induction n; simpl.
destruct l; intros.
discriminate.
injection H; intro; subst a. apply in_eq.
destruct l; intros.
discriminate.
apply in_cons. auto.
Qed.
Global Hint Resolve nth_error_in: coqlib.
Lemma nth_error_nil:
forall (A: Type) (idx: nat), nth_error (@nil A) idx = None.
Proof.
induction idx; simpl; intros; reflexivity.
Qed.
Global Hint Resolve nth_error_nil: coqlib.
(** Compute the length of a list, with result in [Z]. *)
(* Tail-recursive worker for [list_length_z]: returns [acc] plus the length of [l]. *)
Fixpoint list_length_z_aux (A: Type) (l: list A) (acc: Z) {struct l}: Z :=
  match l with
  | nil => acc
  | hd :: tl => list_length_z_aux tl (Z.succ acc)
  end.
Remark list_length_z_aux_shift:
forall (A: Type) (l: list A) n m,
list_length_z_aux l n = list_length_z_aux l m + (n - m).
Proof.
induction l; intros; simpl.
lia.
replace (n - m) with (Z.succ n - Z.succ m) by lia. auto.
Qed.
Definition list_length_z (A: Type) (l: list A) : Z :=
list_length_z_aux l 0.
Lemma list_length_z_cons:
forall (A: Type) (hd: A) (tl: list A),
list_length_z (hd :: tl) = list_length_z tl + 1.
Proof.
intros. unfold list_length_z. simpl.
rewrite (list_length_z_aux_shift tl 1 0). lia.
Qed.
Lemma list_length_z_pos:
forall (A: Type) (l: list A),
list_length_z l >= 0.
Proof.
induction l; simpl. unfold list_length_z; simpl. lia.
rewrite list_length_z_cons. lia.
Qed.
Lemma list_length_z_map:
forall (A B: Type) (f: A -> B) (l: list A),
list_length_z (map f l) = list_length_z l.
Proof.
induction l. reflexivity. simpl. repeat rewrite list_length_z_cons. congruence.
Qed.
(** Extract the n-th element of a list, as [List.nth_error] does,
but the index [n] is of type [Z]. *)
(* A negative [n] always yields [None], since [n] only decreases along the list. *)
Fixpoint list_nth_z (A: Type) (l: list A) (n: Z) {struct l}: option A :=
  match l with
  | nil => None
  | hd :: tl => if zeq n 0 then Some hd else list_nth_z tl (Z.pred n)
  end.
Lemma list_nth_z_in:
forall (A: Type) (l: list A) n x,
list_nth_z l n = Some x -> In x l.
Proof.
induction l; simpl; intros.
congruence.
destruct (zeq n 0). left; congruence. right; eauto.
Qed.
Lemma list_nth_z_map:
forall (A B: Type) (f: A -> B) (l: list A) n,
list_nth_z (List.map f l) n = option_map f (list_nth_z l n).
Proof.
induction l; simpl; intros.
auto.
destruct (zeq n 0). auto. eauto.
Qed.
Lemma list_nth_z_range:
forall (A: Type) (l: list A) n x,
list_nth_z l n = Some x -> 0 <= n < list_length_z l.
Proof.
induction l; simpl; intros.
discriminate.
rewrite list_length_z_cons. destruct (zeq n 0).
generalize (list_length_z_pos l); lia.
exploit IHl; eauto. lia.
Qed.
(** Properties of [List.incl] (list inclusion). *)
Lemma incl_cons_inv:
forall (A: Type) (a: A) (b c: list A),
incl (a :: b) c -> incl b c.
Proof.
unfold incl; intros. apply H. apply in_cons. auto.
Qed.
Global Hint Resolve incl_cons_inv: coqlib.
Lemma incl_app_inv_l:
forall (A: Type) (l1 l2 m: list A),
incl (l1 ++ l2) m -> incl l1 m.
Proof.
unfold incl; intros. apply H. apply in_or_app. left; assumption.
Qed.
Lemma incl_app_inv_r:
forall (A: Type) (l1 l2 m: list A),
incl (l1 ++ l2) m -> incl l2 m.
Proof.
unfold incl; intros. apply H. apply in_or_app. right; assumption.
Qed.
Global Hint Resolve incl_tl incl_refl incl_app_inv_l incl_app_inv_r: coqlib.
Lemma incl_same_head:
forall (A: Type) (x: A) (l1 l2: list A),
incl l1 l2 -> incl (x::l1) (x::l2).
Proof.
intros; red; simpl; intros. intuition.
Qed.
(** Properties of [List.map] (mapping a function over a list). *)
Lemma list_map_exten:
forall (A B: Type) (f f': A -> B) (l: list A),
(forall x, In x l -> f x = f' x) ->
List.map f' l = List.map f l.
Proof.
induction l; simpl; intros.
reflexivity.
rewrite <- H. rewrite IHl. reflexivity.
intros. apply H. tauto.
tauto.
Qed.
Lemma list_map_compose:
forall (A B C: Type) (f: A -> B) (g: B -> C) (l: list A),
List.map g (List.map f l) = List.map (fun x => g(f x)) l.
Proof.
induction l; simpl. reflexivity. rewrite IHl; reflexivity.
Qed.
Lemma list_map_identity:
forall (A: Type) (l: list A),
List.map (fun (x:A) => x) l = l.
Proof.
induction l; simpl; congruence.
Qed.
Lemma list_map_nth:
forall (A B: Type) (f: A -> B) (l: list A) (n: nat),
nth_error (List.map f l) n = option_map f (nth_error l n).
Proof.
induction l; simpl; intros.
repeat rewrite nth_error_nil. reflexivity.
destruct n; simpl. reflexivity. auto.
Qed.
Lemma list_length_map:
forall (A B: Type) (f: A -> B) (l: list A),
List.length (List.map f l) = List.length l.
Proof.
induction l; simpl; congruence.
Qed.
Lemma list_in_map_inv:
forall (A B: Type) (f: A -> B) (l: list A) (y: B),
In y (List.map f l) -> exists x:A, y = f x /\ In x l.
Proof.
induction l; simpl; intros.
contradiction.
elim H; intro.
exists a; intuition auto.
generalize (IHl y H0). intros [x [EQ IN]].
exists x; tauto.
Qed.
Lemma list_append_map:
forall (A B: Type) (f: A -> B) (l1 l2: list A),
List.map f (l1 ++ l2) = List.map f l1 ++ List.map f l2.
Proof.
induction l1; simpl; intros.
auto. rewrite IHl1. auto.
Qed.
Lemma list_append_map_inv:
forall (A B: Type) (f: A -> B) (m1 m2: list B) (l: list A),
List.map f l = m1 ++ m2 ->
exists l1, exists l2, List.map f l1 = m1 /\ List.map f l2 = m2 /\ l = l1 ++ l2.
Proof.
induction m1; simpl; intros.
exists (@nil A); exists l; auto.
destruct l; simpl in H; inv H.
exploit IHm1; eauto. intros [l1 [l2 [P [Q R]]]]. subst l.
exists (a0 :: l1); exists l2; intuition. simpl; congruence.
Qed.
(** Folding a function over a list *)
Section LIST_FOLD.
Variables A B: Type.
Variable f: A -> B -> B.
(** This is exactly [List.fold_left] from Coq's standard library,
with [f] taking arguments in a different order. *)
Fixpoint list_fold_left (accu: B) (l: list A) : B :=
match l with nil => accu | x :: l' => list_fold_left (f x accu) l' end.
(** This is exactly [List.fold_right] from Coq's standard library,
except that it runs in constant stack space. *)
Definition list_fold_right (l: list A) (base: B) : B :=
list_fold_left base (List.rev' l).
Remark list_fold_left_app:
forall l1 l2 accu,
list_fold_left accu (l1 ++ l2) = list_fold_left (list_fold_left accu l1) l2.
Proof.
induction l1; simpl; intros.
auto.
rewrite IHl1. auto.
Qed.
Lemma list_fold_right_eq:
forall l base,
list_fold_right l base =
match l with nil => base | x :: l' => f x (list_fold_right l' base) end.
Proof.
unfold list_fold_right; intros.
destruct l.
auto.
unfold rev'. rewrite <- ! rev_alt. simpl.
rewrite list_fold_left_app. simpl. auto.
Qed.
Lemma list_fold_right_spec:
forall l base, list_fold_right l base = List.fold_right f base l.
Proof.
induction l; simpl; intros; rewrite list_fold_right_eq; congruence.
Qed.
End LIST_FOLD.
(** Properties of list membership. *)
(* Membership in a cons, stated as an iff (convenient for rewriting). *)
Lemma in_cns:
forall (A: Type) (x y: A) (l: list A), In x (y :: l) <-> y = x \/ In x l.
Proof.
intros. simpl. tauto.
Qed.
(* Membership in an append, stated as an iff; combines the standard
lemmas [in_app_or] and [in_or_app]. *)
Lemma in_app:
forall (A: Type) (x: A) (l1 l2: list A), In x (l1 ++ l2) <-> In x l1 \/ In x l2.
Proof.
intros. split; intro. apply in_app_or. auto. apply in_or_app. auto.
Qed.
(* Inserting an element [y] in the middle preserves membership of [x]. *)
Lemma list_in_insert:
forall (A: Type) (x: A) (l1 l2: list A) (y: A),
In x (l1 ++ l2) -> In x (l1 ++ y :: l2).
Proof.
intros. apply in_or_app; simpl. elim (in_app_or _ _ _ H); intro; auto.
Qed.
(** [list_disjoint l1 l2] holds iff [l1] and [l2] have no elements
in common. *)
Definition list_disjoint (A: Type) (l1 l2: list A) : Prop :=
forall (x y: A), In x l1 -> In y l2 -> x <> y.
(* Consing on the left preserves disjointness, provided the new element
does not occur in the right-hand list. *)
Lemma list_disjoint_cons_l:
forall (A: Type) (a: A) (l1 l2: list A),
list_disjoint l1 l2 -> ~In a l2 -> list_disjoint (a :: l1) l2.
Proof.
unfold list_disjoint; simpl; intros. destruct H1. congruence. apply H; auto.
Qed.
(* Symmetric variant: consing on the right. *)
Lemma list_disjoint_cons_r:
forall (A: Type) (a: A) (l1 l2: list A),
list_disjoint l1 l2 -> ~In a l1 -> list_disjoint l1 (a :: l2).
Proof.
unfold list_disjoint; simpl; intros. destruct H2. congruence. apply H; auto.
Qed.
(* Disjointness is preserved when dropping the head of the left list. *)
Lemma list_disjoint_cons_left:
forall (A: Type) (a: A) (l1 l2: list A),
list_disjoint (a :: l1) l2 -> list_disjoint l1 l2.
Proof.
unfold list_disjoint; simpl; intros. apply H; tauto.
Qed.
(* Disjointness is preserved when dropping the head of the right list. *)
Lemma list_disjoint_cons_right:
forall (A: Type) (a: A) (l1 l2: list A),
list_disjoint l1 (a :: l2) -> list_disjoint l1 l2.
Proof.
unfold list_disjoint; simpl; intros. apply H; tauto.
Qed.
(* An element of the left list of a disjoint pair cannot occur in the
right list. *)
Lemma list_disjoint_notin:
forall (A: Type) (l1 l2: list A) (a: A),
list_disjoint l1 l2 -> In a l1 -> ~(In a l2).
Proof.
unfold list_disjoint; intros; red; intros.
apply H with a a; auto.
Qed.
(* Disjointness is symmetric. *)
Lemma list_disjoint_sym:
forall (A: Type) (l1 l2: list A),
list_disjoint l1 l2 -> list_disjoint l2 l1.
Proof.
unfold list_disjoint; intros.
apply not_eq_sym. apply H; auto.
Qed.
(* Disjointness is decidable, given decidable equality on elements.
Ends with [Defined] so the decision procedure remains computable. *)
Lemma list_disjoint_dec:
forall (A: Type) (eqA_dec: forall (x y: A), {x=y} + {x<>y}) (l1 l2: list A),
{list_disjoint l1 l2} + {~list_disjoint l1 l2}.
Proof.
induction l1; intros.
left; red; intros. elim H.
case (In_dec eqA_dec a l2); intro.
right; red; intro. apply (H a a); auto with coqlib.
case (IHl1 l2); intro.
left; red; intros. elim H; intro.
red; intro; subst a y. contradiction.
apply l; auto.
right; red; intros. elim n0. eapply list_disjoint_cons_left; eauto.
Defined.
(** [list_equiv l1 l2] holds iff the lists [l1] and [l2] contain the same elements. *)
Definition list_equiv (A : Type) (l1 l2: list A) : Prop :=
forall x, In x l1 <-> In x l2.
(** [list_norepet l] holds iff the list [l] contains no repetitions,
i.e. no element occurs twice. *)
Inductive list_norepet (A: Type) : list A -> Prop :=
| list_norepet_nil:
list_norepet nil
| list_norepet_cons:
forall hd tl,
~(In hd tl) -> list_norepet tl -> list_norepet (hd :: tl).
(* No-repetition is decidable, given decidable equality on elements.
Ends with [Defined] so the decision procedure remains computable. *)
Lemma list_norepet_dec:
forall (A: Type) (eqA_dec: forall (x y: A), {x=y} + {x<>y}) (l: list A),
{list_norepet l} + {~list_norepet l}.
Proof.
induction l.
left; constructor.
destruct IHl.
case (In_dec eqA_dec a l); intro.
right. red; intro. inversion H. contradiction.
left. constructor; auto.
right. red; intro. inversion H. contradiction.
Defined.
(* Mapping a function that is injective on the elements of [l]
preserves no-repetition. *)
Lemma list_map_norepet:
forall (A B: Type) (f: A -> B) (l: list A),
list_norepet l ->
(forall x y, In x l -> In y l -> x <> y -> f x <> f y) ->
list_norepet (List.map f l).
Proof.
induction 1; simpl; intros.
constructor.
constructor.
red; intro. generalize (list_in_map_inv f _ _ H2).
intros [x [EQ IN]]. generalize EQ. change (f hd <> f x).
apply H1. tauto. tauto.
red; intro; subst x. contradiction.
apply IHlist_norepet. intros. apply H1. tauto. tauto. auto.
Qed.
(* No-repetition of an append is insensitive to the order of the two
halves. *)
Remark list_norepet_append_commut:
forall (A: Type) (a b: list A),
list_norepet (a ++ b) -> list_norepet (b ++ a).
Proof.
intro A.
assert (forall (x: A) (b: list A) (a: list A),
list_norepet (a ++ b) -> ~(In x a) -> ~(In x b) ->
list_norepet (a ++ x :: b)).
induction a; simpl; intros.
constructor; auto.
inversion H. constructor. red; intro.
elim (in_app_or _ _ _ H6); intro.
elim H4. apply in_or_app. tauto.
elim H7; intro. subst a. elim H0. left. auto.
elim H4. apply in_or_app. tauto.
auto.
induction a; simpl; intros.
rewrite <- app_nil_end. auto.
inversion H0. apply H. auto.
red; intro; elim H3. apply in_or_app. tauto.
red; intro; elim H3. apply in_or_app. tauto.
Qed.
(* Characterization: an append has no repetitions iff both halves have
none and the halves are disjoint. *)
Lemma list_norepet_app:
forall (A: Type) (l1 l2: list A),
list_norepet (l1 ++ l2) <->
list_norepet l1 /\ list_norepet l2 /\ list_disjoint l1 l2.
Proof.
induction l1; simpl; intros; split; intros.
intuition. constructor. red;simpl;auto.
tauto.
inversion H; subst. rewrite IHl1 in H3. rewrite in_app in H2.
intuition.
constructor; auto. red; intros. elim H2; intro. congruence. auto.
destruct H as [B [C D]]. inversion B; subst.
constructor. rewrite in_app. intuition. elim (D a a); auto. apply in_eq.
rewrite IHl1. intuition. red; intros. apply D; auto. apply in_cons; auto.
Qed.
(* The three corollaries below follow directly from [list_norepet_app]. *)
Lemma list_norepet_append:
forall (A: Type) (l1 l2: list A),
list_norepet l1 -> list_norepet l2 -> list_disjoint l1 l2 ->
list_norepet (l1 ++ l2).
Proof.
generalize list_norepet_app; firstorder.
Qed.
Lemma list_norepet_append_right:
forall (A: Type) (l1 l2: list A),
list_norepet (l1 ++ l2) -> list_norepet l2.
Proof.
generalize list_norepet_app; firstorder.
Qed.
Lemma list_norepet_append_left:
forall (A: Type) (l1 l2: list A),
list_norepet (l1 ++ l2) -> list_norepet l1.
Proof.
generalize list_norepet_app; firstorder.
Qed.
(* Reversal preserves no-repetition. *)
Lemma list_norepet_rev:
forall (A: Type) (l: list A), list_norepet l -> list_norepet (List.rev l).
Proof.
induction 1; simpl.
- constructor.
- apply list_norepet_append_commut. simpl. constructor; auto. rewrite <- List.in_rev; auto.
Qed.
(** [is_tail l1 l2] holds iff [l2] is of the form [l ++ l1] for some [l]. *)
Inductive is_tail (A: Type): list A -> list A -> Prop :=
| is_tail_refl:
forall c, is_tail c c
| is_tail_cons:
forall i c1 c2, is_tail c1 c2 -> is_tail c1 (i :: c2).
(* The head of a tail of [c2] is an element of [c2]. *)
Lemma is_tail_in:
forall (A: Type) (i: A) c1 c2, is_tail (i :: c1) c2 -> In i c2.
Proof.
induction c2; simpl; intros.
inversion H.
inversion H. tauto. right; auto.
Qed.
(* Dropping the head of the smaller list preserves the tail relation. *)
Lemma is_tail_cons_left:
forall (A: Type) (i: A) c1 c2, is_tail (i :: c1) c2 -> is_tail c1 c2.
Proof.
induction c2; intros; inversion H.
constructor. constructor. constructor. auto.
Qed.
Global Hint Resolve is_tail_refl is_tail_cons is_tail_in is_tail_cons_left: coqlib.
(* A tail of a list is included in it. *)
Lemma is_tail_incl:
forall (A: Type) (l1 l2: list A), is_tail l1 l2 -> incl l1 l2.
Proof.
induction 1; eauto with coqlib.
Qed.
(* The tail relation is transitive. *)
Lemma is_tail_trans:
forall (A: Type) (l1 l2: list A),
is_tail l1 l2 -> forall (l3: list A), is_tail l2 l3 -> is_tail l1 l3.
Proof.
induction 1; intros. auto. apply IHis_tail. eapply is_tail_cons_left; eauto.
Qed.
(** [list_forall2 P [x1 ... xN] [y1 ... yM]] holds iff [N = M] and
[P xi yi] holds for all [i]. *)
Section FORALL2.
Variable A: Type.
Variable B: Type.
Variable P: A -> B -> Prop.
Inductive list_forall2: list A -> list B -> Prop :=
| list_forall2_nil:
list_forall2 nil nil
| list_forall2_cons:
forall a1 al b1 bl,
P a1 b1 ->
list_forall2 al bl ->
list_forall2 (a1 :: al) (b1 :: bl).
(* [list_forall2] is compatible with list concatenation. *)
Lemma list_forall2_app:
forall a2 b2 a1 b1,
list_forall2 a1 b1 -> list_forall2 a2 b2 ->
list_forall2 (a1 ++ a2) (b1 ++ b2).
Proof.
induction 1; intros; simpl. auto. constructor; auto.
Qed.
(* Related lists have the same length. *)
Lemma list_forall2_length:
forall l1 l2,
list_forall2 l1 l2 -> length l1 = length l2.
Proof.
induction 1; simpl; congruence.
Qed.
(* Every element of the left list is related to some element of the
right list. *)
Lemma list_forall2_in_left:
forall x1 l1 l2,
list_forall2 l1 l2 -> In x1 l1 -> exists x2, In x2 l2 /\ P x1 x2.
Proof.
induction 1; simpl; intros. contradiction. destruct H1.
subst; exists b1; auto.
exploit IHlist_forall2; eauto. intros (x2 & U & V); exists x2; auto.
Qed.
(* Symmetric variant for the right list. *)
Lemma list_forall2_in_right:
forall x2 l1 l2,
list_forall2 l1 l2 -> In x2 l2 -> exists x1, In x1 l1 /\ P x1 x2.
Proof.
induction 1; simpl; intros. contradiction. destruct H1.
subst; exists a1; auto.
exploit IHlist_forall2; eauto. intros (x1 & U & V); exists x1; auto.
Qed.
End FORALL2.
(* [list_forall2] is monotone in the relation: the implication only
needs to hold for elements actually occurring in the lists. *)
Lemma list_forall2_imply:
forall (A B: Type) (P1: A -> B -> Prop) (l1: list A) (l2: list B),
list_forall2 P1 l1 l2 ->
forall (P2: A -> B -> Prop),
(forall v1 v2, In v1 l1 -> In v2 l2 -> P1 v1 v2 -> P2 v1 v2) ->
list_forall2 P2 l1 l2.
Proof.
induction 1; intros.
constructor.
constructor. auto with coqlib. apply IHlist_forall2; auto.
intros. auto with coqlib.
Qed.
(** Dropping the first N elements of a list. *)
Fixpoint list_drop (A: Type) (n: nat) (x: list A) {struct n} : list A :=
match n with
| O => x
| S n' => match x with nil => nil | hd :: tl => list_drop n' tl end
end.
(* Every element of [list_drop n l] is an element of [l]. *)
Lemma list_drop_incl:
forall (A: Type) (x: A) n (l: list A), In x (list_drop n l) -> In x l.
Proof.
induction n; simpl; intros. auto.
destruct l; auto with coqlib.
Qed.
(* Dropping a prefix preserves no-repetition. *)
Lemma list_drop_norepet:
forall (A: Type) n (l: list A), list_norepet l -> list_norepet (list_drop n l).
Proof.
induction n; simpl; intros. auto.
inv H. constructor. auto.
Qed.
(* [list_drop] commutes with [map]. *)
Lemma list_map_drop:
forall (A B: Type) (f: A -> B) n (l: list A),
list_drop n (map f l) = map f (list_drop n l).
Proof.
induction n; simpl; intros. auto.
destruct l; simpl; auto.
Qed.
(** * Definitions and theorems over boolean types *)
(* [proj_sumbool] projects a sumbool decision onto a boolean; declared
as a coercion so a [{P}+{Q}] value can be used directly as a [bool]. *)
Definition proj_sumbool {P Q: Prop} (a: {P} + {Q}) : bool :=
if a then true else false.
Coercion proj_sumbool: sumbool >-> bool.
(* A [true] projection witnesses the left proposition. *)
Lemma proj_sumbool_true:
forall (P Q: Prop) (a: {P}+{Q}), proj_sumbool a = true -> P.
Proof.
intros P Q a. destruct a; simpl. auto. congruence.
Qed.
(* Conversely, a provable [P] forces the projection to be [true]. *)
Lemma proj_sumbool_is_true:
forall (P: Prop) (a: {P}+{~P}), P -> proj_sumbool a = true.
Proof.
intros. unfold proj_sumbool. destruct a. auto. contradiction.
Qed.
(* Tactic that repeatedly decomposes boolean hypotheses:
[&&]-equations, [||]-equations and sumbool projections. *)
Ltac InvBooleans :=
match goal with
| [ H: _ && _ = true |- _ ] =>
destruct (andb_prop _ _ H); clear H; InvBooleans
| [ H: _ || _ = false |- _ ] =>
destruct (orb_false_elim _ _ H); clear H; InvBooleans
| [ H: proj_sumbool ?x = true |- _ ] =>
generalize (proj_sumbool_true _ H); clear H; intro; InvBooleans
| _ => idtac
end.
(* Rewriting lemmas for [if]-expressions over a decidable equality. *)
Section DECIDABLE_EQUALITY.
Variable A: Type.
Variable dec_eq: forall (x y: A), {x=y} + {x<>y}.
Variable B: Type.
Lemma dec_eq_true:
forall (x: A) (ifso ifnot: B),
(if dec_eq x x then ifso else ifnot) = ifso.
Proof.
intros. destruct (dec_eq x x). auto. congruence.
Qed.
Lemma dec_eq_false:
forall (x y: A) (ifso ifnot: B),
x <> y -> (if dec_eq x y then ifso else ifnot) = ifnot.
Proof.
intros. destruct (dec_eq x y). congruence. auto.
Qed.
(* The decision can be taken in either argument order. *)
Lemma dec_eq_sym:
forall (x y: A) (ifso ifnot: B),
(if dec_eq x y then ifso else ifnot) =
(if dec_eq y x then ifso else ifnot).
Proof.
intros. destruct (dec_eq x y).
subst y. rewrite dec_eq_true. auto.
rewrite dec_eq_false; auto.
Qed.
End DECIDABLE_EQUALITY.
(* Rewriting lemmas for [if]-expressions over a decidable proposition. *)
Section DECIDABLE_PREDICATE.
Variable P: Prop.
Variable dec: {P} + {~P}.
Variable A: Type.
Lemma pred_dec_true:
forall (a b: A), P -> (if dec then a else b) = a.
Proof.
intros. destruct dec. auto. contradiction.
Qed.
Lemma pred_dec_false:
forall (a b: A), ~P -> (if dec then a else b) = b.
Proof.
intros. destruct dec. contradiction. auto.
Qed.
End DECIDABLE_PREDICATE.
(** * Well-founded orderings *)
Require Import Relations.
(** A non-dependent version of lexicographic ordering. *)
Section LEX_ORDER.
Variable A: Type.
Variable B: Type.
Variable ordA: A -> A -> Prop.
Variable ordB: B -> B -> Prop.
Inductive lex_ord: A*B -> A*B -> Prop :=
| lex_ord_left: forall a1 b1 a2 b2,
ordA a1 a2 -> lex_ord (a1,b1) (a2,b2)
| lex_ord_right: forall a b1 b2,
ordB b1 b2 -> lex_ord (a,b1) (a,b2).
(* Lexicographic ordering of two well-founded orders is well founded.
The nested induction builds accessibility of pairs from accessibility
of each component. *)
Lemma wf_lex_ord:
well_founded ordA -> well_founded ordB -> well_founded lex_ord.
Proof.
intros Awf Bwf.
assert (forall a, Acc ordA a -> forall b, Acc ordB b -> Acc lex_ord (a, b)).
induction 1. induction 1. constructor; intros. inv H3.
apply H0. auto. apply Bwf.
apply H2; auto.
red; intros. destruct a as [a b]. apply H; auto.
Qed.
(* Lexicographic ordering of two transitive orders is transitive. *)
Lemma transitive_lex_ord:
transitive _ ordA -> transitive _ ordB -> transitive _ lex_ord.
Proof.
intros trA trB; red; intros.
inv H; inv H0.
left; eapply trA; eauto.
left; auto.
left; auto.
right; eapply trB; eauto.
Qed.
End LEX_ORDER.
(** * Nonempty lists *)
Inductive nlist (A: Type) : Type :=
| nbase: A -> nlist A
| ncons: A -> nlist A -> nlist A.
(* First element of a nonempty list. *)
Definition nfirst {A: Type} (l: nlist A) :=
match l with nbase a => a | ncons a l' => a end.
(* Last element of a nonempty list. *)
Fixpoint nlast {A: Type} (l: nlist A) :=
match l with nbase a => a | ncons a l' => nlast l' end.
(* Membership predicate for nonempty lists. *)
Fixpoint nIn {A: Type} (x: A) (l: nlist A) : Prop :=
match l with
| nbase a => a = x
| ncons a l => a = x \/ nIn x l
end.
(* Pointwise relation between two nonempty lists of the same length. *)
Inductive nlist_forall2 {A B: Type} (R: A -> B -> Prop) : nlist A -> nlist B -> Prop :=
| nbase_forall2: forall a b, R a b -> nlist_forall2 R (nbase a) (nbase b)
| ncons_forall2: forall a l b m, R a b -> nlist_forall2 R l m -> nlist_forall2 R (ncons a l) (ncons b m).
(* Monotonicity of [nlist_forall2] in the relation, restricted to
elements that actually occur in the lists. *)
Lemma nlist_forall2_imply:
forall (A B: Type) (P1: A -> B -> Prop) (l1: nlist A) (l2: nlist B),
nlist_forall2 P1 l1 l2 ->
forall (P2: A -> B -> Prop),
(forall v1 v2, nIn v1 l1 -> nIn v2 l2 -> P1 v1 v2 -> P2 v1 v2) ->
nlist_forall2 P2 l1 l2.
Proof.
induction 1; simpl; intros; constructor; auto.
Qed.
|
{"author": "gzqaq", "repo": "CS2612-PLaC", "sha": "fb7be0651785905b60d3e705324175daaadcc96b", "save_path": "github-repos/coq/gzqaq-CS2612-PLaC", "path": "github-repos/coq/gzqaq-CS2612-PLaC/CS2612-PLaC-fb7be0651785905b60d3e705324175daaadcc96b/assigns/compcert_lib/Coqlib.v"}
|
/*
This file is part of Mitsuba, a physically based rendering system.
Copyright (c) 2007-2014 by Wenzel Jakob and others.
Mitsuba is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 3
as published by the Free Software Foundation.
Mitsuba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mitsuba/render/scene.h>
#include <mitsuba/core/statistics.h>
#include <boost/math/distributions/normal.hpp>
MTS_NAMESPACE_BEGIN
/*!\plugin{adaptive}{Adaptive integrator}
* \order{13}
* \parameters{
* \parameter{maxError}{\Float}{Maximum relative error
* threshold\default{0.05}}
* \parameter{pValue}{\Float}{
* Required p-value to accept a sample \default{0.05}
* }
* \parameter{maxSampleFactor}{\Integer}{
* Maximum number of samples to be generated \emph{relative} to the
* number of configured pixel samples. The adaptive integrator
* will stop after this many samples, regardless of whether
* or not the error criterion was satisfied.
* A negative value will be interpreted as $\infty$.
* \default{32---for instance, when 64 pixel samples are configured in
* the \code{sampler}, this means that the adaptive integrator
* will give up after 32*64=2048 samples}
* }
* }
*
* This ``meta-integrator'' repeatedly invokes a provided sub-integrator
* until the computed radiance values satisfy a specified relative error bound
* (5% by default) with a certain probability (95% by default). Internally,
* it uses a Z-test to decide when to stop collecting samples. While repeatedly
* applying a Z-test in this manner is not good practice in terms of
* a rigorous statistical analysis, it provides a useful mathematically
* motivated stopping criterion.
*
* \begin{xml}[caption={An example how to make the \pluginref{path} integrator adaptive}]
* <integrator type="adaptive">
* <integrator type="path"/>
* </integrator>
* \end{xml}
*
* \remarks{
* \item The adaptive integrator needs a variance estimate to work
* correctly. Hence, the underlying sample generator should be set to a reasonably
* large number of pixel samples (e.g. 64 or higher) so that this estimate can be obtained.
* \item This plugin uses a relatively simplistic error heuristic that does not
* share information between pixels and only reasons about variance in image space.
* In the future, it will likely be replaced with something more robust.
* }
*/
/* Meta-integrator that repeatedly invokes a nested sub-integrator per
pixel until a relative-error stopping criterion (a per-pixel Z-test)
is satisfied or a sample budget is exhausted. See the plugin
documentation block above for the user-facing parameters. */
class AdaptiveIntegrator : public SamplingIntegrator {
public:
/* Construct from scene-description properties. */
AdaptiveIntegrator(const Properties &props) : SamplingIntegrator(props) {
/* Maximum relative error threshold */
m_maxError = props.getFloat("maxError", 0.05f);
/* Maximum number of samples to take (relative to the number of pixel samples
that were configured in the sampler). The sample collection
will stop after this many samples even if the variance is still
too high. A negative value will be interpreted as infinity. */
m_maxSampleFactor = props.getInteger("maxSampleFactor", 32);
/* Required P-value to accept a sample. */
m_pValue = props.getFloat("pValue", 0.05f);
m_verbose = props.getBoolean("verbose", false);
}
/* Unserialize from a binary data stream (network render nodes).
Field order must match serialize() below. */
AdaptiveIntegrator(Stream *stream, InstanceManager *manager)
: SamplingIntegrator(stream, manager) {
m_subIntegrator = static_cast<SamplingIntegrator *>(manager->getInstance(stream));
m_maxSampleFactor = stream->readInt();
m_maxError = stream->readFloat();
m_quantile = stream->readFloat();
m_averageLuminance = stream->readFloat();
m_pValue = stream->readFloat();
m_verbose = false;
}
/* Accept the nested integrator child; it must be a SamplingIntegrator. */
void addChild(const std::string &name, ConfigurableObject *child) {
const Class *cClass = child->getClass();
if (cClass->derivesFrom(MTS_CLASS(Integrator))) {
if (!cClass->derivesFrom(MTS_CLASS(SamplingIntegrator)))
Log(EError, "The sub-integrator must be derived from the class SamplingIntegrator");
m_subIntegrator = static_cast<SamplingIntegrator *>(child);
} else {
Integrator::addChild(name, child);
}
}
/* Forward sampler configuration to the sub-integrator as well. */
void configureSampler(const Scene *scene, Sampler *sampler) {
SamplingIntegrator::configureSampler(scene, sampler);
m_subIntegrator->configureSampler(scene, sampler);
}
/* Preprocessing: validates the setup, runs the sub-integrator's own
preprocessing, estimates the average image-plane luminance with
10000 pilot samples, and derives the two-sided normal quantile used
by the per-pixel stopping test in renderBlock(). */
bool preprocess(const Scene *scene, RenderQueue *queue, const RenderJob *job,
int sceneResID, int sensorResID, int samplerResID) {
if (!SamplingIntegrator::preprocess(scene, queue, job, sceneResID, sensorResID, samplerResID))
return false;
if (m_subIntegrator == NULL)
Log(EError, "No sub-integrator was specified!");
Sampler *sampler = static_cast<Sampler *>(Scheduler::getInstance()->getResource(samplerResID, 0));
Sensor *sensor = static_cast<Sensor *>(Scheduler::getInstance()->getResource(sensorResID));
if (sampler->getClass()->getName() != "IndependentSampler")
Log(EError, "The error-controlling integrator should only be "
"used in conjunction with the independent sampler");
if (!m_subIntegrator->preprocess(scene, queue, job, sceneResID, sensorResID, samplerResID))
return false;
Vector2i filmSize = sensor->getFilm()->getSize();
bool needsApertureSample = sensor->needsApertureSample();
bool needsTimeSample = sensor->needsTimeSample();
const int nSamples = 10000;
Float luminance = 0;
Point2 apertureSample(0.5f);
Float timeSample = 0.5f;
RadianceQueryRecord rRec(scene, sampler);
/* Estimate the overall luminance on the image plane */
for (int i=0; i<nSamples; ++i) {
sampler->generate(Point2i(0));
rRec.newQuery(RadianceQueryRecord::ERadiance, sensor->getMedium());
rRec.extra = RadianceQueryRecord::EAdaptiveQuery;
Point2 samplePos(rRec.nextSample2D());
samplePos.x *= filmSize.x;
samplePos.y *= filmSize.y;
if (needsApertureSample)
apertureSample = rRec.nextSample2D();
if (needsTimeSample)
timeSample = rRec.nextSample1D();
RayDifferential eyeRay;
Spectrum sampleValue = sensor->sampleRay(
eyeRay, samplePos, apertureSample, timeSample);
sampleValue *= m_subIntegrator->Li(eyeRay, rRec);
luminance += sampleValue.getLuminance();
}
m_averageLuminance = luminance / nSamples;
/* Quantile of the standard normal distribution at 1 - pValue/2,
i.e. the critical value of a two-sided test */
boost::math::normal dist(0, 1);
m_quantile = (Float) boost::math::quantile(dist, 1-m_pValue/2);
Log(EInfo, "Configuring for a %.1f%% confidence interval, quantile=%f, avg. luminance=%f",
(1-m_pValue)*100, m_quantile, m_averageLuminance);
return true;
}
/* Render one image block: for each pixel, keep drawing samples until
either the confidence-interval half width falls below the relative
error threshold or the maximum sample budget is reached. */
void renderBlock(const Scene *scene, const Sensor *sensor,
Sampler *sampler, ImageBlock *block, const bool &stop,
const std::vector< TPoint2<uint8_t> > &points) const {
/* Per-pixel storage layout of the block bitmap: spectrum samples
plus alpha and reconstruction-filter weight channels */
typedef TSpectrum<Float, SPECTRUM_SAMPLES + 2> SpectrumAlphaWeight;
bool needsApertureSample = sensor->needsApertureSample();
bool needsTimeSample = sensor->needsTimeSample();
if (sampler->getSampleCount() < 8)
Log(EError, "Starting the adaptive integrator with less than 8 "
"samples per pixel does not make much sense -- giving up.");
RayDifferential eyeRay;
RadianceQueryRecord rRec(scene, sampler);
Float diffScaleFactor = 1.0f /
std::sqrt((Float) sampler->getSampleCount());
Point2 apertureSample(0.5f);
Float timeSample = 0.5f;
int borderSize = sensor->getFilm()->getReconstructionFilter()->getBorderSize();
size_t sampleCount;
block->clear();
SpectrumAlphaWeight *target = (SpectrumAlphaWeight *) block->getBitmap()->getUInt8Data();
/* Stack-allocated scratch buffer covering the filter footprint of a
single pixel; reused for every pixel of the block */
SpectrumAlphaWeight *snapshot = (SpectrumAlphaWeight *) alloca(sizeof(SpectrumAlphaWeight)
* (2*borderSize+1)*(2*borderSize+1));
for (size_t i=0; i<points.size(); ++i) {
Point2i offset = Point2i(points[i]) + Vector2i(block->getOffset());
sampler->generate(offset);
/* Before starting to place samples within the area of a single pixel, the
following takes a snapshot of all surrounding spectrum+weight+alpha
values. Those are then used later to ensure that adjacent pixels will
not be disproportionately biased by this pixel's contributions. */
for (int y=0; y<2*borderSize+1; ++y) {
SpectrumAlphaWeight *src = target + ((y+points[i].y)
* block->getBitmap()->getWidth() + points[i].x);
SpectrumAlphaWeight *dst = snapshot + y*(2*borderSize+1);
memcpy(dst, src, sizeof(SpectrumAlphaWeight) * (2*borderSize+1));
}
Float mean = 0, meanSqr = 0.0f;
sampleCount = 0;
while (true) {
if (stop)
return;
rRec.newQuery(RadianceQueryRecord::ESensorRay, sensor->getMedium());
rRec.extra = RadianceQueryRecord::EAdaptiveQuery;
Point2 samplePos(Point2(offset) + Vector2(rRec.nextSample2D()));
if (needsApertureSample)
apertureSample = rRec.nextSample2D();
if (needsTimeSample)
timeSample = rRec.nextSample1D();
Spectrum sampleValue = sensor->sampleRayDifferential(
eyeRay, samplePos, apertureSample, timeSample);
eyeRay.scaleDifferential(diffScaleFactor);
sampleValue *= m_subIntegrator->Li(eyeRay, rRec);
Float sampleLuminance;
if (block->put(samplePos, sampleValue, rRec.alpha)) {
/* Check for problems with the sample */
sampleLuminance = sampleValue.getLuminance();
} else {
/* Rejected sample (e.g. NaN/invalid) -- count it as zero
luminance for the variance estimate */
sampleLuminance = 0.0f;
}
++sampleCount;
sampler->advance();
/* Numerically robust online variance estimation using an
algorithm proposed by Donald Knuth (TAOCP vol.2, 3rd ed., p.232) */
const Float delta = sampleLuminance - mean;
mean += delta / sampleCount;
meanSqr += delta * (sampleLuminance - mean);
if (m_maxSampleFactor >= 0 && sampleCount >= m_maxSampleFactor * sampler->getSampleCount()) {
/* Sample budget exhausted -- give up on this pixel */
break;
} else if (sampleCount >= sampler->getSampleCount()) {
/* Only start testing once the configured base sample count
has been collected */
/* Variance of the primary estimator */
const Float variance = meanSqr / (sampleCount-1);
Float stdError = std::sqrt(variance/sampleCount);
/* Half width of the confidence interval */
Float ciWidth = stdError * m_quantile;
/* Relative error heuristic */
Float base = std::max(mean, m_averageLuminance * 0.01f);
if (m_verbose && (sampleCount % 100) == 0)
Log(EDebug, "%i samples, mean=%f, stddev=%f, std error=%f, ci width=%f, max allowed=%f", sampleCount, mean,
std::sqrt(variance), stdError, ciWidth, base * m_maxError);
if (ciWidth <= m_maxError * base)
break;
}
}
/* Ensure that a large amounts of samples in one pixel do not
bias neighboring pixels (due to the reconstruction filter) */
Float factor = 1.0f / sampleCount;
for (int y=0; y<2*borderSize+1; ++y) {
SpectrumAlphaWeight *dst = target + ((y+points[i].y)
* block->getBitmap()->getWidth() + points[i].x);
SpectrumAlphaWeight *backup = snapshot + y*(2*borderSize+1);
for (int x=0; x<2*borderSize+1; ++x)
dst[x] = backup[x] * (1-factor) + dst[x] * factor;
}
}
}
/* Radiance queries are forwarded unchanged to the sub-integrator. */
Spectrum Li(const RayDifferential &ray, RadianceQueryRecord &rRec) const {
return m_subIntegrator->Li(ray, rRec);
}
/* Irradiance queries are forwarded unchanged to the sub-integrator. */
Spectrum E(const Scene *scene, const Intersection &its, const Medium *medium,
Sampler *sampler, int nSamples, bool includeIndirect) const {
return m_subIntegrator->E(scene, its, medium,
sampler, nSamples, includeIndirect);
}
/* Serialize to a binary data stream; field order must match the
stream constructor above. */
void serialize(Stream *stream, InstanceManager *manager) const {
SamplingIntegrator::serialize(stream, manager);
manager->serialize(stream, m_subIntegrator.get());
stream->writeInt(m_maxSampleFactor);
stream->writeFloat(m_maxError);
stream->writeFloat(m_quantile);
stream->writeFloat(m_averageLuminance);
stream->writeFloat(m_pValue);
}
void bindUsedResources(ParallelProcess *proc) const {
m_subIntegrator->bindUsedResources(proc);
}
void wakeup(ConfigurableObject *parent,
std::map<std::string, SerializableObject *> ¶ms) {
m_subIntegrator->wakeup(this, params);
}
/* Cancel both this integrator and the nested one. */
void cancel() {
SamplingIntegrator::cancel();
m_subIntegrator->cancel();
}
/* There is exactly one nested integrator, exposed at index 0. */
const Integrator *getSubIntegrator(int idx) const {
if (idx != 0)
return NULL;
return m_subIntegrator.get();
}
std::string toString() const {
std::ostringstream oss;
oss << "AdaptiveIntegrator[" << endl
<< " maxSamples = " << m_maxSampleFactor << "," << endl
<< " maxError = " << m_maxError << "," << endl
<< " quantile = " << m_quantile << "," << endl
<< " pvalue = " << m_pValue << "," << endl
<< " subIntegrator = " << indent(m_subIntegrator->toString()) << endl
<< "]";
return oss.str();
}
MTS_DECLARE_CLASS()
private:
ref<SamplingIntegrator> m_subIntegrator; // nested integrator doing the actual shading
Float m_maxError, m_quantile, m_pValue, m_averageLuminance;
int m_maxSampleFactor; // sample budget multiplier; negative = unlimited
bool m_verbose; // per-pixel debug logging (not serialized)
};
MTS_IMPLEMENT_CLASS_S(AdaptiveIntegrator, false, SamplingIntegrator)
MTS_EXPORT_PLUGIN(AdaptiveIntegrator, "Adaptive integrator");
MTS_NAMESPACE_END
|
{"hexsha": "502c4e1d00d8f3f88fe72721b0bc808705da31c7", "size": 13056, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "mitsuba-af602c6fd98a/src/integrators/misc/adaptive.cpp", "max_stars_repo_name": "NTForked-ML/pbrs", "max_stars_repo_head_hexsha": "0b405d92c12d257e2581366542762c9f0c3facce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 139.0, "max_stars_repo_stars_event_min_datetime": "2017-04-21T00:22:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T20:33:10.000Z", "max_issues_repo_path": "mitsuba-af602c6fd98a/src/integrators/misc/adaptive.cpp", "max_issues_repo_name": "NTForked-ML/pbrs", "max_issues_repo_head_hexsha": "0b405d92c12d257e2581366542762c9f0c3facce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11.0, "max_issues_repo_issues_event_min_datetime": "2017-08-15T18:22:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-01T05:44:41.000Z", "max_forks_repo_path": "mitsuba-af602c6fd98a/src/integrators/misc/adaptive.cpp", "max_forks_repo_name": "NTForked-ML/pbrs", "max_forks_repo_head_hexsha": "0b405d92c12d257e2581366542762c9f0c3facce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30.0, "max_forks_repo_forks_event_min_datetime": "2017-07-21T03:56:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T06:55:34.000Z", "avg_line_length": 37.1965811966, "max_line_length": 113, "alphanum_fraction": 0.7059589461, "num_tokens": 3480}
|
\section{Moltres} \label{sec:moltres}
In this section, we describe Moltres \cite{lindsay_introduction_2018}, the
multiphysics reactor simulation tool, and the specific modeling approach for
simulating the CNRS Benchmark cases in Moltres.
\subsection{Description of Moltres} \label{sec:description-of-moltres}
Moltres models coupled neutronics and thermal-hydraulics in reactors. While
generally applicable to most reactor concepts, much of
Moltres' development focuses on meeting the needs of \gls{MSR} multiphysics.
Together with \gls{MOOSE}'s \cite{permann_moose_2020} \texttt{Heat}
\texttt{Conduction} and \texttt{Navier-Stokes} \cite{peterson_overview_2018}
modules, Moltres solves the multigroup neutron diffusion
equations, for an arbitrary number of energy and precursor groups, and
thermal-hydraulics equations simultaneously on the same mesh (or separately
through fixed-point iterations if desired).
Previously, Lindsay et al. \cite{lindsay_introduction_2018}
demonstrated Moltres' \gls{MSR} neutronics modeling capabilities with 1D salt
flow in 2D-axisymmetric and 3D models of the \gls{MSRE}. The neutron flux and
temperature distributions agreed qualitatively with legacy
\gls{MSRE} data albeit with some minor quantitative discrepancies due to
simplifications and assumptions in the reactor geometry. Moltres has
since undergone further development to support 1) the looping of \gls{DNP}
drift back into the reactor core, 2) coupling the aforementioned \gls{DNP}
drift to incompressible Navier-Stokes velocity flows within the reactor core,
and 3) a decay heat model to simulate decay heat from fission products.
To perform neutronics calculations, Moltres requires homogenized group constant
data from dedicated high-fidelity neutronics software such as the NEWT module
in SCALE \cite{dehart_reactor_2011}, Serpent 2 \cite{leppanen_serpent_2014}, or
OpenMC \cite{romano_openmc:_2015}. Users
can run a Python script in Moltres' Github repository which automatically reads
user-provided SCALE/Serpent 2/OpenMC output data files and creates
Moltres-compatible JSON or text files containing all required group constant
data.
Moltres solves for the neutron fluxes governed by
the multigroup neutron diffusion equations given by:
%
\begin{align}
\frac{1}{v_g} \frac{\partial \phi_g}{\partial t} =& \nabla \cdot D_g
\nabla \phi_g - \Sigma^r_g \phi_g +
\sum^G_{g' \neq g} \Sigma^s_{g' \rightarrow g} \phi_{g'} \nonumber \\
&+ \chi^p_g \sum^G_{g'=1} \left( 1-\beta \right) \nu \Sigma^f_{g'}
\phi_{g'} + \chi^d_g \sum^I_i \lambda_i C_i \label{eq:neut} \\
%
\shortintertext{where}
v_g =& \text{ average speed of neutrons in group $g$,}
\nonumber \\
\phi_g =& \text{ neutron flux in group $g$,}
\nonumber \\
t =& \text{ time,} \nonumber \\
D_g =& \text{ diffusion coefficient of neutrons in} \nonumber \\
&\text{ group $g$,} \nonumber \\
\Sigma^r_g =& \text{ macroscopic cross section for removal of} \nonumber \\
&\text{ neutrons from group $g$,} \nonumber \\
\Sigma^s_{g' \rightarrow g} =& \text{ macroscopic cross section of
scattering from} \nonumber \\
&\text{ groups $g'$ to $g$,} \nonumber \\
\chi^p_g =& \text{ prompt fission spectrum for neutrons in} \nonumber \\
&\text{ group $g$,} \nonumber \\
G =& \text{ total number of discrete neutron groups,} \nonumber \\
\nu =& \text{ average number of neutrons produced per} \nonumber \\
&\text{ fission,} \nonumber \\
\Sigma^f_{g} =& \text{ macroscopic fission cross section for neutron}
\nonumber \\
&\text{ in group $g$,} \nonumber \\
\chi^d_g =& \text{ delayed fission spectrum for neutrons in} \nonumber \\
&\text{ group $g$,} \nonumber \\
I =& \text{ total number of delayed neutron precursor} \nonumber \\
&\text{ groups,} \nonumber \\
\beta =& \text{ total delayed neutron fraction.} \nonumber
\end{align}
The delayed neutron precursor concentrations are
governed by the following equation:
%
\begin{align}
\frac{\partial C_i}{\partial t} =& \beta_i \sum^G_{g'=1} \nu \Sigma^f_{g'}
\phi_{g'} - \lambda_i C_i - \vec{u} \cdot \nabla C_i + \nabla \cdot
D_{\text{P}} \nabla C_i \label{eq:dnp} \\
%
\shortintertext{where}
\beta_i =& \text{ delayed neutron fraction of precursor group $i$,}
\nonumber \\
\lambda_i =& \text{ average decay constant of delayed neutron} \nonumber \\
&\text{ precursors in precursor group $i$,} \nonumber \\
C_i =& \text{ concentration of delayed neutron precursors in}
\nonumber \\
&\text{ precursor group $i$,} \nonumber \\
\vec{u} =& \text{ molten salt flow velocity vector,}
\nonumber \\
D_{\text{P}} =& \text{ effective diffusion coefficient of the delayed}
\nonumber \\
&\text{ neutron precursors.} \nonumber
\end{align}
The last two terms in Equation \ref{eq:dnp} represent the advection and
diffusion terms, respectively, to model the movement of \gls{DNP} in
liquid-fuel \glspl{MSR}.
The governing equation for temperature is an advection-diffusion equation with
a fission heat source term $Q_f$ given by:
%
\begin{align}
\rho c_{p} \frac{\partial T}{\partial t} =& - \rho c_p \vec{u}
\cdot \nabla T + \nabla \cdot \left(k \nabla T \right) + Q_f
\label{eq:temp}
\shortintertext{and}
Q_f =& \sum^G_{g=1} \epsilon_g \Sigma_g^f \phi_g
\shortintertext{where}
\rho =& \text{ density of the molten salt,}
\nonumber \\
c_p =& \text{ specific heat capacity of molten salt,} \nonumber \\
T =& \text{ temperature of molten salt,} \nonumber \\
k =& \text{ effective thermal conductivity of molten salt,} \nonumber \\
Q_f =& \text{ fission heat source,} \nonumber \\
\epsilon_g =& \text{ average fission energy released by neutrons in group
$g$.} \nonumber
\end{align}
Lastly, the governing equations for the incompressible Navier-Stokes flow are
given by:
%
\begin{align}
\rho \frac{\partial \vec{u}}{\partial t} =&
-\rho (\vec{u}
\cdot \nabla) \vec{u} - \nabla p + \mu \nabla^2 \vec{u}
+ \rho \alpha \vec{g} \left(T - T_{\text{ref}} \right)
\label{eq:momemtum}
\shortintertext{and}
\nabla \cdot \vec{u} =& 0
\label{eq:divergence}
\shortintertext{where}
p =& \text{ pressure,} \nonumber \\
\mu =& \text{ dynamic viscosity,} \nonumber \\
\alpha =& \text{ coefficient of thermal expansion,} \nonumber \\
\vec{g} =& \text{ gravitational force vector,} \nonumber
\\
T_{\text{ref}} =& \text{ reference temperature at which the nominal}
\nonumber \\
&\text{ density is provided.} \nonumber
\nonumber
\end{align}
The velocity, temperature, and delayed neutron precursor
variables are all susceptible to numerical node-to-node oscillations
commonly observed when resolving advection-dominated transport using continuous
Galerkin methods \cite{kuhlmann_lid-driven_2018}.
\gls{MOOSE}'s \texttt{Navier-Stokes} module provides the
\gls{SUPG} stabilization scheme \cite{brooks_streamline_1982} for the velocity
and temperature variables to combat these oscillations. We
refer readers to \cite{peterson_overview_2018} for details on the
implementation of these methods in the \texttt{Navier-Stokes} module. On the
other hand, we discretized the delayed neutron precursor variables using
discontinuous shape functions, supported by \gls{MOOSE}'s discontinuous finite
element solver, to circumvent the numerical instability issue.
\subsection[Modeling approach]{Modeling approach\footnote{The input files for
all benchmark
cases are available on the Moltres GitHub repository at
\url{https://github.com/arfc/moltres/tree/devel/problems/2021-cnrs-benchmark}.
}} \label{sec:model}
For this work, we ran the benchmark cases on a uniformly-spaced mesh consisting
of 200$\times$200 elements. Thus, the dimensions of each mesh element are
0.01m$\times$0.01m. We adopt the group constant data
provided by Tiberga et al. \cite{tiberga_results_2020}. Next, we
discretized most of the relevant variables, i.e. neutron fluxes, velocity
components, pressure, and temperature, using continuous, first-order, Lagrange
shape functions. The only exception is the precursor concentration variables,
which we discretized using zeroth-order monomial shape functions and solved
using a \gls{DFEM}. We interpolated the resulting discontinuous,
cell-centered precursor values to obtain the nodal values for results
analysis.
As mentioned in Section \ref{sec:description-of-moltres}, the
\texttt{Navier-Stokes} and \texttt{Heat} \texttt{Conduction} modules from
\gls{MOOSE} provide some of the capabilities for
modeling incompressible flow and heat transfer. In particular, we stabilized
the incompressible flow and temperature governing equations using the
\gls{SUPG} stabilization method implemented in \gls{MOOSE}
\cite{peterson_overview_2018}. Without \gls{SUPG} stabilization, we
observed spurious numerical oscillations in the velocity and temperature near
the top boundary due to the singularity on the top left corner where different
velocity boundary conditions meet. We also applied the \gls{PSPG} stabilization
scheme \cite{hughes_new_1986} from the Navier-Stokes module
\cite{peterson_overview_2018}
which enables equal-order discretizations in the velocity and pressure
variables. Equal-order discretizations with \gls{PSPG} are computationally
cheaper and more convenient to work with than implementing higher-order
velocity discretizations for stability without \gls{PSPG}
\cite{chapelle_inf-sup_1993}.
We performed all eigenvalue calculations in Steps 0.2, 1.1, 1.2, 1.3, and 1.4
using the inverse power method solver in \gls{MOOSE}. All other steps
were performed using the Preconditioned Newton-Krylov solver
\cite{gaston_physics-based_2015}. The coupled steady-state problems in
Steps 1.2, 1.3, and 1.4 required segregated solvers for the neutronics
and the thermal-hydraulics due to the unique problem setups involving a
criticality search problem for the neutron multiplication factor
and a steady-state problem in thermal-hydraulics simultaneously.
\begin{table}[tb]
\caption{Timestep sizes used for the time-dependent cases in
Step 2.1, corresponding to 1/200th of the perturbation period.}
\centering
\setlength\tabcolsep{2.5pt}
\begin{tabular}{l l l l l l l l}
\toprule
Frequency [Hz] & 0.0125 & 0.025 & 0.05 & 0.1 & 0.2 & 0.4 & 0.8 \\
\midrule
Timestep size [s] & 0.2 & 0.2 & 0.1 & 0.05 & 0.025 & 0.0125 & 0.00625
\\
\bottomrule
\end{tabular}
\label{table:timestep}
\end{table}
For the time-dependent cases in Step 2.1, we employed fully coupled solves with
a second-order implicit Backward Differential Formula (BDF2) time-stepping
scheme. For each driving frequency in the heat transfer coefficient, we set the
timestep sizes to 1/200th of the perturbation period. Table
\ref{table:timestep} shows the timestep sizes. We assumed the
systems reached asymptotic behavior when the magnitudes of neighboring power
peaks differed by less than 0.001\% for at least ten wavelengths. Under this
assumption, the phase shift measurements between adjacent waves always
converged before the magnitude measurements of the power peaks.
Table \ref{table:software} compares the numerical methods, meshing schemes, and
neutronics models of Moltres and the four participating software packages in
the CNRS benchmark paper \cite{tiberga_results_2020}. The $SP_N$ and
$S_N$ neutronics models refer to the simplified $P_N$ spherical harmonics and
$S_N$ discrete ordinates neutron transport models, respectively. Based on the
solvers and methods of solution, Moltres is most similar to the
PHANTOM-$S_N$ + DGFlows \cite{tiberga_discontinuous_2019} multiphysics package
from \gls{TUD} with the $S_2$ neutron transport model. Participants from
\gls{CNRS} and \gls{PSI}
employed non-uniform meshes which were refined near the boundaries while we and
the participants from \gls{PoliMi} and \gls{TUD} employed uniform meshes.
\FloatBarrier
\begin{landscape}
\begin{table*}[p]
\caption{List of software packages and their corresponding model
specifications for the CNRS Benchmark simulations
\cite{tiberga_results_2020}.}
\centering
\begin{tabular}{p{4.2cm} p{7cm} p{3.3cm} p{2cm} p{2.7cm}}
\toprule
Software & Institute & Numerical method & Mesh & Neutronics model \\
\midrule
OpenFOAM & Centre national de la recherche scientifique (CNRS) & Finite volume & 200$\times$200 \newline Non-uniform & $SP_1$ \& $SP_3$ \\
OpenFOAM & Politecnico di Milano (PoliMi) & Finite volume & 400$\times$400 \newline Uniform & Neutron diffusion \\
GeN-Foam & Paul Scherrer Institute (PSI) & Finite volume & 200$\times$200 \newline Non-uniform & Neutron diffusion \\
PHANTOM-$S_N$+DGFlows & Delft University of Technology (TUD) & Discontinuous finite \newline element & 50$\times$50 \newline Uniform & $S_2$ \& $S_6$ \\
Moltres (This work) & University of Illinois at Urbana-Champaign (UIUC) & Continuous \& discontinuous finite element & 200$\times$200 \newline Uniform & Neutron diffusion \\
\bottomrule
\end{tabular}
\label{table:software}
\end{table*}
\end{landscape}
\FloatBarrier
|
{"hexsha": "b3a422050f961f2762cbe13efb97e747ad0c7d31", "size": 13295, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "moltres.tex", "max_stars_repo_name": "smpark7/2021-park-moltres-benchmark", "max_stars_repo_head_hexsha": "efaaaa70e0db4781a0dddad51151640aa820486f", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-17T16:40:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T22:56:12.000Z", "max_issues_repo_path": "moltres.tex", "max_issues_repo_name": "smpark7/2021-park-moltres-benchmark", "max_issues_repo_head_hexsha": "efaaaa70e0db4781a0dddad51151640aa820486f", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-02-25T14:45:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T19:14:44.000Z", "max_forks_repo_path": "moltres.tex", "max_forks_repo_name": "arfc/2021-park-moltres-benchmark", "max_forks_repo_head_hexsha": "efaaaa70e0db4781a0dddad51151640aa820486f", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-17T18:11:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-17T17:55:22.000Z", "avg_line_length": 49.6082089552, "max_line_length": 181, "alphanum_fraction": 0.7391500564, "num_tokens": 3804}
|
\section{Evaluation}
\label{sec:eval}
\lstMakeShortInline[mathescape=true]{|}
We have implemented analytic program repair in \toolname: a system for
repairing type errors for a purely functional subset of \ocaml. Next,
we describe our implementation and an evaluation that addresses three
questions:
\begin{itemize}
\item \textbf{RQ1}: How \emph{accurate} are \toolname's predicted repairs?
(\S~\ref{sec:eval:accuracy})
\item \textbf{RQ2}: How \emph{efficiently} can \toolname synthesize fixes?
(\S~\ref{sec:eval:efficiency})
\item \textbf{RQ3}: How \emph{useful} are \toolname's error messages?
(\S~\ref{sec:eval:useful})
\item \textbf{RQ4}: How \emph{precise} are \toolname's template fixes?
(\S~\ref{sec:eval:template_quality})
\end{itemize}
% \subsection{Implementation} \label{sec:eval:gen_method}
\mypara{Training Dataset}
%
For our evaluation, we use an \ocaml dataset gathered from an undergraduate
Programming Languages university course, previously used in related work
\citep{Seidel2017-ko,Seidel:2017}. It consists of erroneous programs and their
subsequent fixes and is divided into two parts: the Spring 2014 class (\SPRING)
and the Fall 2015 class (\FALL). The homework required students to write 23
distinct programs that demonstrate a range of functional programming idioms, \eg
higher-order functions and (polymorphic) algebraic data types.
\mypara{Feature Extraction}
%
\toolname represents programs with BOAT vectors of 449 features from each
expression in a program: 45 local syntactic, 315 contextual, 88 typing features,
and 1 expression size feature. For contextual features, for each expression we
extract the local syntactic features of its first 4 (left-to-right) children. In
addition, we extract those features for its ancestors, starting from its parent
and going up to two more parent nodes. For typing features, we support |int|s,
|float|s, |char|s, |string|s, and the user-defined |expr|. These features are
extracted for each expression and its context.
\mypara{Dataset Cleaning}
%
We extract fixes as expressions replacements over a program pair using \diffsym.
A disadvantage of using \diffsym s with this dataset is that some students may
have made many, potentially unrelated, changes between compilations; at some
point the ``fix'' becomes a ``rewrite''. These rewrites can lead to meaningless
fix templates and error locations. We discard such outliers when the fraction of
subexpressions that have changed in a program is more than one standard
deviation above the mean, establishing a \diffsym\ threshold of 40\%. We also
discard programs that have changes in 5 or more locations, noting that even
state-of-the-art multi-location repair techniques cannot reproduce such
``fixes'' \citep{Saha_2019}. The discarded changes account for roughly 32\% of
each dataset, leaving 2,475 program pairs for \SPRING and 2,177 pairs for \FALL.
Throughout, we use \SPRING as a training set and \FALL as a test set.
\mypara{\dnn based Classifier}
%
\toolname's template prediction uses a multi-layer neural network \dnn based
classifier with three fully-connected hidden layers of 512 neurons. The neurons
use rectified linear units (ReLU) as their activation function
\citep{Nair2010-xg}.
%
The \dnn was trained using \emph{early stopping} \citep{Hastie2009-bn}: training
is stopped when the accuracy on a distinct small part of the training set is not
improved after a certain amount of epochs (5 epochs, in our implementation).
%
We set the maximum number of epochs to 200.
%
We used the \textsc{Adam} optimizer \citep{Kingma2014-ng},
a variant of stochastic gradient descent that converges faster.
\subsection{RQ1: Accuracy}
\label{sec:eval:accuracy}
Most developers will consider around five or six suggestions before falling back
to manual debugging \citep{Kochhar2016-oc,Parnin2011-ce}.
%
Therefore, we consider \toolname's accuracy up to the \emph{top six} fix
template predictions, \ie we check if any of the top-N predicted templates
actually correspond to the user's edit. These predicted templates are not
shown to the user; they are only used to guide the synthesis of concrete
repairs which are then presented to the user.
\mypara{Baselines}
%
We compare \toolname's \dnn-based predictor against two baseline classifiers:
a \random classifier that returns templates chosen uniformly at random from the 50
templates learned from the \SPRING training dataset, and a \popular classifier
that returns the most popular templates in the training set in decreasing order.
We also compare to a \emph{decision tree} (\trees) and an \svm classifier
trained on the \SPRING data, since these are two of the most common learning
algorithms \citep{Hastie2009-bn}.
\input{evaluation-accuracy-graph}
\mypara{Results: Accuracy of Prediction}
%
\autoref{fig:accuracy-results} shows the accuracy results of our template
prediction experiments. The y-axis describes the fraction of \emph{erroneous}
sub-terms (locations) for which the actual repair was one of the top-K predicted
repairs.
%
The naive baseline of selecting templates at random achieves \RandomTopOne\%
Top-1 accuracy (\RandomTopSix\% Top-6), while the \popular classifier achieves a
Top-1 accuracy of \PopularTopOne\% (\PopularTopSix\% Top-6).
%
Our \dnn classifier significantly outperforms these naive classifiers, ranging
from \DnnTopOne\% Top-1 accuracy to \DnnTopSix\% Top-6 accuracy.
%
In fact, \dnn's first prediction alone outperforms the top-6
predictions of both \random and \popular.
%
The \random classifier's low performance is as expected.
%
The \popular classifier performs better: some homework assignments were shared
between \SPRING and \FALL quarters and, while different groups of students
solved these problems for each quarter, the novice mistakes that they made seem
to have a pattern. Thus, the most \emph{popular ``fixes''} (and therefore the
relevant templates) from \SPRING were also popular in \FALL.
We also observe that \trees achieves a Top-1 accuracy close to that of \dnn's
(\ie \TreeTopOne\% vs. \DnnTopOne\%) but fails to improve with more predictions
(\ie with Top-6, \TreeTopSix\% vs. \DnnTopSix\%).
%
On the other hand, the \svm does poorly on the Top-1 accuracy (\ie \SVMTopOne\%
vs. \DnnTopOne\%) but does significantly better with more predictions (\ie with
Top-6, \SVMTopSix\% vs. \DnnTopSix\%).
%
Therefore, we observe that more sophisticated learning algorithms can actually
learn patterns from a corpus of fixed programs, with \dnn classifiers achieving
the best performance in each category.
\mypara{Results: Template ``Confusion''}
%
The \emph{confusion matrix} of each location's top prediction shows which
templates our models mix up.
%
\autoref{fig:conf-matrix} shows this matrix for the top 30 templates acquired
from the \SPRING training set and tested on the \FALL dataset.
%
Note that most templates are predicted correctly and only a few of them are
often mis-predicted for another template.
%
For example, we see that programs that require template 20
(\elet{\hat{z}}{\epcases{\hat{t}}{\hat{x}}{\hat{y}}{\hat{a}}}{\_}) to be fixed,
almost always are mis-predicted with template 11 (\elet{(\hat{x},\
\hat{y})}{\hat{t}}{(\_,\ \_)}). We observe that these templates are still very
similar, with both of them having a top-level |let| that manipulates tuples
$\hat{t}$.
\begin{figure}[t]
\centering
\includegraphics[trim={30 40 100 70},clip,height=2.3in]{evaluation-conf-matrix-no-title.pdf}
\caption{The confusion matrix of the \emph{top 30} templates. Bolder parts of
the heatmap show templates that are often mis-predicted with another template.
The bolder the diagonal is, the more accurate predictions we make.}
\label{fig:conf-matrix}
\end{figure}
\begin{framed}
\noindent \toolname learns correlations between program features and repair
templates, yielding almost \emph{2x higher} accuracy than the naive baselines
and 8\% more than the other sophisticated learning algorithms. By abstracting
programs into features, \toolname is able to \emph{generalize} across years
and different kinds of programs.
\end{framed}
\subsection{RQ2: Efficiency}
\label{sec:eval:efficiency}
\label{subsec:eval:man_rep_qual_eval}
Next we evaluate \toolname's efficiency by measuring how many programs it is
able to generate a (well-typed) repair for.
%
We limit the synthesizer to 90 seconds. (In general the procedure is
undecidable, and we conjecture that a longer timeout will diminish the practical
usability for novices.)
%
Recall that the repair synthesis algorithm is guided by the repair template
predictions.
%
We evaluate the efficiency of \toolname by comparing it against a baseline
\naive implementation that, given the predicted fix location, attempts to
synthesize a repair from the trivial ``hole'' template.
\autoref{fig:rite_naive} shows the cumulative distribution function of
\toolname's and \naive's repair rates over their synthesis time. We observe that
using the predicted templates for synthesis allows \toolname to generate
type-correct repairs for almost 70\% of the programs in under 20 seconds, which
is nearly 12 points higher than the \naive baseline. We also observe that
\toolname successfully repairs around 10\% more programs than \naive for times
greater than 20 seconds. While the \naive approach is still able to synthesize
well-typed repairs relatively quickly, we will see that these repairs are of
much lower quality than those generated from the predicted templates
(\S~\ref{sec:eval:template_quality}).
\begin{framed}
\noindent \toolname can generate type-correct repairs for the vast majority of
ill-typed programs in under 20 seconds.
\end{framed}
% The 11.85\% of the programs that fail to be repaired within that amount of time
% fall in the case of the combined failure of our predictive models to give high
% confidence scores to the correct locations and templates, thus making synthesis
% very expensive.
% \begin{table}
% \centering
% \begin{tabular}{l|ccc}
% Classifier & Completed & Repair Rate & Time (sec) \\
% \hline
% \naive & 77.86\% & 74.78\% & 11.72 \\
% \toolname & 88.15\% & 84.80\% & 8.81 \\
% \end{tabular}
% \caption{Experimental results of \toolname's synthesis.}
% \label{tab:rite_naive}
% \end{table}
\begin{figure}
\centering
\includegraphics[height=2.3in]{cdf.pdf}
\caption{The proportion of the test set that can be repaired within a given time.}
\label{fig:rite_naive}
\end{figure}
\subsection{RQ3: Usefulness}
\label{sec:eval:useful}
The primary outcome is whether the repair-based
error messages generated by \toolname were actually useful to novices.
%
To assess the quality of \toolname's repairs, we conducted an online human
study with 29 participants.
%
Each participant was asked to evaluate the quality of the program fixes
and their locations against a state-of-the-art baseline
(\seminal ~\citep{Lerner2007-dt}).
%
For each program, beyond the two repairs, participants were presented
with the original ill-typed program, along with the standard \ocaml
compiler's error message and a short description of what the original
author of the program intended it to do.
%
From this study, we found that both the edit locations and final
repairs produced by \toolname were better than
\seminal's in a statistically significant manner.
\mypara{User Study Setup}
%
Study participants were recruited from two public research institutes
(University of California, San Diego and University of Michigan), and
from advertisement on Twitter.
%
Participants had to assess the quality of, and give comprehensible
bug descriptions for, at least 5 / 10 stimuli. The study took around
25 minutes to complete. Participants were compensated by entering a
drawing for an Amazon Echo voice assistant. There were 29 valid participants.
%
We created the stimuli by randomly selecting a corpus of 21 buggy programs
from the 1834 programs in our dataset where repairs were synthesized.
%
From this corpus, each participant was shown 10 randomly-selected buggy
programs, and two candidate repairs: one generated by \toolname and one
by \seminal.
%
For both algorithms, we used the highest-ranked solution returned.
%
Participants were always unaware which tool generated which candidate
patch.
%
Participants were then asked to assess the quality of each
candidate repair on a Likert scale of 1 to 5 and were asked
for a binary assessment of the quality of each repair's edit
location.
%
We also collected self-reported estimates of both programming and
\ocaml-specific experience as well as qualitative data assessing factors
influencing each participant's subjective judgment of repair quality.
%
From the 29 participants, we collected 554 patch quality assessments,
277 each for \toolname and \seminal generated repairs.
%\begin{figure}
% \includegraphics[width=8cm]{SampleStimuli.png}
% \caption{A sample stimulus used for assessing repair quality.}
% \label{fig:stimulus}
%\end{figure}
\mypara{Results}
%
In a statistically-significant manner, humans perceive that
\toolname's fault localization and final repairs are both
of higher quality than those produced by \seminal ($p=0.030$
and $p=0.024$ respectively).\footnote{All tests for statistical
significance used the Wilcoxon signed-rank test.}
%
Regarding fault localization, we find that humans agreed
with \toolname-identified edit locations 81.6\% of the time
but only agreed with those of \seminal 74.0\% of the time.
%
% This 10\% increase is important because \ME{You should explain why this matters.}
%
As for the final repair, humans also preferred \toolname's patches
to those produced by \seminal. Specifically, \toolname's repairs
achieved an average quality rating of 2.41/5 while \seminal's
repairs had an average rating of only 2.11/5, a 14\% increase ($p=0.030$),
showing a statistically-significant improvement over \seminal.
\begin{figure*}
\begin{subfigure}[t]{.33\textwidth}
\centering
\includegraphics[height=1.4in]{comp1.png}
\caption{\toolname (4.5/5) better than \seminal(1.1/5) with 12 responses $p=0.002$.}
\label{subfig:good1}
\end{subfigure}
\begin{subfigure}[t]{.33\textwidth}
\centering
\includegraphics[height=1.4in]{comp2.png}
\caption{\toolname (1.5/5) worse than \seminal(4.1/5) with 18 responses $p=0.0002$.}
\label{subfig:bad}
\end{subfigure}
\begin{subfigure}[t]{.29\textwidth}
\centering
\includegraphics[height=1.4in]{comp3.png}
\caption{\toolname (4.8/5) better than \seminal(1.2/5) with 17 responses $p=0.0003$.}
\label{subfig:good2}
\end{subfigure}
\caption{Three erroneous programs with the repairs that \toolname and \seminal generated for the \colorbox{pink}{red} error locations.}
\end{figure*}
\mypara{Qualitative Comparison}
%
We consider several case studies where there were
statistically-significant differences between
the human ratings for \toolname's and \seminal's
repairs.
%
The task in Figure~\ref{subfig:good1} is that
\texttt{wwhile(f, b)} should return $x$ where
there exist values $v_0,...,v_n$ such that:
$b = v_0$, $x = v_n$, and for each $i$ between
0 and $n-2$, we have $f v_i = (v_{i+1}, true)$
and $f v_{n-1} = (v_n, false)$.
%
The task in Figure~\ref{subfig:bad} is to
return a list of \texttt{n} copies of \texttt{x}.
%
The task in Figure~\ref{subfig:good2} is to
return the sum of the squares of the numbers
in the list \texttt{xs}.
%
Humans rated \toolname's repairs better
for the programs in Fig~\ref{subfig:good1}
and~\ref{subfig:good2}.
%
In both cases, \toolname's found a solution
which type-checks and conforms to the problem's
semantic specification.
%
\seminal, however, found a repair that was
either incomplete (\ref{subfig:good1}) or
semantically incorrect (\ref{subfig:good2}).
On the other hand, in ~\ref{subfig:bad}, \toolname
does worse as the \emph{second} parameter should
be \verb|n-1|. In fact, \toolname's second ranked
repair is the correct one, but it is equal
to the first in terms of edit distance.
\begin{framed}
\noindent Humans perceive both \toolname's edit locations
and final repair quality to be better than those produced
by \seminal, a state-of-the-art \ocaml repair tool, in a
statistically-significant manner.
\end{framed}
\subsection{RQ4: Impact of Templates on Quality}
\label{sec:eval:template_quality}
Finally, we seek to evaluate whether \toolname's template-guided
approach is really at the heart of its effectiveness. To do so,
as in \S~\ref{sec:eval:efficiency}, we
compared the results of using \toolname's error messages
synthesized from predicted templates to those generated by
a \naive synthesizer that returns the first well-typed term
(\ie synthesized from the trivial ``hole'' template).
\mypara{User Study Setup}
%
For this user study, we used a corpus of 20 buggy programs randomly chosen in
\S~\ref{sec:eval:useful}. For each of the programs we generated three messages:
using \toolname, using \seminal, and using the \naive approach but at the
\emph{same location} predicted by \toolname. We then randomized and masked the
order in which the tools' messages were reported, and asked three experts
(authors of this paper who had not seen the output of any tool for any of those
instances) to rate the messages as one of ``Good'', ``Ok'' or ``Bad''.
\mypara{Results}
%
Figure~\ref{fig:comparison} summarizes the results of the rating.
%
Since each of 20 programs received 3 ratings, there are a
total of 60 ratings per tool.
%
\toolname dominates with 22 Good, 20 Ok and 18 Bad ratings;
\seminal follows with only 12 Good, 11 Ok and 37 Bad; while
\naive received no Good scores, 12 Ok scores and a
dismal 48 Bad scores.
%
On average (with Bad = 0, Ok = 0.5, Good = 1),
\toolname scored 0.53, \seminal 0.30, and \naive
just 0.1.
%
Our rating agreement kappa is 0.54, which is considered ``moderate agreement''.
\begin{framed}
\noindent Repairs generated from predicted
templates were of significantly higher quality
than those from expert-biased enumeration (\seminal)
or \naive enumeration.
\end{framed}
\begin{figure}[t]
\centering
\includegraphics[height=1.5in]{comparison.png}
\caption{Rating the errors generated by \toolname, \seminal and \naive enumeration.}
\label{fig:comparison}
\end{figure}
|
{"hexsha": "b68e6773d0bfcc076b2b20340e82fe1c7ea683ba", "size": 18314, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "paper/pldi20-cameraready/evaluation.tex", "max_stars_repo_name": "gsakkas/rite", "max_stars_repo_head_hexsha": "958a0ad2460e15734447bc07bd181f5d35956d3b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-05-10T13:06:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T19:23:32.000Z", "max_issues_repo_path": "paper/pldi20-cameraready/evaluation.tex", "max_issues_repo_name": "gsakkas/rite", "max_issues_repo_head_hexsha": "958a0ad2460e15734447bc07bd181f5d35956d3b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:51:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:36:31.000Z", "max_forks_repo_path": "paper/pldi20-cameraready/evaluation.tex", "max_forks_repo_name": "gsakkas/rite", "max_forks_repo_head_hexsha": "958a0ad2460e15734447bc07bd181f5d35956d3b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-19T12:24:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-18T11:44:40.000Z", "avg_line_length": 42.198156682, "max_line_length": 135, "alphanum_fraction": 0.7691383641, "num_tokens": 4811}
|
from __future__ import absolute_import
# Emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
from ...api import write_data, slice_generator
from .. import generators as gen
from nose.tools import assert_equal, assert_raises
from numpy.testing import assert_almost_equal, assert_array_equal
# Module-level array fixtures shared by the tests below.
shape = (10,20,30)
DATA = np.zeros(shape)   # 3D fixture driven through slice_generator/write_data
DATA2 = np.ones(shape)   # not referenced in this chunk -- presumably used elsewhere; TODO confirm
shape = (3,5,4)          # rebinds `shape`; only DATA3 uses this value
DATA3 = np.zeros(shape)  # smaller fixture for the parcel tests; test_parcel_write mutates it in place
def test_read_slices():
    """Slicing a (10, 20, 30) array yields 2D slices that drop the
    sliced axis."""
    # Default axis: slices drop the first dimension.
    for _, d in slice_generator(DATA):
        assert_equal(d.shape, (20, 30))
    # Explicit axes 1 and 2 drop the corresponding dimension.
    for axis, want in ((1, (10, 30)), (2, (10, 20))):
        for _, d in slice_generator(DATA, axis=axis):
            assert_equal(d.shape, want)
def test_write_slices():
    """Writing slices back through write_data reconstructs the source
    array, whichever axis the slices were taken along."""
    def roundtrip(**kwargs):
        # Fill a zeroed destination from the slice stream and compare.
        destination = np.zeros(DATA.shape)
        write_data(destination, slice_generator(DATA, **kwargs))
        assert_almost_equal(destination, np.asarray(DATA))

    roundtrip()
    roundtrip(axis=1)
    roundtrip(axis=2)
def test_multi_slice():
    """Slicing over two axes at once leaves 1D slices of the remaining
    axis."""
    for axes, want in (([0, 1], (30,)), ([2, 1], (10,))):
        for _, d in slice_generator(DATA, axis=axes):
            assert_equal(d.shape, want)
def test_multi_slice_write():
    """Round-trip: writing two-axis slices reconstructs the source array."""
    destination = np.zeros_like(DATA)
    write_data(destination, slice_generator(DATA, axis=[0, 1]))
    assert_almost_equal(destination, np.asarray(DATA))
def test_parcel():
    """Each parcel's data chunk is 1D with length equal to the number of
    voxels carrying that parcel's label."""
    parcelmap = np.zeros(DATA3.shape)
    parcelmap[0,0,0] = 1
    parcelmap[1,1,1] = 1
    parcelmap[2,2,2] = 1
    parcelmap[1,2,1] = 2
    parcelmap[2,3,2] = 2
    parcelmap[0,1,0] = 2
    parcelseq = (0, 1, 2, 3)
    # Label 0 covers everything except the six voxels set above; label 3
    # matches no voxel, so its chunk is empty.  np.prod replaces
    # np.product, which was deprecated and removed in NumPy 2.0.
    expected = [np.prod(DATA3.shape) - 6, 3, 3, 0]
    # Explicit label sequence: all four labels, including the empty one.
    iterator = gen.data_generator(DATA3,
                                  gen.parcels(parcelmap, labels=parcelseq))
    for i, (s, d) in enumerate(iterator):
        assert_equal((expected[i],), d.shape)
    # Without labels, the parcels are derived from the map itself.
    iterator = gen.data_generator(DATA3, gen.parcels(parcelmap))
    for i, (s, d) in enumerate(iterator):
        assert_equal((expected[i],), d.shape)
def test_parcel_exclude():
    """Excluded labels are skipped; remaining parcels keep their order."""
    data = np.arange(5)

    def check(ps, *wanted):
        # The generator must yield exactly the wanted masks, then stop.
        for mask in wanted:
            assert_array_equal(next(ps), mask)
        assert_raises(StopIteration, next, ps)

    only1 = [False, True, False, False, False]
    only3 = [False, False, False, True, False]
    check(gen.parcels(data, (1, 3)), only1, only3)
    check(gen.parcels(data, (1, 3), exclude=(1,)), only3)
    check(gen.parcels(data, (1, 3), exclude=(3,)), only1)
    check(gen.parcels(data, (1, 3), exclude=(3, 1)))
    # Two-element exclude works, both as a tuple and as an ndarray.
    check(gen.parcels(data, (1, 3, 4), exclude=(1, 4)), only3)
    check(gen.parcels(data, (1, 3, 4), exclude=np.array((1, 4))), only3)
    # Parcels continue to be returned in sorted order under exclusion.
    rng = np.random.RandomState(42)
    data = rng.normal(size=(10,))
    uni = np.sort(np.unique(data))  # should already be sorted in fact
    values = [np.mean(data[p])  # should be scalar anyway
              for p in gen.parcels(data, exclude=uni[0:2])]
    assert_array_equal(values, uni[2:])
def test_parcel_write():
    """Values written into DATA3 parcel-by-parcel are read back unchanged
    through data_generator.

    NOTE(review): this test deliberately mutates the module-level DATA3 in
    place, so the parcel tests are order-sensitive with respect to its
    contents.
    """
    parcelmap = np.zeros(DATA3.shape)
    parcelmap[0,0,0] = 1
    parcelmap[1,1,1] = 1
    parcelmap[2,2,2] = 1
    parcelmap[1,2,1] = 2
    parcelmap[2,3,2] = 2
    parcelmap[0,1,0] = 2
    parcelseq = (0, 1, 2, 3)
    # np.prod replaces np.product (deprecated, removed in NumPy 2.0).
    expected = [np.prod(DATA3.shape) - 6, 3, 3, 0]

    def _write_and_check(labels):
        # Fill each parcel with 0..len-1, then verify the round trip.
        kwargs = {} if labels is None else {'labels': labels}
        for i, s in enumerate(gen.parcels(parcelmap, **kwargs)):
            DATA3[s] = np.arange(expected[i])
        data_iter = gen.data_generator(DATA3,
                                       gen.parcels(parcelmap, **kwargs))
        for i, (s, d) in enumerate(data_iter):
            assert_equal((expected[i],), d.shape)
            assert_array_equal(d, np.arange(expected[i]))

    # Once with an explicit label sequence, once with labels inferred
    # from the parcel map.
    _write_and_check(parcelseq)
    _write_and_check(None)
def test_parcel_copy():
    """Parcel iteration over a copy of DATA3 yields chunks of the expected
    sizes; the copy keeps the shared fixture untouched."""
    parcelmap = np.zeros(DATA3.shape)
    parcelmap[0,0,0] = 1
    parcelmap[1,1,1] = 1
    parcelmap[2,2,2] = 1
    parcelmap[1,2,1] = 2
    parcelmap[2,3,2] = 2
    parcelmap[0,1,0] = 2
    parcelseq = (0, 1, 2, 3)
    # np.prod replaces np.product (deprecated, removed in NumPy 2.0).
    expected = [np.prod(DATA3.shape) - 6, 3, 3, 0]
    tmp = DATA3.copy()
    gen_parcels = gen.parcels(parcelmap, labels=parcelseq)
    for i, (s, d) in enumerate(gen.data_generator(tmp, gen_parcels)):
        assert_equal((expected[i],), d.shape)
def test_sliceparcel():
    """slice_parcels yields (row, mask) pairs, one per label group per
    row; tagging each visit with its ordinal recovers the visit order."""
    parcelmap = np.asarray([[0, 0, 0, 1, 2],
                            [0, 0, 1, 1, 2],
                            [0, 0, 0, 0, 2]])
    parcelseq = ((1, 2), 0, 2)
    visited = np.zeros(parcelmap.shape)
    pairs = gen.slice_parcels(parcelmap, labels=parcelseq)
    for i, (row, mask) in enumerate(pairs):
        visited[row][mask] = i
    assert_array_equal(visited,
                       np.array([[1, 1, 1, 0, 2],
                                 [4, 4, 3, 3, 5],
                                 [7, 7, 7, 7, 8]]))
|
{"hexsha": "b14b208427583a8763fd20fd7812510a83f18a63", "size": 5761, "ext": "py", "lang": "Python", "max_stars_repo_path": "nipy/core/utils/tests/test_generators.py", "max_stars_repo_name": "alexis-roche/nipy", "max_stars_repo_head_hexsha": "b765f258621c886538b77115128511cdfd4600fe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nipy/core/utils/tests/test_generators.py", "max_issues_repo_name": "alexis-roche/nipy", "max_issues_repo_head_hexsha": "b765f258621c886538b77115128511cdfd4600fe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nipy/core/utils/tests/test_generators.py", "max_forks_repo_name": "alexis-roche/nipy", "max_forks_repo_head_hexsha": "b765f258621c886538b77115128511cdfd4600fe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-19T17:26:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-19T17:26:43.000Z", "avg_line_length": 34.9151515152, "max_line_length": 75, "alphanum_fraction": 0.6294046173, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1758}
|
-- Peano-style view of fixed-width, little-endian bit vectors:
-- increment/decrement, an induction principle that lets one reason about
-- BitVectors as if they were unary naturals, and conversions to/from ℕ.
module Data.BitVector.Peano where
open import Data.BitVector
open import Algebra.FunctionProperties.Core
open import Data.Nat hiding (pred) renaming (suc to Nsuc; zero to Nzero)
open import Data.Vec hiding (fromList)
open import Relation.Binary.PropositionalEquality
open import Data.Digit hiding (Bit)
open import Data.Fin using () renaming (zero to Fzero; suc to Fsuc)
open import Data.List
open import Data.Product

-- Successor: a low 0# flips to 1#; a low 1# clears and the carry ripples
-- into the higher bits.  On the width-0 vector it is the identity.
suc : ∀ {n} → Op₁ (BitVector n)
suc [] = []
suc (0# ∷ xs) = 1# ∷ xs
suc (1# ∷ xs) = 0# ∷ suc xs

-- Borrow propagation for pred: `pred-helper xs` computes pred (0# ∷ xs).
-- Note pred-helper [] = 1# ∷ [], so decrementing zero wraps to all ones.
pred-helper : ∀ {n} → BitVector n → BitVector (Nsuc n)
pred-helper [] = 1# ∷ []
pred-helper (0# ∷ xs) = 1# ∷ pred-helper xs
pred-helper (1# ∷ xs) = 1# ∷ 0# ∷ xs

-- Predecessor (inverse of suc, with wrap-around at zero).
pred : ∀ {n} → Op₁ (BitVector n)
pred [] = []
pred (1# ∷ xs) = 0# ∷ xs
pred (0# ∷ xs) = pred-helper xs

-- suc cancels pred-helper: suc (pred (0# ∷ x)) ≡ 0# ∷ x.
suc∘pred-helper≡0# : ∀ {n} (x : BitVector n) → suc (pred-helper x) ≡ 0# ∷ x
suc∘pred-helper≡0# [] = refl
suc∘pred-helper≡0# (0# ∷ xs) = cong (_∷_ 0#) (suc∘pred-helper≡0# xs)
suc∘pred-helper≡0# (1# ∷ xs) = refl

-- pred-helper cancels suc: pred (0# ∷ suc x) ≡ 1# ∷ x.
pred-helper∘suc≡1# : ∀ {n} (x : BitVector n) → pred-helper (suc x) ≡ 1# ∷ x
pred-helper∘suc≡1# [] = refl
pred-helper∘suc≡1# (0# ∷ xs) = refl
pred-helper∘suc≡1# (1# ∷ xs) = cong (_∷_ 1#) (pred-helper∘suc≡1# xs)

-- suc and pred are mutually inverse on every width.
suc∘pred≡id : ∀ {n} (x : BitVector n) → suc (pred x) ≡ x
suc∘pred≡id [] = refl
suc∘pred≡id (0# ∷ xs) = suc∘pred-helper≡0# xs
suc∘pred≡id (1# ∷ xs) = refl

pred∘suc≡id : ∀ {n} (x : BitVector n) → pred (suc x) ≡ x
pred∘suc≡id [] = refl
pred∘suc≡id (0# ∷ xs) = refl
pred∘suc≡id (1# ∷ xs) = pred-helper∘suc≡1# xs

-- A unary (Peano) witness for a bit vector: how x is reached from zero
-- by repeated applications of suc.
data Peano : ∀ {n} → BitVector n → Set where
  Pzero : ∀ {n} → Peano (zero n)
  Psuc : ∀ {n} {x : BitVector n} → (p : Peano x) → Peano (suc x)

-- Doubling on witnesses: from a witness for x build one for 0# ∷ x
-- (i.e. 2·x), by duplicating every Psuc.
Pdouble : ∀ {n} {x : BitVector n} → Peano x → Peano (0# ∷ x)
Pdouble Pzero = Pzero
Pdouble (Psuc p) = Psuc (Psuc (Pdouble p))

-- Every bit vector admits a Peano view, by structural recursion:
-- 0# ∷ xs is 2·xs and 1# ∷ xs is 1 + 2·xs.
toPeano : ∀ {n} (x : BitVector n) → Peano x
toPeano [] = Pzero
toPeano (0# ∷ xs) = Pdouble (toPeano xs)
toPeano (1# ∷ xs) = Psuc (Pdouble (toPeano xs))

-- Induction over Peano witnesses (zero case + successor step).
peanoInduction : ∀ {n} → (P : ∀ {x : BitVector n} → Peano x → Set) → P Pzero
               → (∀ {x : BitVector n} {m : Peano x} → P m → P (Psuc m))
               → ∀ {x : BitVector n} (q : Peano x) → P q
peanoInduction P z s Pzero = z
peanoInduction P z s (Psuc p) = s {_} {p} (peanoInduction P z s p)

-- Natural-number-style induction on bit vectors themselves, obtained by
-- inducting over the Peano view of the argument.
induction : ∀ {n} (P : BitVector n → Set) → P (zero n)
          → (∀ {m} → P m → P (suc m)) → ∀ x → P x
induction P z s x = peanoInduction (λ {x} _ → P x) z s (toPeano x)

-- Interpret a bit vector as a natural number by counting sucs.
toℕ : ∀ {n} → BitVector n → ℕ
toℕ = induction _ 0 Nsuc

-- Convert a binary digit (Data.Digit, base 2) to our Bit type.
fromDigit : Digit 2 → Bit
fromDigit Fzero = 0#
fromDigit (Fsuc Fzero) = 1#
fromDigit (Fsuc (Fsuc ()))

-- Build a width-n vector from a (little-endian) digit list: zero-pads
-- when the list is too short and truncates when it is too long.
fromList : ∀ {n} → List (Digit 2) → BitVector n
fromList [] = zero _
fromList {Nzero} (x ∷ xs) = []
fromList {Nsuc n} (x ∷ xs) = fromDigit x ∷ fromList xs

-- Convert a natural number via its base-2 digit expansion (truncated to
-- the target width n).
fromℕ : ∀ {n} → ℕ → BitVector n
fromℕ n with toDigits 2 n
fromℕ .(fromDigits ds) | ds , refl = fromList ds

-- TODO: the terrifying proofs that toℕ and fromℕ are inverses
|
{"hexsha": "395c602a58c5147e2cac949a5c0d4eacfa9d956d", "size": 2907, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Data/BitVector/Peano.agda", "max_stars_repo_name": "copumpkin/bitvector", "max_stars_repo_head_hexsha": "6902f4bce0330f1b58f48395dac4406056713687", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-01-04T07:19:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T01:41:07.000Z", "max_issues_repo_path": "Data/BitVector/Peano.agda", "max_issues_repo_name": "copumpkin/bitvector", "max_issues_repo_head_hexsha": "6902f4bce0330f1b58f48395dac4406056713687", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-05-25T02:00:59.000Z", "max_issues_repo_issues_event_max_datetime": "2016-05-25T02:00:59.000Z", "max_forks_repo_path": "Data/BitVector/Peano.agda", "max_forks_repo_name": "copumpkin/bitvector", "max_forks_repo_head_hexsha": "6902f4bce0330f1b58f48395dac4406056713687", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-05-25T00:15:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T01:40:57.000Z", "avg_line_length": 31.597826087, "max_line_length": 76, "alphanum_fraction": 0.5964912281, "num_tokens": 1256}
|
#!/usr/bin/python
#
# Convert to COCO-style panoptic segmentation format (http://cocodataset.org/#format-data).
#
# python imports
from __future__ import print_function, absolute_import, division, unicode_literals
import os
import glob
import sys
import argparse
import json
import numpy as np
# Image processing
from PIL import Image
# The 20 ScanNet benchmark classes.  EVAL_LABELS holds the raw label ids
# (presumably NYU40 ids -- TODO confirm against the benchmark docs); the
# three parallel lists give, per id, the human-readable class name, a
# coarse supercategory, and an RGB visualization color.
EVAL_LABELS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
EVAL_LABEL_NAMES = ["wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door", "window", "bookshelf", "picture", "counter", "desk", "curtain", "refrigerator", "shower curtain", "toilet", "sink", "bathtub", "otherfurniture"]
EVAL_LABEL_CATS = ["indoor", "indoor", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "furniture", "appliance", "furniture", "furniture", "appliance", "furniture", "furniture"]
EVAL_LABEL_COLORS = [(174, 199, 232), (152, 223, 138), (31, 119, 180), (255, 187, 120), (188, 189, 34), (140, 86, 75), (255, 152, 150), (214, 39, 40), (197, 176, 213), (148, 103, 189), (196, 156, 148), (23, 190, 207), (247, 182, 210), (219, 219, 141), (255, 127, 14), (158, 218, 229), (44, 160, 44), (112, 128, 144), (227, 119, 194), (82, 84, 163)]
def splitall(path):
    """Split *path* into the list of all of its components.

    Repeatedly applies ``os.path.split`` until one of its two sentinels is
    reached: for an absolute path the head stops shrinking (e.g. ``"/"``),
    for a relative path only a single component remains.

    Returns the components root/leading part first, e.g.
    ``"/a/b" -> ["/", "a", "b"]`` and ``"a/b/c" -> ["a", "b", "c"]``.
    """
    allparts = []
    while True:
        head, tail = os.path.split(path)
        if head == path:  # sentinel for absolute paths: head no longer shrinks
            allparts.insert(0, head)
            break
        elif tail == path:  # sentinel for relative paths: single component left
            allparts.insert(0, tail)
            break
        else:
            path = head
            allparts.insert(0, tail)
    return allparts
# The main method
def convert2panoptic(scannetPath, outputFolder=None):
    """Convert ScanNet instance-segmentation PNGs to COCO panoptic format.

    Scans ``<scannetPath>/*/instance/*.png``, writes one color-encoded
    panoptic PNG per input frame into ``<outputFolder>/scannet_panoptic/``
    and a single ``scannet_panoptic.json`` holding the image, annotation
    and category records (http://cocodataset.org/#format-data).

    Parameters
    ----------
    scannetPath : str
        Root of the ScanNet frames folder (e.g. ``scannet_frames_25k``).
    outputFolder : str, optional
        Destination folder; defaults to ``scannetPath``.
    """
    if outputFolder is None:
        outputFolder = scannetPath
    # find files
    search = os.path.join(scannetPath, "*", "instance", "*.png")
    files = glob.glob(search)
    files.sort()
    # quit if we did not find anything
    if not files:
        print(
            "Did not find any files for using matching pattern {}. Please consult the README.".format(search)
        )
        sys.exit(-1)
    # a bit verbose
    print("Converting {} annotation files.".format(len(files)))
    outputBaseFile = "scannet_panoptic"
    outFile = os.path.join(outputFolder, "{}.json".format(outputBaseFile))
    print("Json file with the annotations in panoptic format will be saved in {}".format(outFile))
    panopticFolder = os.path.join(outputFolder, outputBaseFile)
    if not os.path.isdir(panopticFolder):
        print("Creating folder {} for panoptic segmentation PNGs".format(panopticFolder))
        os.mkdir(panopticFolder)
    print("Corresponding segmentations in .png format will be saved in {}".format(panopticFolder))
    # Build the category records; labels 1 (wall) and 2 (floor) are treated
    # as "stuff" (isthing=False), everything else as countable "things".
    categories = []
    for idx in range(len(EVAL_LABELS)):
        label = EVAL_LABELS[idx]
        name = EVAL_LABEL_NAMES[idx]
        cat = EVAL_LABEL_CATS[idx]
        color = EVAL_LABEL_COLORS[idx]
        isthing = label > 2
        categories.append({'id': int(label),
                           'name': name,
                           'color': color,
                           'supercategory': cat,
                           'isthing': isthing})
    images = []
    annotations = []
    for progress, f in enumerate(files):
        originalFormat = np.array(Image.open(f))
        # Input path looks like .../<scene>/instance/<frame>.png; combine
        # scene and frame names so output file names stay unique.
        parts = splitall(f)
        fileName = parts[-1]
        sceneName = parts[-3]
        outputFileName = "{}__{}".format(sceneName, fileName)
        inputFileName = os.path.join(sceneName, "color", fileName)
        imageId = os.path.splitext(outputFileName)[0]
        # image entry, id for image is its filename without extension
        images.append({"id": imageId,
                       "width": int(originalFormat.shape[1]),
                       "height": int(originalFormat.shape[0]),
                       "file_name": outputFileName})
                       #"file_name": inputFileName})
        pan_format = np.zeros(
            (originalFormat.shape[0], originalFormat.shape[1], 3), dtype=np.uint8
        )
        segmentIds = np.unique(originalFormat)
        segmInfo = []
        for segmentId in segmentIds:
            isCrowd = 0
            # Pixel values < 1000 are pure semantic labels; larger values
            # encode semantic_label * 1000 + instance_id.
            if segmentId < 1000:
                semanticId = segmentId
            else:
                semanticId = segmentId // 1000
            if semanticId not in EVAL_LABELS:
                continue
            mask = originalFormat == segmentId
            # COCO panoptic color encoding: id = R + 256*G + 256^2*B.
            # NOTE(review): the G channel is segmentId // 256 without a
            # "% 256" -- only correct while segment ids stay below 65536;
            # verify against the id range produced by the dataset.
            color = [segmentId % 256, segmentId // 256, segmentId // 256 // 256]
            pan_format[mask] = color
            area = np.sum(mask) # segment area computation
            # bbox computation for a segment
            hor = np.sum(mask, axis=0)
            hor_idx = np.nonzero(hor)[0]
            x = hor_idx[0]
            width = hor_idx[-1] - x + 1
            vert = np.sum(mask, axis=1)
            vert_idx = np.nonzero(vert)[0]
            y = vert_idx[0]
            height = vert_idx[-1] - y + 1
            bbox = [int(x), int(y), int(width), int(height)]
            segmInfo.append({"id": int(segmentId),
                             "category_id": int(semanticId),
                             "area": int(area),
                             "bbox": bbox,
                             "iscrowd": isCrowd})
        annotations.append({'image_id': imageId,
                            'file_name': outputFileName,
                            "segments_info": segmInfo})
        Image.fromarray(pan_format).save(os.path.join(panopticFolder, outputFileName))
        print("\rProgress: {:>3.2f} %".format((progress + 1) * 100 / len(files)), end=' ')
        sys.stdout.flush()
    print("\nSaving the json file {}".format(outFile))
    d = {'images': images,
         'annotations': annotations,
         'categories': categories}
    with open(outFile, 'w') as f:
        json.dump(d, f, sort_keys=True, indent=4)
def main():
    """Parse command-line options and run the panoptic conversion."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--dataset-folder",
        dest="scannetPath",
        type=str,
        required=True,
        help="path to the ScanNet data 'scannet_frames_25k' folder",
    )
    arg_parser.add_argument(
        "--output-folder",
        dest="outputFolder",
        type=str,
        default=None,
        help="path to the output folder.",
    )
    options = arg_parser.parse_args()
    convert2panoptic(options.scannetPath, options.outputFolder)


# Script entry point.
if __name__ == "__main__":
    main()
|
{"hexsha": "648561ff4edc4db1c644515ed643f5d862a173dc", "size": 6672, "ext": "py", "lang": "Python", "max_stars_repo_path": "BenchmarkScripts/convert2panoptic.py", "max_stars_repo_name": "Skywalker666666/scannet_dataset_prep", "max_stars_repo_head_hexsha": "0cda8c360512eda8c2ade892c5f23ed21320cc69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BenchmarkScripts/convert2panoptic.py", "max_issues_repo_name": "Skywalker666666/scannet_dataset_prep", "max_issues_repo_head_hexsha": "0cda8c360512eda8c2ade892c5f23ed21320cc69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BenchmarkScripts/convert2panoptic.py", "max_forks_repo_name": "Skywalker666666/scannet_dataset_prep", "max_forks_repo_head_hexsha": "0cda8c360512eda8c2ade892c5f23ed21320cc69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5664739884, "max_line_length": 348, "alphanum_fraction": 0.5614508393, "include": true, "reason": "import numpy", "num_tokens": 1693}
|
# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
'''
Uses numerical integration to calculate accurate values to test against.
This should only be run after `python setup.py build_ext --inplace`.
'''
import os
import sys
import fdint
# Destination directory for the generated test modules.
tests_dir = os.path.join(os.path.dirname(__file__), '../fdint/tests/')
import numpy
# Argument values at which the generated tests check the round trip
# ifd1h(fd1h(phi)) == phi (see the generator loop below).
phi = numpy.array([-50,-3,-2,-1,0,1,4,5,7,10,15,20,30,40,50], dtype=float)
def write_header(f, modname, dependencies=''):
    """Write the boilerplate header of a generated test module to *f*.

    Emits the license banner, a "generated file" warning, the standard
    imports (plus the extra *dependencies* lines verbatim), the
    ``Test_<MODNAME>`` TestCase declaration, and two relative-tolerance
    assertion helper methods used by the generated test cases.
    """
    f.write("""# Copyright (c) 2015, Scott J Maddox. All rights reserved.
# Use of this source code is governed by the BSD-3-Clause
# license that can be found in the LICENSE file.
# This file was generated by `scripts/gen_test_ifd.py`.
# Do not edit this file directly, or your changes will be lost.
'''
Tests the `{modname}` module.
'''
# Make sure we import the local package
import os
import sys
sys.path.insert(0,
    os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from fdint import {modname}
import unittest
import numpy
import warnings
""".format(modname=modname))
    # Caller-supplied extra import lines, written verbatim.
    f.write(dependencies)
    f.write('\n')
    f.write('class Test_{modname}(unittest.TestCase):\n'
            ''.format(modname=modname.upper()))
    f.write('''
    def assertRTOL(self, a, b, RTOL):
        assert RTOL >= 0
        rerr = abs(a-b)/a
        if rerr > RTOL:
            self.fail('Outside of relative tolerance of {}: {}'
                      ''.format(RTOL, rerr))
''')
    f.write('''
    def assert_all_rtol(self, a, b, rtol):
        assert rtol >= 0
        a = numpy.array(a)
        b = numpy.array(b)
        rtol = numpy.array(rtol)
        rerr = abs(a-b)/a
        if (rerr > rtol).all():
            self.fail('Outside of relative tolerance of {}: {}'
                      ''.format(rtol, rerr))
''')
#################
# Test ifd module
# Generate fdint/tests/test_ifd.py: for each sample value in `phi`, emit a
# scalar and a vector test asserting that ifd1h inverts fd1h to 1e-14.
modname='ifd'
fpath = os.path.join(tests_dir, 'test_{modname}.py'.format(modname=modname))
with open(fpath, 'w') as f:
    # NOTE(review): `mod` is never used below -- candidate for removal.
    mod = getattr(fdint, modname)
    write_header(f, modname, 'from fdint import fd\n')
    # scalar
    i = 0
    for x in phi:
        i += 1
        # scalar
        f.write('\n')
        f.write('    def test_ifd1h_{i}(self):\n'.format(i=i))
        f.write('        '
                'self.assertRTOL({ifname}({fname}({phi})), {phi}, 1e-14)\n'
                ''.format(ifname='ifd.ifd1h',fname='fd.fd1h',phi=x))
        # vector
        f.write('\n')
        f.write('    def test_vifd1h_{i}(self):\n'.format(i=i))
        f.write('        '
                'self.assert_all_rtol({ifname}({fname}(numpy.array(({phi},)))),\n'
                '                     '
                '        '
                '({phi},), (1e-14,))\n'
                ''.format(ifname='ifd.ifd1h',fname='fd.fd1h',phi=x))
    f.write('\n')
    f.write('if __name__ == "__main__":\n')
    f.write('    unittest.main()')
|
{"hexsha": "9654c22e4ec6f149fb26d94966c274497b2bb5d6", "size": 2953, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/gen_test_ifd.py", "max_stars_repo_name": "jgukelberger/fdint", "max_stars_repo_head_hexsha": "0237323d6fd5d4161190ff7982811d8ae290f89e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2015-10-25T18:51:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-26T13:05:07.000Z", "max_issues_repo_path": "scripts/gen_test_ifd.py", "max_issues_repo_name": "jgukelberger/fdint", "max_issues_repo_head_hexsha": "0237323d6fd5d4161190ff7982811d8ae290f89e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2015-04-23T19:41:20.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-01T02:04:04.000Z", "max_forks_repo_path": "scripts/gen_test_ifd.py", "max_forks_repo_name": "jgukelberger/fdint", "max_forks_repo_head_hexsha": "0237323d6fd5d4161190ff7982811d8ae290f89e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-05-31T07:27:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T15:34:09.000Z", "avg_line_length": 31.0842105263, "max_line_length": 82, "alphanum_fraction": 0.5719607179, "include": true, "reason": "import numpy", "num_tokens": 829}
|
function iclust = initialize_clusters(Ucell, Nk, type, Lx, Ly)
% INITIALIZE_CLUSTERS  Produce an initial assignment of pixels to Nk clusters.
%
% Inputs:
%   Ucell - (features x pixels) matrix; only used by the 'random' scheme.
%   Nk    - number of clusters.
%   type  - initialization scheme: 'random', 'Voronoi', or 'squares'.
%   Lx,Ly - image dimensions in pixels (used by 'Voronoi' and 'squares').
%
% Output:
%   iclust - 1 x numel(pixels) row vector of cluster indices.
switch type
    case 'random'
        % Draw Nk random unit-norm vectors in feature space and assign each
        % pixel to the vector with the largest projection onto Ucell.
        vs = randn(size(Ucell,1), Nk);
        vs = bsxfun(@rdivide, vs, sum(vs.^2,1).^.5 + 1e-8);% normalize activity vectors
        vs = single(vs);
        xs = vs' * Ucell;
        [~, iclust] = max(xs,[],1);
    case 'Voronoi'
        % Scatter Nk random seed points over the Lx-by-Ly image and assign
        % each pixel to its nearest seed (squared Euclidean distance).
        xs = repmat(1:Lx, Ly, 1);
        ys = repmat((1:Ly)', 1, Lx);
        randx = rand(1, Nk) * Lx;
        randy = rand(1, Nk) * Ly;
        dx = repmat(xs(:), 1, Nk) - repmat(randx, numel(xs(:)), 1);
        dy = repmat(ys(:), 1, Nk) - repmat(randy, numel(ys(:)), 1);
        dxy = dx.^2 + dy.^2;
        [~, iclust] = min(dxy, [], 2);
    case 'squares'
        % Tile the image with a roughly sqrt(Nk) x sqrt(Nk) grid of
        % rectangles; each rectangle becomes one cluster.
        nsqrt = round(sqrt(Nk));
        xs = repmat(round(linspace(1, nsqrt, Lx)), Ly, 1);
        ys = repmat(round(linspace(1, nsqrt, Ly))', 1, Lx);
        iclust = xs + (ys-1) * nsqrt;
end
% Flatten to a row vector regardless of the scheme used.
iclust = iclust(:)';
|
{"author": "cortex-lab", "repo": "Suite2P", "sha": "c6a8ea9f01ffc8555429978e7fe97f843ad5b6d5", "save_path": "github-repos/MATLAB/cortex-lab-Suite2P", "path": "github-repos/MATLAB/cortex-lab-Suite2P/Suite2P-c6a8ea9f01ffc8555429978e7fe97f843ad5b6d5/cellDetection/initialize_clusters.m"}
|
#!/usr/bin/env python3
"""
A script for preprocessing data from the lingspam corpus data set and save it as
a numpy data files. The dataset can be downloaded from
http://www.aueb.gr/users/ion/data/lingspam_public.tar.gz
Usage:
Assuming the data set was downloaded and exctracted to the script's directory
location, you can invoke it as
$ ./preprocess.py lingspam_public/lemm_stop/*/*
This parses all files inside the lemm_stop folder of the data set.
"""
__author__ = "Sérgio Agostinho"
__email__ = "sergio(dot)r(dot)agostinho(at)gmail.com"
import sys
import re
import os.path
import numpy as np
def static_vars(**kwargs):
    """Decorator factory that attaches *kwargs* as attributes on a function.

    Mimics C-style ``static`` variables: each ``name=value`` pair becomes
    ``func.name = value`` on the decorated function, set once at decoration
    time and persisting across calls.  Returns the function unchanged
    otherwise.
    """
    def decorate(func):
        # Iterate items() directly instead of re-indexing kwargs by key.
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
# The compiled patterns below are attached to filter_email as C-style
# "static" variables via the static_vars decorator, so they are compiled
# exactly once:
#   p1: hyphens, replaced by spaces (splits hyphenated words)
#   p2: runs of 1-2 char tokens mid-string or at the start, and any
#       trailing non-alphanumeric run -- all replaced by a space
#   p3: runs of 2+ spaces, collapsed to a single space
#   p4: standalone digit sequences, replaced by the placeholder "#"
#       (all numbers are treated the same)
@static_vars(p1=re.compile(r"-"),\
             p2=re.compile(r" ([^ ]{1,2} )+|^([^ ]{1,2} )+|[^a-zA-Z0-9]+?$"),\
             p3=re.compile(r" {2,}"),\
             p4=re.compile(r"\b[\d]+\b"))
def filter_email(content: str):
    """Normalize an email body by applying p1..p4 in order, then strip."""
    return filter_email.p4.sub("#",\
           filter_email.p3.sub(" ",\
           filter_email.p2.sub(" ",\
           filter_email.p1.sub(" ", content)))).strip()
# Storage holders for the parsed corpus.
target_names = ["ham", "spam"]  # class names; index matches the bool target
targets = []     # per file: True if the file is spam
file_names = []  # base name of each parsed file
words = []       # per file: list of filtered words from the email body

# Spam files in the lingspam corpus are named "spmsg...".
pr = re.compile("spmsg")

# script accepts path wildcards
for i, path in enumerate(sys.argv[1:]):
    # Retrieve and store file name
    file_name = os.path.basename(path)
    file_names.append(file_name)
    # Check if spam
    targets.append(bool(pr.match(file_name)))
    # The content of interest sits on the third line (index 2) of each
    # corpus file.  Use a context manager so every file handle is closed
    # deterministically (the original left them to the garbage collector).
    with open(path) as fh:
        content = fh.read().splitlines()[2]
    email_words = filter_email(content).split()
    words.append(email_words)
    # Progress indicator, updated in place.
    sys.stdout.write("\r%.1f%%" % (i * 100 / (len(sys.argv) - 1)))
    sys.stdout.flush()

print("\nTargets: ", len(targets), " ", targets[0])
print("FileNames: ", len(file_names), " ", file_names[0])
print("Words: ", len(words), " ", words[0][0:10])

# Persist everything as a single compressed-archive-style .npz file.
np.savez("lingspam",\
         file_names=np.array(file_names, dtype=str),\
         target_names=np.array(target_names, dtype=str),\
         targets=np.array(targets, dtype=bool),\
         words=np.array(words, dtype=list))
|
{"hexsha": "1f164f6d8befc6434ee6024aeedb6047df64df4d", "size": 2413, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/lingspam/preprocess.py", "max_stars_repo_name": "SergioRAgostinho/bootstrap-ml", "max_stars_repo_head_hexsha": "1f96c58ee09a8a7fcb61e5f1017c9dea74c31805", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-03-22T10:54:52.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-16T09:55:42.000Z", "max_issues_repo_path": "data/lingspam/preprocess.py", "max_issues_repo_name": "SergioRAgostinho/bootstrap-ml", "max_issues_repo_head_hexsha": "1f96c58ee09a8a7fcb61e5f1017c9dea74c31805", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-03-22T20:24:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-26T09:07:32.000Z", "max_forks_repo_path": "data/lingspam/preprocess.py", "max_forks_repo_name": "SergioRAgostinho/bootstrap-ml", "max_forks_repo_head_hexsha": "1f96c58ee09a8a7fcb61e5f1017c9dea74c31805", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8111111111, "max_line_length": 80, "alphanum_fraction": 0.688769167, "include": true, "reason": "import numpy", "num_tokens": 664}
|
from os.path import dirname, join
import numpy as np
import obspy
import pytest
from pyfk import mpi_info
from pyfk.config.config import Config, SeisModel, SourceModel
from pyfk.gf.gf import calculate_gf
class TestFunctioncalculateGf_MPI(object):
    """Tests for calculate_gf that are intended to run under MPI."""

    @pytest.mark.mpi
    def test_mpi_info(self):
        # A usable MPI setup makes mpi_info() report success.
        assert mpi_info().startswith("MPI installed correctly")

    @pytest.mark.mpi
    def test_val_correct(self):
        # The MPI run must reproduce the single-core reference result,
        # which in turn matches the serial FK computation:
        #   perl fk.pl -Mhk/15/k -N512/0.1 10 20 30
        # (same setup as test_gf).
        receiver_distance = [10, 20, 30]
        raw_model = np.loadtxt(join(dirname(__file__), f"../data/hk"))
        seis_model = SeisModel(model=raw_model, use_kappa=True)
        cfg = Config(
            model=seis_model,
            source=SourceModel(sdep=15),
            npt=512,
            dt=0.1,
            receiver_distance=receiver_distance)
        result = calculate_gf(cfg)
        # Compare all 9 Green's function components per receiver against
        # the stored reference traces in data/hk_gf.  FK works in float32
        # while pyfk uses double, so require a high correlation rather
        # than exact equality.
        for irec, each_rec in enumerate(receiver_distance):
            for icomn in range(9):
                reference = obspy.read(
                    join(
                        dirname(__file__),
                        f"../data/hk_gf/{each_rec}.grn.{icomn}"))[0]
                coef = np.corrcoef(
                    reference.data,
                    result[irec][icomn].data,
                )[0, 1]
                # A constant (zero-variance) trace yields NaN; treat a
                # matching pair of such traces as perfectly correlated.
                if np.isnan(coef):
                    coef = 1.0
                assert coef > 0.99
|
{"hexsha": "d8cedcd6329e0ba5d0a545f483143390d45a1601", "size": 1688, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyfk/tests/gf/test_gf_mpi.py", "max_stars_repo_name": "ziyixi/pyfk", "max_stars_repo_head_hexsha": "2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-09-08T03:43:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T06:13:08.000Z", "max_issues_repo_path": "pyfk/tests/gf/test_gf_mpi.py", "max_issues_repo_name": "ziyixi/pyfk", "max_issues_repo_head_hexsha": "2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-12-16T01:52:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T14:04:27.000Z", "max_forks_repo_path": "pyfk/tests/gf/test_gf_mpi.py", "max_forks_repo_name": "ziyixi/pyfk", "max_forks_repo_head_hexsha": "2db56621cd4f9db5cf6a866fa0ca25fcb994b1d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-02-17T14:46:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T02:43:03.000Z", "avg_line_length": 33.76, "max_line_length": 118, "alphanum_fraction": 0.5539099526, "include": true, "reason": "import numpy", "num_tokens": 429}
|
#!/usr/bin/env python3
"""Main script for gaze direction inference from webcam feed."""
import argparse
import os
import queue
import threading
import time
from gazedb import GazeDB
import coloredlogs
import cv2 as cv
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
import keras
from keras import backend as K
from datasources import Video, Webcam
from models import ELG
import util.gaze
from keras import backend as K
if __name__ == '__main__':
# Initialise the obj
database = GazeDB()
# Set global log level
parser = argparse.ArgumentParser(description='Demonstration of landmarks localization.')
parser.add_argument('-v', type=str, help='logging level', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--from_video', type=str, help='Use this video path instead of webcam')
parser.add_argument('--record_video', type=str, help='Output path of video of demonstration.')
parser.add_argument('--fullscreen', action='store_true')
parser.add_argument('--headless', action='store_true')
parser.add_argument('--fps', type=int, default=60, help='Desired sampling rate of webcam')
parser.add_argument('--camera_id', type=int, default=0, help='ID of webcam to use')
args = parser.parse_args()
coloredlogs.install(
datefmt='%d/%m %H:%M',
fmt='%(asctime)s %(levelname)s %(message)s',
level=args.v.upper(),
)
# Check if GPU is available
# from tensorflow.python.client import device_lib
session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
gpu_available = False
try:
gpus = [d for d in K.tensorflow_backend._get_available_gpus()
if d.device_type == 'GPU']
print("\t\t GPUS: [{}]".format(gpus))
gpu_available = len(gpus) > 0
except:
pass
# Initialize Tensorflow session
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Session(config=session_config) as session:
# Declare some parameters
batch_size = 2
# Define webcam stream data source
# Change data_format='NHWC' if not using CUDA
if args.from_video:
assert os.path.isfile(args.from_video)
data_source = Video(args.from_video,
tensorflow_session=session, batch_size=batch_size,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(108, 180))
else:
data_source = Webcam(tensorflow_session=session, batch_size=batch_size,
camera_id=args.camera_id, fps=args.fps,
data_format='NCHW' if gpu_available else 'NHWC',
eye_image_shape=(36, 60))
# Define model
if args.from_video:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=3,
num_modules=3,
num_feature_maps=64,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
else:
model = ELG(
session, train_data={'videostream': data_source},
first_layer_stride=1,
num_modules=2,
num_feature_maps=32,
learning_schedule=[
{
'loss_terms_to_optimize': {'dummy': ['hourglass', 'radius']},
},
],
)
# Record output frames to file if requested
if args.record_video:
video_out = None
video_out_queue = queue.Queue()
video_out_should_stop = False
video_out_done = threading.Condition()
video_recorder = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'XVID'),
20, (1280, 720),
)
def _record_frame():
global video_out
last_frame_time = None
out_fps = 60
out_frame_interval = 1.0 / out_fps
while not video_out_should_stop:
frame_index = video_out_queue.get()
if frame_index is None:
break
# assert frame_index in data_source._frames
frame = data_source._frames[frame_index]['bgr']
h, w, _ = frame.shape
if video_out is None:
video_out = cv.VideoWriter(
args.record_video, cv.VideoWriter_fourcc(*'XVID'),
60, (w, h),
)
now_time = time.time()
if last_frame_time is not None:
time_diff = now_time - last_frame_time
while time_diff > 0.0:
# video_out.write(frame)
time_diff -= out_frame_interval
last_frame_time = now_time
video_out.release()
with video_out_done:
video_out_done.notify_all()
# record_thread = threading.Thread(target=_record_frame, name='record')
# record_thread.daemon = True
# record_thread.start()
# Begin visualization thread
inferred_stuff_queue = queue.Queue()
def _visualize_output():
last_frame_index = 0
last_frame_time = time.time()
fps_history = []
all_gaze_histories = []
if args.fullscreen:
cv.namedWindow('vis', cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty('vis', cv.WND_PROP_FULLSCREEN, cv.WINDOW_FULLSCREEN)
while True:
# If no output to visualize, show unannotated frame
if inferred_stuff_queue.empty():
next_frame_index = last_frame_index + 1
if next_frame_index in data_source._frames:
next_frame = data_source._frames[next_frame_index]
if 'faces' in next_frame and len(next_frame['faces']) == 0:
if not args.headless:
resized_img = cv.resize(next_frame['bgr'], (1280, 720))
cv.imshow('vis', resized_img)
video_recorder.write(resized_img)
# cv.imshow('vis', flipped_bgr)
if args.record_video:
video_out_queue.put_nowait(next_frame_index)
last_frame_index = next_frame_index
if cv.waitKey(1) & 0xFF == ord('q'):
return
continue
# Get output from neural network and visualize
output = inferred_stuff_queue.get()
bgr = None
line_lengths = []
look_flag = False
for j in range(batch_size):
print("Batch Size, J: ", batch_size, j)
frame_index = output['frame_index'][j]
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
# Decide which landmarks are usable
heatmaps_amax = np.amax(output['heatmaps'][j, :].reshape(-1, 18), axis=0)
can_use_eye = np.all(heatmaps_amax > 0.7)
can_use_eyelid = np.all(heatmaps_amax[0:8] > 0.75)
can_use_iris = np.all(heatmaps_amax[8:16] > 0.8)
start_time = time.time()
eye_index = output['eye_index'][j]
bgr = frame['bgr']
eye = frame['eyes'][eye_index]
eye_image = eye['image']
eye_side = eye['side']
eye_landmarks = output['landmarks'][j, :]
eye_radius = output['radius'][j][0]
if eye_side == 'left':
eye_landmarks[:, 0] = eye_image.shape[1] - eye_landmarks[:, 0]
eye_image = np.fliplr(eye_image)
# Embed eye image and annotate for picture-in-picture
eye_upscale = 2
eye_image_raw = cv.cvtColor(cv.equalizeHist(eye_image), cv.COLOR_GRAY2BGR)
eye_image_raw = cv.resize(eye_image_raw, (0, 0), fx=eye_upscale, fy=eye_upscale)
eye_image_annotated = np.copy(eye_image_raw)
if can_use_eyelid:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[0:8]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
eye_image_annotated,
[np.round(eye_upscale*eye_landmarks[8:16]).astype(np.int32)
.reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
eye_image_annotated,
tuple(np.round(eye_upscale*eye_landmarks[16, :]).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
face_index = int(eye_index / 2)
eh, ew, _ = eye_image_raw.shape
v0 = face_index * 2 * eh
v1 = v0 + eh
v2 = v1 + eh
u0 = 0 if eye_side == 'left' else ew
u1 = u0 + ew
bgr[v0:v1, u0:u1] = eye_image_raw
bgr[v1:v2, u0:u1] = eye_image_annotated
# Visualize preprocessing results
frame_landmarks = (frame['smoothed_landmarks']
if 'smoothed_landmarks' in frame
else frame['landmarks'])
for f, face in enumerate(frame['faces']):
# for landmark in frame_landmarks[f][:-1]:
# cv.drawMarker(bgr, tuple(np.round(landmark).astype(np.int32)),
# color=(0, 0, 255), markerType=cv.MARKER_STAR,
# markerSize=2, thickness=1, line_type=cv.LINE_AA)
cv.rectangle(
bgr, tuple(np.round(face[:2]).astype(np.int32)),
tuple(np.round(np.add(face[:2], face[2:])).astype(np.int32)),
color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
# Transform predictions
eye_landmarks = np.concatenate([eye_landmarks,
[[eye_landmarks[-1, 0] + eye_radius,
eye_landmarks[-1, 1]]]])
eye_landmarks = np.asmatrix(np.pad(eye_landmarks, ((0, 0), (0, 1)),
'constant', constant_values=1.0))
eye_landmarks = (eye_landmarks *
eye['inv_landmarks_transform_mat'].T)[:, :2]
eye_landmarks = np.asarray(eye_landmarks)
eyelid_landmarks = eye_landmarks[0:8, :]
iris_landmarks = eye_landmarks[8:16, :]
iris_centre = eye_landmarks[16, :]
eyeball_centre = eye_landmarks[17, :]
eyeball_radius = np.linalg.norm(eye_landmarks[18, :] -
eye_landmarks[17, :])
# Smooth and visualize gaze direction
num_total_eyes_in_frame = len(frame['eyes'])
if len(all_gaze_histories) != num_total_eyes_in_frame:
all_gaze_histories = [list() for _ in range(num_total_eyes_in_frame)]
gaze_history = all_gaze_histories[eye_index]
if can_use_eye:
# Visualize landmarks
# cv.drawMarker( # Eyeball centre
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# color=(0, 255, 0), markerType=cv.MARKER_CROSS, markerSize=4,
# thickness=1, line_type=cv.LINE_AA,
# )
# cv.circle( # Eyeball outline
# bgr, tuple(np.round(eyeball_centre).astype(np.int32)),
# int(np.round(eyeball_radius)), color=(0, 255, 0),
# thickness=1, lineType=cv.LINE_AA,
# )
# Draw "gaze"
# from models.elg import estimate_gaze_from_landmarks
# current_gaze = estimate_gaze_from_landmarks(
# iris_landmarks, iris_centre, eyeball_centre, eyeball_radius)
i_x0, i_y0 = iris_centre
e_x0, e_y0 = eyeball_centre
theta = -np.arcsin(np.clip((i_y0 - e_y0) / eyeball_radius, -1.0, 1.0))
phi = np.arcsin(np.clip((i_x0 - e_x0) / (eyeball_radius * -np.cos(theta)),
-1.0, 1.0))
current_gaze = np.array([theta, phi])
gaze_history.append(current_gaze)
gaze_history_max_len = 10
if len(gaze_history) > gaze_history_max_len:
gaze_history = gaze_history[-gaze_history_max_len:]
bgr, line_length = util.gaze.draw_gaze(bgr, iris_centre, np.mean(gaze_history, axis=0),
length=120.0, thickness=1)
line_lengths.append(line_length)
else:
gaze_history.clear()
if can_use_eyelid:
cv.polylines(
bgr, [np.round(eyelid_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(255, 255, 0), thickness=1, lineType=cv.LINE_AA,
)
if can_use_iris:
cv.polylines(
bgr, [np.round(iris_landmarks).astype(np.int32).reshape(-1, 1, 2)],
isClosed=True, color=(0, 255, 255), thickness=1, lineType=cv.LINE_AA,
)
cv.drawMarker(
bgr, tuple(np.round(iris_centre).astype(np.int32)),
color=(0, 255, 255), markerType=cv.MARKER_CROSS, markerSize=4,
thickness=1, line_type=cv.LINE_AA,
)
            # Accumulate this eye's visualization time (milliseconds) into the
            # frame's 'visualization' bucket; several eyes can contribute to the
            # same frame, hence add instead of overwrite after the first.
            dtime = 1e3*(time.time() - start_time)
            if 'visualization' not in frame['time']:
                frame['time']['visualization'] = dtime
            else:
                frame['time']['visualization'] += dtime
            def _dtime(before_id, after_id):
                """Elapsed milliseconds between two timestamps in frame['time']."""
                return int(1e3 * (frame['time'][after_id] - frame['time'][before_id]))
            def _dstr(title, before_id, after_id):
                """Format a labelled duration, e.g. 'read: 12ms'."""
                return '%s: %dms' % (title, _dtime(before_id, after_id))
            # Once the frame's last eye has been processed, finalize the frame:
            # timings, FPS overlay, looking/not-looking decision, display/record.
            if eye_index == len(frame['eyes']) - 1:
                # Calculate timings
                frame['time']['after_visualization'] = time.time()
                fps = int(np.round(1.0 / (time.time() - last_frame_time)))
                fps_history.append(fps)
                # Smooth the FPS readout over (up to) the last 60 frames.
                if len(fps_history) > 60:
                    fps_history = fps_history[-60:]
                fps_str = '%d FPS' % np.mean(fps_history)
                last_frame_time = time.time()
                fh, fw, _ = bgr.shape
                # Draw the FPS text twice, offset by one pixel and in two
                # colours, to fake a drop shadow in the bottom-right corner.
                cv.putText(bgr, fps_str, org=(fw - 110, fh - 20),
                           fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
                           color=(0, 0, 0), thickness=1, lineType=cv.LINE_AA)
                cv.putText(bgr, fps_str, org=(fw - 111, fh - 21),
                           fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.79,
                           color=(255, 255, 255), thickness=1, lineType=cv.LINE_AA)
                # Debug logging — only for batch item j == 1, presumably to
                # limit log spam; confirm against the enclosing batch loop.
                if j == 1:
                    print("\n\n\t\t Line Lengths: ", line_lengths)
                    print("\t\t Frame Index: ", frame['frame_index'])
                    # print("\n\n\t\t Face: ", (np.round(face[2] + 5).astype(np.int32), np.round(face[3] - 10).astype(np.int32)))
                # A short drawn gaze ray (< 40 px) is taken to mean the subject
                # is looking towards the camera.
                for line_length in line_lengths:
                    if line_length < 40:
                        look_flag = True
                # NOTE(review): look_flag is never reset to False in this span;
                # unless it is cleared per frame elsewhere, "Looking" becomes
                # sticky once set — verify against the enclosing loop. The two
                # branches below also duplicate the cvtColor/MarkingProcess
                # calls and could be hoisted out of the if/else.
                if look_flag and line_lengths:
                    text_look = "Looking"
                    print("\t LOOKING")
                    # Persist the RGB frame, face boxes and decision.
                    rgb_image = cv.cvtColor(frame['bgr'], cv.COLOR_BGR2RGB)
                    database.MarkingProcess(img = rgb_image, bboxs = frame['faces'], lookingflag = look_flag, frameindex = frame['frame_index'])
                else:
                    text_look = "Not Looking"
                    print("\t Not Looking")
                    rgb_image = cv.cvtColor(frame['bgr'], cv.COLOR_BGR2RGB)
                    database.MarkingProcess(img = rgb_image, bboxs = frame['faces'], lookingflag = look_flag, frameindex = frame['frame_index'])
                # Label the face with the decision text near (face[0], face[1])
                # — presumably the box's top-left corner; confirm upstream.
                cv.putText(bgr, text_look, (np.round(face[0] + 5).astype(np.int32), np.round(face[1] - 10).astype(np.int32)),
                           fontFace=cv.FONT_HERSHEY_DUPLEX, fontScale=0.8,
                           color=(0, 0, 255), thickness=1, lineType=cv.LINE_AA)
                # Show and record a fixed 720p view; note video_recorder only
                # receives frames when not running headless.
                if not args.headless:
                    resized_img = cv.resize(bgr, (1280, 720))
                    cv.imshow('vis', resized_img)
                    video_recorder.write(resized_img)
                # cv.imshow('vis', bgr) 1.14.0
                last_frame_index = frame_index
                # Record frame?
                if args.record_video:
                    video_out_queue.put_nowait(frame_index)
                # Quit?  ('q' in the OpenCV window exits the visualizer.)
                if cv.waitKey(1) & 0xFF == ord('q'):
                    return
                # Print timings once every 60 frames.
                if frame_index % 60 == 0:
                    latency = _dtime('before_frame_read', 'after_visualization')
                    processing = _dtime('after_frame_read', 'after_visualization')
                    timing_string = ', '.join([
                        _dstr('read', 'before_frame_read', 'after_frame_read'),
                        _dstr('preproc', 'after_frame_read', 'after_preprocessing'),
                        'infer: %dms' % int(frame['time']['inference']),
                        'vis: %dms' % int(frame['time']['visualization']),
                        'proc: %dms' % processing,
                        'latency: %dms' % latency,
                    ])
                    print('%08d [%s] %s' % (frame_index, fps_str, timing_string))
visualize_thread = threading.Thread(target=_visualize_output, name='visualization')
visualize_thread.daemon = True
visualize_thread.start()
# Do inference forever
infer = model.inference_generator()
while True:
output = next(infer)
for frame_index in np.unique(output['frame_index']):
if frame_index not in data_source._frames:
continue
frame = data_source._frames[frame_index]
if 'inference' in frame['time']:
frame['time']['inference'] += output['inference_time']
else:
frame['time']['inference'] = output['inference_time']
inferred_stuff_queue.put_nowait(output)
if not visualize_thread.isAlive():
break
if not data_source._open:
break
    # Close video recording: flag the writer to stop, push a None sentinel
    # through the frame queue, then block until completion is signalled.
    if args.record_video and video_out is not None:
        video_out_should_stop = True  # presumably polled by the writer loop — verify
        video_out_queue.put_nowait(None)  # None acts as the end-of-stream sentinel
        # video_out_done appears to be a threading.Condition.
        # NOTE(review): wait() without a predicate loop can return on a
        # spurious wakeup or hang if notify happened first — confirm the
        # writer-side handshake.
        with video_out_done:
            video_out_done.wait()
|
{"hexsha": "9c22d1849476b159a27d9c682472d9bdeb27c6fc", "size": 21965, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/elg_demo.py", "max_stars_repo_name": "KayaDevSolutions/GazeML", "max_stars_repo_head_hexsha": "a0cc072bad7d77b8c5b5698082b77cfb1011f45b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/elg_demo.py", "max_issues_repo_name": "KayaDevSolutions/GazeML", "max_issues_repo_head_hexsha": "a0cc072bad7d77b8c5b5698082b77cfb1011f45b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elg_demo.py", "max_forks_repo_name": "KayaDevSolutions/GazeML", "max_forks_repo_head_hexsha": "a0cc072bad7d77b8c5b5698082b77cfb1011f45b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2747252747, "max_line_length": 152, "alphanum_fraction": 0.4705212839, "include": true, "reason": "import numpy", "num_tokens": 4391}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.