import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.express as px
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
if __name__ == "__main__":
root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/processed")
results_path = "2020/02/8_9_finetune_palminized_resnet_new_lr/"
src_results_path = root_source_dir / results_path / "results_layers.csv"
root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
output_dir = root_output_dir / results_path / "histogrammes"
output_dir.mkdir(parents=True, exist_ok=True)
df = pd.read_csv(src_results_path, header=0)
df = df.fillna("None")
# df["compression-rate"] = df["nb-non-zero-base"] / df["nb-non-zero-compressed"]
# df["non-zero-rate"] = df["nb-non-zero-base"] / df["nb-non-zero-reconstructed"]
# df["non-zero-prop"] = df["nb-non-zero-reconstructed"] / df["nb-non-zero-base"]
# sparsity_factors = sorted(set(df_palminized["--sparsity-factor"]))
# fig = go.Figure()
# fig.add_trace(go.Scatter(x=df["entropy-base-sv-normalized"],
# y=df["entropy-recons-sv-normalized"],
# mode='markers',
# ))
models = set(df["model"].values)
datasets = set(df["data"].values)
for data in datasets:
df_data = df[df["data"] == data]
for model in models:
df_model = df_data[df_data["model"] == model]
fig = px.scatter(df_model, x="entropy-base-sv-normalized", y="entropy-recons-sv-normalized", color="nb-factor-param",
size='diff-approx', hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed', 'compression-rate', 'non-zero-rate'])
fig.update_layout(title="{} {} Entropie SV reconstruit en fonction de Entropie SV base".format(data, model),
xaxis_title="Entropie SV base",
yaxis_title="Entropie SV reconstruit",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="entropy-base-sv-normalized", y="non-zero-prop", color="nb-factor-param",
size='diff-approx', hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} Prop non zero reconstruit en fonction entropie base sv".format(data, model),
xaxis_title="Entropie SV base",
yaxis_title="Prop non zero reconstruit",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="entropy-base-sv-normalized", y="diff-approx", color="nb-factor-param",
hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} Erreur en fonction entropie base sv".format(data, model),
xaxis_title="Entropie SV base",
yaxis_title="Erreur reconstruction",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="nb-non-zero-base", y="diff-approx", color="nb-factor-param",
hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} Erreur en fonction taille".format(data, model),
xaxis_title="Nb non-zero base",
yaxis_title="Erreur reconstruction",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="idx-layer", y="diff-approx", color="nb-factor-param",
hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} Erreur en fonction profondeur".format(data, model),
xaxis_title="Idx layer (processing order)",
yaxis_title="Erreur reconstruction",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="idx-layer", y="entropy-base-sv-normalized", color="nb-factor-param",
hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} entropie SV en fonction profondeur".format(data, model),
xaxis_title="Idx layer (processing order)",
yaxis_title="Entropie SV",
yaxis_type="linear"
)
fig.show()
fig = px.scatter(df_model, x="nb-non-zero-base", y="entropy-base-sv-normalized", color="nb-factor-param",
hover_data=['model', 'layer-name', 'data', 'nb-factor-param',
'sparsity-factor', 'nb-non-zero-compressed',
'nb-non-zero-base', 'nb-non-zero-reconstructed',
'compression-rate', 'non-zero-rate', "entropy-recons-sv-normalized"])
fig.update_layout(title="{} {} entropie SV en fonction taille".format(data, model),
xaxis_title="Taille couche",
yaxis_title="Entropie SV",
yaxis_type="linear"
)
fig.show()
|
{"hexsha": "e5fc011c0d60d2849c8fd5593068e3b62d0ada2b", "size": 7722, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/visualization/2020/02/8_9_finetune_palminized_analyze_matrices.py", "max_stars_repo_name": "lucgiffon/psm-nets", "max_stars_repo_head_hexsha": "dec43c26281febf6e5c8b8f42bfb78098ae7101d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-15T07:05:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T07:05:18.000Z", "max_issues_repo_path": "code/visualization/2020/02/8_9_finetune_palminized_analyze_matrices.py", "max_issues_repo_name": "lucgiffon/psm-nets", "max_issues_repo_head_hexsha": "dec43c26281febf6e5c8b8f42bfb78098ae7101d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-15T06:12:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-16T10:05:36.000Z", "max_forks_repo_path": "code/visualization/2020/02/8_9_finetune_palminized_analyze_matrices.py", "max_forks_repo_name": "lucgiffon/psm-nets", "max_forks_repo_head_hexsha": "dec43c26281febf6e5c8b8f42bfb78098ae7101d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.625, "max_line_length": 148, "alphanum_fraction": 0.4882154882, "include": true, "reason": "import numpy", "num_tokens": 1471}
|
"""Use water polygons from openstreetmapdata.org to mask
seas and oceans, and `natural=water|wetland` polygons
from the OSM database.
"""
import os
import shutil
import requests
import fiona
import geopandas as gpd
import numpy as np
import psycopg2
import rasterio.features
from shapely.geometry import shape
from tqdm import tqdm
from appdirs import user_data_dir
from metadata import CASE_STUDIES, DATA_DIR
URL = 'http://data.openstreetmapdata.com/water-polygons-split-4326.zip'
def download():
"""Download water-polygons shapefile."""
dst_dir = user_data_dir(appname='osmxtract')
os.makedirs(dst_dir, exist_ok=True)
filename = URL.split('/')[-1]
dst_file = os.path.join(dst_dir, filename)
r = requests.head(URL)
content_length = int(r.headers['Content-Length'])
progress = tqdm(total=content_length, unit='B', unit_scale=True)
chunk_size = 1024 ** 2
with requests.get(URL, stream=True) as r:
with open(dst_file, 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:
                    f.write(chunk)
                    progress.update(len(chunk))
    progress.close()
def is_downloaded():
"""Check if seas shapefile is downloaded."""
data_dir = user_data_dir(appname='osmxtract')
expected_path = os.path.join(
data_dir, 'water-polygons-split-4326.zip'
)
return os.path.isfile(expected_path)
def clean():
"""Clean downloaded data."""
data_dir = user_data_dir(appname='osmxtract')
if os.path.isdir(data_dir):
shutil.rmtree(data_dir)
def get_sea_polygons(bounds):
"""Get sea polygons according to the provided bounds.
Parameters
----------
bounds : tuple
Bounds decimal lat/lon coordinates (xmin, ymin, xmax, ymax).
Returns
-------
feature : iterable
Output features as an iterable of GeoJSON-like dicts.
"""
data_dir = user_data_dir(appname='osmxtract')
if not is_downloaded():
download()
zip_path = os.path.join(data_dir, 'water-polygons-split-4326.zip')
shp_path = '/water-polygons-split-4326/water_polygons.shp'
with fiona.open(shp_path, vfs=f'zip://{zip_path}') as src:
features = [feature for _, feature in src.items(bbox=bounds)]
return features
def get_water_bodies(db, case_study_name, values=('water', 'wetland')):
    """Get the geometries whose `natural` key matches one of `values`
    (`water` or `wetland` by default) in the OSM database for a given
    case study.
    """
    values_sql = ", ".join(f"'{v}'" for v in values)
    query = f"""
    SELECT
        osm_polygon.way AS geom, osm_polygon.natural AS value
    FROM
        osm_polygon, datafusion
    WHERE
        osm_polygon.natural IN ({values_sql})
    AND
        datafusion.name = '{case_study_name}'
    AND
        ST_Intersects(osm_polygon.way, datafusion.geom)
    """
    water_bodies = gpd.read_postgis(query, db)
    water_bodies.crs = {'init': 'epsg:4326'}
    return water_bodies
def get_sea_raster(bounds, height, width, crs, affine):
"""Get binary water mask (sea and ocean only) according to
the provided bounds.
Parameters
----------
bounds : tuple
Bounds decimal lat/lon coordinates (xmin, ymin, xmax, ymax).
height : int
Raster height.
width : int
Raster width.
crs : dict
Target CRS.
affine : Affine
Target affine transformation.
Returns
-------
water : numpy 2d array
Water binary mask as a 2D numpy array.
"""
features = get_sea_polygons(bounds)
if len(features) > 0:
geodataframe = gpd.GeoDataFrame.from_features(features)
geodataframe.crs = {'init': 'epsg:4326'}
geodataframe = geodataframe.to_crs(crs)
geoms = ((geom, 1) for geom in geodataframe.geometry)
return rasterio.features.rasterize(
shapes=geoms, fill=0, all_touched=True,
transform=affine, out_shape=(height, width),
dtype=np.uint8
)
else:
return np.zeros(shape=(height, width), dtype=np.uint8)
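# Usage sketch for get_sea_raster (hypothetical coastal bounds and a 100x100
# target grid; from_bounds is rasterio.transform.from_bounds):
#   from rasterio.transform import from_bounds
#   bounds = (3.0, 51.0, 3.2, 51.2)
#   aff = from_bounds(*bounds, width=100, height=100)
#   mask = get_sea_raster(bounds, 100, 100, {'init': 'epsg:4326'}, aff)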
def get_water_bodies_raster(db, case_study_name, height, width, crs, affine):
"""Get binary water mask (water bodies + wetland) for
a given case study.
Parameters
----------
db : db connection
OSM database connection.
case_study_name : str
Case study ID.
height : int
Target raster height.
width : int
Target raster width.
crs : dict
Target raster CRS.
affine : Affine
Target raster affine transformation.
Returns
-------
water : numpy 2d array
Water binary mask as a 2d numpy array.
"""
features = get_water_bodies(db, case_study_name)
features = features.to_crs(crs)
if len(features) > 0:
return rasterio.features.rasterize(
shapes=((geom, 1) for geom in features.geometry),
transform=affine,
out_shape=(height, width),
dtype=np.uint8)
else:
return np.zeros(shape=(height, width), dtype=np.uint8)
if __name__ == '__main__':
db = psycopg2.connect(
database='osm',
user='maupp',
password='maupp',
host='localhost'
)
for case_study in CASE_STUDIES:
print(f'Processing {case_study.name}...')
aoi = shape(case_study.aoi['geometry'])
sea = get_sea_raster(
bounds=aoi.bounds,
height=case_study.height,
width=case_study.width,
crs=case_study.crs,
affine=case_study.affine
)
water_bodies = get_water_bodies_raster(
db=db, case_study_name=case_study.id,
height=case_study.height,
width=case_study.width,
affine=case_study.affine,
crs=case_study.crs
)
mask = np.max([sea, water_bodies], axis=0)
dst_dir = os.path.join(DATA_DIR, 'processed', 'masks', case_study.id)
os.makedirs(dst_dir, exist_ok=True)
dst_file = os.path.join(dst_dir, 'water.tif')
dst_profile = case_study.profile.copy()
        dst_profile.update(dtype=np.uint8, compress='lzw', nodata=None)
with rasterio.open(dst_file, 'w', **dst_profile) as dst:
dst.write(mask, 1)
db.close()
|
{"hexsha": "1d7fb901184a4ae23cacc3722842274783867ee5", "size": 6230, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/land_masks.py", "max_stars_repo_name": "yannforget/landsat-sentinel-fusion", "max_stars_repo_head_hexsha": "13872d733f4b3958a479b6da9477a83dc6ab1369", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-11-24T17:43:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T03:44:29.000Z", "max_issues_repo_path": "src/land_masks.py", "max_issues_repo_name": "shepherdmeng/landsat-sentinel-fusion", "max_issues_repo_head_hexsha": "13872d733f4b3958a479b6da9477a83dc6ab1369", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/land_masks.py", "max_forks_repo_name": "shepherdmeng/landsat-sentinel-fusion", "max_forks_repo_head_hexsha": "13872d733f4b3958a479b6da9477a83dc6ab1369", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-04-26T15:42:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T14:42:41.000Z", "avg_line_length": 29.9519230769, "max_line_length": 77, "alphanum_fraction": 0.632905297, "include": true, "reason": "import numpy", "num_tokens": 1547}
|
from __future__ import division
import numpy as np
import pyopencl as cl
import loopy as lp
from pyopencl.tools import pytest_generate_tests_for_pyopencl \
as pytest_generate_tests
def test_laplacian_stiffness(ctx_factory):
dtype = np.float32
ctx = ctx_factory()
order = "C"
dim = 2 # (baked into code)
Nq = 40 # num. quadrature points (baked into code)
Nb = 20 # num. basis functions (baked into code)
Nc = 100 # num. cells (run-time symbolic)
from pymbolic import var
Nc_sym = var("Nc")
knl = lp.make_kernel(ctx.devices[0],
"[Nc] -> {[K,i,j,q, dx_axis, ax_b]: 0<=K<Nc and 0<=i,j<%(Nb)d and 0<=q<%(Nq)d "
"and 0<= dx_axis, ax_b < %(dim)d}"
% dict(Nb=Nb, Nq=Nq, dim=dim),
[
"dPsi(ij, dxi) := sum_float32(@ax_b,"
" jacInv[ax_b,dxi,K,q] * DPsi[ax_b,ij,q])",
"A[K, i, j] = sum_float32(q, w[q] * jacDet[K,q] * ("
"sum_float32(dx_axis, dPsi$one(i,dx_axis)*dPsi$two(j,dx_axis))))"
],
[
lp.GlobalArg("jacInv", dtype, shape=(dim, dim, Nc_sym, Nq), order=order),
lp.ConstantArg("DPsi", dtype, shape=(dim, Nb, Nq), order=order),
lp.GlobalArg("jacDet", dtype, shape=(Nc_sym, Nq), order=order),
lp.ConstantArg("w", dtype, shape=(Nq,), order=order),
lp.GlobalArg("A", dtype, shape=(Nc_sym, Nb, Nb), order=order),
lp.ValueArg("Nc", np.int32, approximately=1000),
],
name="lapquad", assumptions="Nc>=1")
knl = lp.tag_inames(knl, dict(ax_b="unr"))
seq_knl = knl
def variant_fig31(knl):
# This (mostly) reproduces Figure 3.1.
knl = lp.tag_inames(knl, {"dx_axis": "unr"})
return knl, ["K", "i", "j", "q", "ax_b_insn"]
def variant_pg4(knl):
# This (mostly) reproduces the unlabeled code snippet on pg. 4.
knl = lp.tag_inames(knl, {"dx_axis": "unr"})
Ncloc = 16
knl = lp.split_iname(knl, "K", Ncloc,
outer_iname="Ko", inner_iname="Kloc")
return knl, ["Ko", "Kloc", "i", "j", "q", "ax_b_insn"]
def variant_fig32(knl):
# This (mostly) reproduces Figure 3.2.
Ncloc = 16
knl = lp.split_iname(knl, "K", Ncloc,
outer_iname="Ko", inner_iname="Kloc")
knl = lp.precompute(knl, "dPsi", np.float32, ["i", "q", "dx_axis"],
default_tag=None)
knl = lp.tag_inames(knl, {"dx_axis": "unr", "dxi": "unr"})
return knl, ["Ko", "Kloc", "dPsi_q", "ij", "i", "j", "q", "ax_b_insn"]
def variant_fig33(knl):
# This is meant to (mostly) reproduce Figure 3.3.
Ncloc = 16
knl = lp.split_iname(knl, "K", Ncloc,
outer_iname="Ko", inner_iname="Kloc")
knl = lp.precompute(knl, "dPsi$one", np.float32, ["dx_axis"], default_tag=None)
knl = lp.tag_inames(knl, {"j": "ilp.seq"})
return knl, ["Ko", "Kloc"]
def variant_simple_gpu(knl):
# This is a simple GPU-ish variant.
# It's not the same thing as Matt's code, but I'll need some more time
# to reverse-engineer what is going on there. Some discussion might
# help, too. :)
knl = lp.tag_inames(knl, {"dx_axis": "unr"})
Ncloc = 16
knl = lp.split_iname(knl, "K", Ncloc,
outer_iname="Ko", inner_iname="Kloc",
outer_tag="g.0")
knl = lp.tag_inames(knl, {"i": "l.1", "j": "l.0"})
return knl, ["K", "i", "j", "q", "ax_b_insn"]
def variant_simple_gpu_prefetch(knl):
# This adds prefetching to the GPU variant above.
# In this variant (on my machine), loopy makes a silly choice
# for the upper bound of Kloc (it uses Nc). I'll investigate and
# fix that. (FIXME)
knl = lp.tag_inames(knl, {"dx_axis": "unr"})
Ncloc = 16
knl = lp.split_iname(knl, "K", Ncloc,
outer_iname="Ko", inner_iname="Kloc",
outer_tag="g.0")
knl = lp.tag_inames(knl, {"i": "l.1", "j": "l.0"})
knl = lp.add_prefetch(knl, "w", ["q"], default_tag="l.auto")
knl = lp.add_prefetch(knl, "DPsi", [0, 1, 2], default_tag="l.auto")
knl = lp.add_prefetch(knl, "jacInv", [0, 1, 3], default_tag="l.auto")
knl = lp.add_prefetch(knl, "jacDet", [1], default_tag="l.auto")
return knl, ["K", "i", "j", "q", "ax_b_insn"]
# Plug in variant name here
# |
# v
for variant in [variant_fig33]:
var_knl, loop_prio = variant(knl)
kernel_gen = lp.generate_loop_schedules(var_knl,
loop_priority=loop_prio)
kernel_gen = lp.check_kernels(kernel_gen, dict(Nc=Nc))
        # print(lp.preprocess_kernel(var_knl))
lp.auto_test_vs_ref(seq_knl, ctx, kernel_gen,
op_count=0, op_label="GFlops",
parameters={"Nc": Nc}, print_ref_code=True)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
        from pytest import main
        main([__file__])
|
{"hexsha": "18f2a5bfabdd52abad9d78aacf4f1d5be53b5ac1", "size": 5193, "ext": "py", "lang": "Python", "max_stars_repo_path": "proto-tests/test_fem_assembly.py", "max_stars_repo_name": "danshapero/loopy", "max_stars_repo_head_hexsha": "d3ede00fa60680aa8df487d56fb0549d4582b976", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-02T19:55:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T19:55:04.000Z", "max_issues_repo_path": "proto-tests/test_fem_assembly.py", "max_issues_repo_name": "danshapero/loopy", "max_issues_repo_head_hexsha": "d3ede00fa60680aa8df487d56fb0549d4582b976", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proto-tests/test_fem_assembly.py", "max_forks_repo_name": "danshapero/loopy", "max_forks_repo_head_hexsha": "d3ede00fa60680aa8df487d56fb0549d4582b976", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3265306122, "max_line_length": 91, "alphanum_fraction": 0.5459272097, "include": true, "reason": "import numpy", "num_tokens": 1628}
|
import numpy as np
import input_data
class NearestNeighbor:
def __init__(self):
pass
def train(self, X, y):
""" X is N x D where each row is an example. Y is 1-dimension of size N """
# the nearest neighbor classifier simply remembers all the training data
self.Xtr = X
self.ytr = y
def predict(self, X):
""" X is N x D where each row is an example we wish to predict label for """
num_test = X.shape[0]
        # let's make sure that the output type matches the input type
Ypred = np.zeros(num_test, dtype = self.ytr.dtype)
# loop over all test rows
        for i in range(num_test):
# find the nearest training image to the i'th test image
# using the squared L2 distance
distances = np.sum(np.square(self.Xtr - X[i,:]), axis = 1)
min_index = np.argmin(distances) # get the index with smallest distance
Ypred[i] = self.ytr[min_index] # predict the label of the nearest example
return Ypred
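    # A vectorized alternative (sketch, not part of the original file): compute
    # all pairwise squared L2 distances at once via the expansion
    # ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2, trading memory for speed.
    def predict_vectorized(self, X):
        """ Same labels as predict(), without the per-row Python loop """
        dists = (np.sum(X ** 2, axis=1, keepdims=True)
                 - 2 * X.dot(self.Xtr.T)
                 + np.sum(self.Xtr ** 2, axis=1))
        return self.ytr[np.argmin(dists, axis=1)]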
datasets = input_data.read_data_sets('data')
Xtr_rows = datasets.train.images
Ytr = datasets.train.labels
Xte_rows = datasets.validation.images
validation = datasets.validation.labels
nn = NearestNeighbor() # create a Nearest Neighbor classifier class
nn.train(Xtr_rows, Ytr) # train the classifier on the training images and labels
Yte_predict = nn.predict(Xte_rows) # predict labels on the test images
correct = sum(Yte_predict == validation)
# and now print the classification accuracy, which is the average number
# of examples that are correctly predicted (i.e. label matches)
print('accuracy: %f' % (float(correct) / len(validation)))
|
{"hexsha": "ab2fac70c8a99c8150e752c730198232d20b9947", "size": 1609, "ext": "py", "lang": "Python", "max_stars_repo_path": "mnist/nearestNeighbour.py", "max_stars_repo_name": "andpol5/whaleDetector", "max_stars_repo_head_hexsha": "2dd0bb9eaa3ba0281a72b268a87a8f8a6c40d4bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-01-23T18:24:45.000Z", "max_stars_repo_stars_event_max_datetime": "2016-01-23T18:24:45.000Z", "max_issues_repo_path": "mnist/nearestNeighbour.py", "max_issues_repo_name": "andpol5/whaleDetector", "max_issues_repo_head_hexsha": "2dd0bb9eaa3ba0281a72b268a87a8f8a6c40d4bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mnist/nearestNeighbour.py", "max_forks_repo_name": "andpol5/whaleDetector", "max_forks_repo_head_hexsha": "2dd0bb9eaa3ba0281a72b268a87a8f8a6c40d4bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2340425532, "max_line_length": 80, "alphanum_fraction": 0.7134866377, "include": true, "reason": "import numpy", "num_tokens": 398}
|
from sympy import oo
def wall_time(pos, vel, radius): return (1.0-radius-pos)/vel if vel>0.0 else (pos-radius)/abs(vel) if vel<0.0 else float(oo)
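if __name__ == "__main__":
    # Worked example (hypothetical values): a disk of radius 0.1 at pos=0.3
    # moving right with vel=1.5 hits the wall of the unit box after
    # (1 - 0.1 - 0.3) / 1.5 = 0.4 time units.
    print(wall_time(0.3, 1.5, 0.1))   # 0.4
    # A disk at rest never collides; float(oo) converts to inf.
    print(wall_time(0.5, 0.0, 0.1))   # inf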
|
{"hexsha": "3c3b1f75cf099592f20b9f3fb8aa02e9dab8062a", "size": 148, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter 2/wall_time.py", "max_stars_repo_name": "indrag49/Computational-Stat-Mech", "max_stars_repo_head_hexsha": "0877f54a0245fce815f03478f4fb219fd6314951", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2018-06-29T12:22:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T03:18:18.000Z", "max_issues_repo_path": "Chapter 2/wall_time.py", "max_issues_repo_name": "indrag49/Computational-Stat-Mech", "max_issues_repo_head_hexsha": "0877f54a0245fce815f03478f4fb219fd6314951", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter 2/wall_time.py", "max_forks_repo_name": "indrag49/Computational-Stat-Mech", "max_forks_repo_head_hexsha": "0877f54a0245fce815f03478f4fb219fd6314951", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-11-30T01:56:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-23T15:29:56.000Z", "avg_line_length": 49.3333333333, "max_line_length": 125, "alphanum_fraction": 0.7094594595, "include": true, "reason": "from sympy", "num_tokens": 50}
|
# Copyright 2017 You8M team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn.model_selection import KFold
from tensorflow import gfile
import numpy as np
def split_fold(in_pattern, rettrain=True, fold=0, cvs=5, include_validation=True, split_seed=0):
    """
    Splits the elements of the in_pattern into training and test sets
    :param in_pattern: string of tfrecord patterns
    :param rettrain: return training set (True) or leave-out set (False)
    :param fold: which fold to process
    :param cvs: how many folds you want
    :param include_validation: include the matching validation records
    :param split_seed: seed for shuffling files before splitting (0 disables shuffling)
    :return: subset of tfrecords
    """
assert fold < cvs
files = gfile.Glob(in_pattern)
if split_seed > 0:
kf = KFold(n_splits=cvs, shuffle=True, random_state=split_seed)
else:
kf = KFold(n_splits=cvs)
for i, (train, test) in enumerate(kf.split(files)):
if i == fold:
break
if rettrain:
retfiles = list(np.array(files)[train])
else:
retfiles = list(np.array(files)[test])
    if include_validation:
addition = [fname.replace('train', 'validate') for fname in retfiles]
retfiles += addition
return retfiles
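# Minimal usage sketch (hypothetical glob pattern):
#   train_files = split_fold('/data/yt8m/train*.tfrecord', rettrain=True, fold=0, cvs=5)
#   test_files = split_fold('/data/yt8m/train*.tfrecord', rettrain=False, fold=0, cvs=5)
# With include_validation=True, each returned 'train' record is paired with its
# 'validate' counterpart.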
|
{"hexsha": "cc84c6f65a78d91b3b2f8d52cf0f0d4afa92c2b3", "size": 1714, "ext": "py", "lang": "Python", "max_stars_repo_path": "frame_level_code/splitutils.py", "max_stars_repo_name": "mpekalski/Y8M", "max_stars_repo_head_hexsha": "24b61107a0f482fdb36ab8b15b768cea24e5808a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2017-06-16T06:12:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-19T17:22:02.000Z", "max_issues_repo_path": "frame_level_code/splitutils.py", "max_issues_repo_name": "Kimilovesy/Y8M", "max_issues_repo_head_hexsha": "24b61107a0f482fdb36ab8b15b768cea24e5808a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-05-21T07:52:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-21T07:52:04.000Z", "max_forks_repo_path": "frame_level_code/splitutils.py", "max_forks_repo_name": "Kimilovesy/Y8M", "max_forks_repo_head_hexsha": "24b61107a0f482fdb36ab8b15b768cea24e5808a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2017-06-11T16:45:48.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-13T15:04:45.000Z", "avg_line_length": 32.9615384615, "max_line_length": 96, "alphanum_fraction": 0.7007001167, "include": true, "reason": "import numpy", "num_tokens": 420}
|
function [Population,FrontNo,DWeight] = EnvironmentalSelection(Population,N)
% The environmental selection of DWU
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
% This function is written by Gladston Moreira
%% Non-dominated sorting
[FrontNo] = NDSort(Population.objs,Population.cons,N);
    %% Calculate the dominance information for each solution
DWeight = InfoDominance(Population.objs);
    %% Environmental selection
Next = ReplacementUniformity(Population,N,FrontNo,DWeight);
%% Population for next generation
Population = Population(Next);
FrontNo = FrontNo(Next);
DWeight = DWeight(Next);
end
function InfoD = InfoDominance(PopObj)
% Calculate the information dominance for each solution
    N = size(PopObj,1);
    %% Dominance count for each solution
D = false(N);
for i = 1 : N-1
for j = i+1 : N
k = any(PopObj(i,:)<PopObj(j,:)) - any(PopObj(i,:)>PopObj(j,:));
if k == 1
D(i,j) = true;
elseif k == -1
D(j,i) = true;
end
end
end
CountDominance = sum(D,2);
    %% Calculate information dominance for each solution
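    % InfoD(i) aggregates, over every solution j that dominates i, the number
    % of solutions j itself dominates: D(j,i) is true when j dominates i and
    % CountDominance(j) is the row sum of D for j.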
InfoD = D'*CountDominance;
end
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Algorithms/Multi-objective optimization/DWU/EnvironmentalSelection.m"}
|
# from skimage.io import imread
import datetime
import os
import pickle
import sys
import math
from os import mkdir
# from torchsummary import summary
from os.path import join
from time import time
from memory_profiler import profile
import cv2
import matplotlib.pyplot as plt
import numpy as np
from src.data.utils.utils import get_mask
import src.data.utils.utils as utils
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
import torch.utils.data
from torchvision.utils import make_grid
import torchvision
from PIL import Image
from src.data.constants import (CV2_CONNECTED_ALGORITHM, DATA_DIR, IMG_DIR,
MASK_DIR, MEDIAN_FILTER_KERNEL,
NUMBER_CONNECTIVITY, SIMPLE_THRESHOLD)
from src.models.utils import transforms as T
from src.models.utils.model import get_instance_segmentation_model
from torch import optim
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from src.models.BetaCellDataset import BetaCellDataset, get_dataloaders
# import torch.optim as optim
# import torchvision
def train(model, device, opt, epochs, data_tr, data_val, time_str, hparam_dict, writer, save=False, write=False):
'''Train'''
torch.backends.cudnn.benchmark = True
print(f'Training has begun for model: {time_str}')
size = hparam_dict['image_size']
batch_size = hparam_dict['batch_size']
# TODO: send HPC support email
# regarding why it runs out of memory but shows only
# 2 gb used
scheduler = ReduceLROnPlateau(opt, threshold=0.01, verbose=True)
log_every = 1 # How often to print out losses
save_every = 10 # How often to save model
scaler = GradScaler()
loss_list = hparam_dict['losses'].split(';')
# loss_classifier, loss_objectness
# TODO: remove loss_classifier from loss_list
# and also objectness? if we don't care about detecting
# objects, maybe the faint ones will be caught as well.
# Transforms
scale_jitter = T.ScaleJitter((size / 2, size / 2), scale_range=[0.7, 1.5])
transforms_list = [T.RandomIoUCrop(), scale_jitter]
transforms = T.Compose(transforms_list)
tot_train_losses = []
tot_val_losses = []
# print(x_val[0].unsqueeze(0).shape)
# writer.add_graph(model, [x_val[0].to(device)])
for i, epoch in enumerate(range(epochs)):
model.train() # train mode
tic = time()
print(f'\n* Epoch {epoch+1}/{epochs}')
x_val, y_val = next(iter(data_val))
train_loss = 0
for j, (x_batch, y_batch) in enumerate(data_tr):
with autocast():
x_batch = [x.to(device) for x in x_batch]
y_batch = [{k: v.to(device) for k, v in t.items()}
for t in y_batch]
# TODO: Invalid box coordinates
# Transforms that can't run parallel in dataloader
# need to be performed here
# for (x, y) in zip(x_batch, y_batch):
# x, y = transforms_list[0](x.squeeze(0), y)
# print(np.unique(y['boxes'].cpu()), 'after crop')
# x, y = transforms_list[1](x.squeeze(0), y)
# print(np.unique(y['boxes'].cpu()), 'after scale jitter')
                x_batch = torch.stack(x_batch)
                x_batch = x_batch.to(device)
# set parameter gradients to zero
opt.zero_grad(set_to_none=True)
# forward pass
Y_pred = model(x_batch, y_batch)
# print(x_batch.shape, len(y_batch))
# print(np.unique(x_batch.cpu()), '\n' * 5)
# print(np.unique(y_batch.cpu()), '\n' * 7)
# Select only losses of interest
losses = [value for loss, value in Y_pred.items()
if loss in loss_list]
losses = sum(losses)
            # AMP backward pass: scale the loss so fp16 gradients do not
            # underflow, step the optimizer through the scaler, then update
            # the scale factor. End of training loop for mini-batch.
            scaler.scale(losses).backward()
            scaler.step(opt)
            scaler.update()
# calculate metrics to show the user
train_loss += float(losses / len(data_tr))
# End training loop for epoch
tot_train_losses.append(train_loss)
writer.add_scalar('Training Loss', train_loss, epoch)
# Validation
val_losses = 0
for x_val, y_val in data_val:
with torch.no_grad(), autocast():
model.train()
# x_val, y_val = to_device([x_val, y_val], device)
x_val = [x.to(device) for x in x_val]
y_val = [{k: v.to(device) for k, v in t.items()}
for t in y_val]
val_losses += get_loss(model, loss_list, x_val, y_val)
# TODO: make sure scheduler works
# by printing out the learning rate each epoch
# if write:
# if i == save_every:
# debug_opencv_mask()
model.eval()
y_hat = model(x_val)
# Convert ys to masks
# yhat_boxes = [y['boxes']] .....
# Convert y_hat to CUDA
# y_hat = to_device([y_hat], device)
# y_hat = [{k: v.to(device) for k, v in t.items()}
# for t in y_hat]
# Strip everything except masks
y_hat, y_val = y_to_mask([y_hat, y_val])
# Consolidate masks in batch
y_hat = [get_mask(y) for y in y_hat]
y_val = [get_mask(y) for y in y_val]
# y_hat = torch.cat(y_hat, dim=0) # .detach().cpu()
x_val = [x.squeeze() for x in x_val]
# end validation loop
val_loss = float(val_losses) / len(data_val)
tot_val_losses.append(val_loss)
writer.add_scalar('Validation Loss',
val_loss, epoch)
if write:
image_grid = create_grid(x_val, y_val, y_hat,
batch_size)
writer.add_image(f'epoch_{epoch}', image_grid,
epoch, dataformats='NCHW')
# writer.add_hparams(
# hparam_dict, {'hparam/loss': val_losses.item()}, run_name=f'runs/{time_str}')
scheduler.step(val_losses)
if i % log_every == 0:
# loss is nan; cancel training
if math.isnan(float(train_loss)):
print('training loss is nan\n')
return train_loss, np.nan
print(f'Training loss: {train_loss:.3f}')
print(f'Validation loss: {val_loss:.3f}')
elapsed = utils.time_report(tic, time())
print('Time:', elapsed)
# Save progress every `save_every` epochs
if (i + 1) % save_every == 0 and save:
dump_model(model, time_str)
# early stopping
patience = 15 # number of epochs to wait for validation loss to improve
if i > patience:
early_thresh = 0.95 # ratio threshold at which to stop at
val_prev = tot_val_losses[i-patience:i]
val_now = val_loss
if val_now / np.mean(val_prev) > early_thresh:
print('Early stopping activated; stopping training.')
break
# end epoch
# select random images and their target indices
# images, labels = select_n_random()
# # get the class labels for each image
# # class_labels = [classes[lab] for lab in labels]
# # log embeddings
# # features = images.view(-1, 28 * 28)
# writer.add_embedding(images,
# label_img=images.unsqueeze(1))
return tot_train_losses, tot_val_losses
def create_grid(x_val, y_val, y_hat, batch_size):
image_grid = make_grid(
[*x_val, *y_hat, *y_val], nrow=batch_size, pad_value=220, padding=30)
image_grid = image_grid.squeeze().unsqueeze(1)
image_grid = (image_grid * 255).type(torch.uint8)
return image_grid
def to_device(tensor_list, device):
'''
Moves data onto device.
'''
main_list = []
for batch in tensor_list:
if type(batch) == dict:
batch = [{k: v.to(device) for k, v in t.items()}
for t in batch]
elif type(batch) == torch.Tensor:
batch = [x.to(device) for x in batch]
main_list.append(batch)
return main_list
def y_to_mask(ys):
    '''Strip everything except the masks from target/prediction dicts.
    Accepts either a single list of dicts or a list of such lists.'''
    if len(ys) > 0 and type(ys[0]) == dict:
        return [item['masks'].squeeze(1) for item in ys]
    return [[item['masks'].squeeze(1) for item in y] for y in ys]
def get_loss(model, loss_list, x_val, y_val):
output = model(x_val, y_val) # losses
# float(sum(loss for loss in output.values()))
losses = [value
for loss, value in output.items()
if loss in loss_list]
losses = sum(losses)
return losses
def debug_opencv_mask():
# Visualize the masks generated by opencv
# for debugging purposes
dataset = BetaCellDataset(DATA_DIR)
img, target = dataset[500]
plt.subplot(1, 2, 1)
plt.imshow(img, cmap='viridis')
plt.subplot(1, 2, 2)
# Values in target['masks'] are either 0 or 1
# so multiply by 255 for image pixel values
plotted = torch.sum(target['masks'], dim=0) * 255
plt.imshow(plotted, cmap='gray')
plt.savefig(join(save, 'opencv_mask.jpg'))
def dump_model(model, time_str):
# Make folder unique to this run in order to save model and loss
utils.make_dir(save)
pickle.dump(model, open(join(save, f'model_{time_str}.pkl'), 'wb'))
def predict(model, data, device):
    '''Predict'''
    model.eval()  # testing mode
    Y_pred = [torch.sigmoid(model(X_batch.to(device))) for X_batch, _ in data]
    return np.array(Y_pred)
# helper function
def select_n_random(n=100):
    '''
    Selects n random datapoints and their corresponding labels from a dataset
    source:
    https://pytorch.org/tutorials/intermediate/tensorboard_tutorial.html
    '''
    data = BetaCellDataset()
    perm = torch.randperm(len(data))
    return data[perm][:n]
def bce_loss(y_real, y_pred):
'''bce_loss'''
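    # Algebraically identical to binary cross-entropy on logits:
    # -y*log(sigmoid(p)) - (1-y)*log(1-sigmoid(p)) = p - y*p + log(1 + exp(-p)).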
return torch.mean(y_pred - y_real * y_pred +
torch.log(1 + torch.exp(-y_pred)))
if __name__ == '__main__':
tic = time()
# Environment variable for memory management
alloc_conf = 'PYTORCH_CUDA_ALLOC_CONF'
try:
print(alloc_conf, os.environ[alloc_conf])
except KeyError:
print(alloc_conf, 'not found')
conn = NUMBER_CONNECTIVITY
algo = CV2_CONNECTED_ALGORITHM
kernel = MEDIAN_FILTER_KERNEL
threshold = SIMPLE_THRESHOLD
device = utils.set_device()
print(f'Running on {device}.')
utils.setcwd(__file__)
# our dataset has two classes only - background and person
num_classes = 2
# hyperparameters
size = 1024
batch_size = 8 # 2
pretrained = True
num_epochs = 30 # 500
lr = 3.418507038460298e-06
wd = 1.2957404400334042e-08
beta1 = 0.2438598958001344
beta2 = 0.9849760264270886
n_img_select = 1101
manual_select = 1
img_filter = 'bilateral'
data_tr, data_val = get_dataloaders(
batch_size=batch_size, num_workers=4, resize=size,
n_img_select=(n_img_select, 1), manual_select=(manual_select, 1), img_filter=img_filter)
# get the model using our helper function
model = get_instance_segmentation_model(pretrained=pretrained)
model.to(device)
# Unique identifier for newly saved objects
now = datetime.datetime.now()
time_str = f'{now.day:02d}_{now.month:02d}_{now.hour}H_{now.minute}M_{now.second}S'
save = f'interim/run_{time_str}'
params = [p for p in model.parameters() if p.requires_grad]
opt = optim.Adam(params, lr=lr, weight_decay=wd, betas=[beta1, beta2])
loss_list = ['loss_mask', 'loss_rpn_box_reg', 'loss_box_reg',
'loss_classifier', 'loss_objectness']
# loss_list = ['loss_mask', 'loss_rpn_box_reg']
hparam_dict = {
'learning_rate': lr,
'weight_decay': wd,
'num_epochs': num_epochs,
'optimizer': f'{opt}',
'losses': ';'.join(loss_list),
'image_size': size if size else 1024,
'batch_size': batch_size,
'pretrained': pretrained
}
# TODO: add "how many weakly annotated"
# TODO: add /pred/ folder in addition to /runs/
# so make a writer in predict_model which saves images
    # add_video in SummaryWriter
description = f'''{time_str}\n
Learning rate: {lr}\n
Weight decay: {wd}\n
Optimizer: {opt}\n
Losses: {loss_list}
'''
with SummaryWriter(f'runs/{time_str}') as w:
losses = train(model, device, opt, num_epochs,
data_tr, data_val, time_str, hparam_dict, w, save=True, write=False)
w.add_text('description', description)
losses = np.array(losses).T
pickle.dump(model, open(join('interim', f'run_{time_str}', f'model_{time_str}.pkl'), 'wb'))
elapsed = utils.time_report(tic, time())
print('train_model finished after', elapsed)
|
{"hexsha": "a269cff06a1d6e7a93d07e6488f860a29aa2d6d6", "size": 13530, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/train_model.py", "max_stars_repo_name": "gummz/cell", "max_stars_repo_head_hexsha": "a741ca4900a11f1080b7572ac969f765e5ac2ffd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/train_model.py", "max_issues_repo_name": "gummz/cell", "max_issues_repo_head_hexsha": "a741ca4900a11f1080b7572ac969f765e5ac2ffd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/train_model.py", "max_forks_repo_name": "gummz/cell", "max_forks_repo_head_hexsha": "a741ca4900a11f1080b7572ac969f765e5ac2ffd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4460431655, "max_line_length": 113, "alphanum_fraction": 0.5999260902, "include": true, "reason": "import numpy", "num_tokens": 3303}
|
[STATEMENT]
lemma ntsmcf_0_NTMap_vsv[smc_cs_intros]: "vsv (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vsv (ntsmcf_0 \<CC>\<lparr>NTMap\<rparr>)
[PROOF STEP]
unfolding ntsmcf_0_components
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vsv []\<^sub>\<circ>
[PROOF STEP]
by simp
|
{"llama_tokens": 160, "file": "CZH_Foundations_czh_semicategories_CZH_SMC_Simple", "length": 2}
|
#--------------------------------------------------------------------------#
# This function will create a list of csv files that contain the isolates in
# rows and loci in columns. This currently only works for unlabeled files,
# but what it will do is allow the user to specify an entire directory to
# analyze.
#
# It will change in the future, but it works for my purposes for now.
#--------------------------------------------------------------------------#
getfile <- function(multFile=NULL, pattern=NULL){
# the default option is to grab all of the files in a directory matching a
# specified pattern. If no pattern is set, all files will be listed.
if (is.null(multFile) || multFile == "yes"){
# this sets the path variable that the user can use to set the path
# to the files with setwd(x$path), where x is the datastructure
# this function dumped into.
path <- sub("(^.+?/).+?\\.[a-z]{3}", "\\1", file.path(file.choose()))
if (!is.null(pattern)){
pat <- pattern
x <- list.files(path, pattern=pat)
}
else {
x <- list.files(path)
}
}
else {
# if the user chooses to analyze only one file, a pattern is not needed
csv <- file.choose()
path <- sub("(^.+?/).+?\\.[a-z]{3}", "\\1", file.path(csv))
csv <- sub("^.+?/(.+?\\.[a-z]{3})", "\\1", csv)
x <- csv
}
filepath <- list(files=x, path=path)
return(filepath)
}
#--------------------------------------------------------------------------#
# Here is the function that will produce the discrete distance matrix for
# diploid organisms.
# pop.matrix is a matrix of rows of individuals with columns of Loci.
# Currently the loci are every two columns in the matrix.
# Specifically, this compares two individuals at one locus. A loop for each
# type of distance matrix will need to be constructed separately
#
# i is the row for each new individual in the pairwise comparison
# j is the row for the reference individual
# m is the column for the first allele in the locus
# n is the column for the second allele in the locus
# z is the measure of distance. It should be zero before the loop
#
# Again, this is for diploids. position of the allele at the locus does not
# matter as the inheritance is unknown, so this algorithm takes that into
# account when doing the calculations.
#--------------------------------------------------------------------------#
difference.test <- function(pop.matrix, i, j, m, n, z){
# This if loop is analyzing allele m of individual j against that of
# individual i. If they are equal, then that means the distance is equal
# to zero at that allele.
if(pop.matrix[j,m] == pop.matrix[i,m]){
# given that the distance between allele m in individuals j and i
# are equal, there is only one more comparison to do. If allele n
# of individual j is equal to that of individual i, then the
# distance is equal to 0. If they are not equal, z at the locus is 1
if(pop.matrix[j,n] != pop.matrix[i,n]){
z <- z+1
}
}
# If allele m of individual j is not equal to that of individual i, then
# a test to see if allele m is equal to allele n in individuals j and i,
# respectively.
else if(pop.matrix[j,m] == pop.matrix[i,n]){
# given that the distance between alleles m and n in individuals j
# and i respectively are equal, the only comparison left is to
# compare the distances between alleles m and n in individuals i and
# j respectively. If they are not equal, z at the locus is 1
if(pop.matrix[j,n] != pop.matrix[i,m]){
z <- z+1
}
}
# If neither allele in individual i is equal to allele m in individual j
# z is equal to one and a tests if the n allele of individual j is equal
# to either allele in individual i.
else{
z <- z+1
# Testing if the n allele in individual j is not equal to either allele
# in individual i. If neither is equal, z at the locus increases by 1.
if(pop.matrix[j,n] != pop.matrix[i,m] && pop.matrix[j,n] != pop.matrix[i,n]){
z <- z+1
}
}
# The final value of z is returned. At this point it can take on the
# values of 0, 1, or 2 if doing a pairwise comparison at a single locus.
# For pairwise comparisons over multiple loci, z can take on any value
# between 0 and 2*M where M is the number of loci sampled.
return(z)
}
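# Worked example (hypothetical diploid genotypes at one locus): if individual j
# carries alleles (101, 103) and individual i carries (101, 105), the pair
# shares one allele and difference.test() returns z + 1; identical genotypes
# leave z unchanged, and genotypes sharing no allele return z + 2.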
stdIa <- function(x){
#--------------------------------------------------------------------------#
# This is the beginning of the analysis. It will take the list of files
# provided by the user and pull the files out of the directory that has
# been set by the user.
#--------------------------------------------------------------------------#
for(a in 1:length(x)){
pop <- read.table(file(description= x[a], open="r"))
pop.matrix <- as.matrix(pop)
numAlleles <- ncol(pop.matrix)
numIsolates <- nrow(pop.matrix)
np <- (numIsolates*(numIsolates-1))/2
# Creation of the datastructures
if (!exists("Ia.vector")){
Ia.vector <- NULL
rbarD.vector <- NULL
file.vector <- NULL
}
D.matrix <- matrix(0, nrow=numIsolates, ncol=numIsolates)
d.matrix <- D.matrix
d.vector <- NULL
d2.vector <- NULL
vard.vector <- NULL
vardpair.vector <- NULL
to.remove <- NULL
#--------------------------------------------------------------------------#
# This is the loop for analyzing the pairwise distances at each locus,
# also known as d. These values will be placed into two vectors representing
# the sum of d for each locus and the sum of d^2 for each locus.
#
# This will also calculate D, the pairwise comparison of all isolates over
# all loci.
#--------------------------------------------------------------------------#
# Initiating the loop over the columns of the population matrix
for(m in seq(1, (numAlleles-1), 2)){
n <- m+1
# Loop for the columns of the distance matrix
for(j in 1:numIsolates){
# Loop for the rows of the distance matrix
for(i in 1:numIsolates){
z <- 0
                # Setting the constraint for the pairwise comparisons by the
# equation (n(n-1))/2 where n = numIsolates
# If this loop did not exist, the construction of the
# distance matrix would take twice as long.
if(j < i && i <= numIsolates){
z <- difference.test(pop.matrix, i, j, m, n, z)
}
# The value of z (0, 1, or 2) is pushed into the matrix
d.matrix[i,j] <- d.matrix[i,j]+z
# This value is added to the matrix for pairwise comparison
# of all the isolates over all loci as opposed to each locus
D.matrix[i,j] <- D.matrix[i,j]+d.matrix[i,j]
}
}
# placing the sum of the resulting matrix into a vector for later
# use
d.vector <- append(d.vector, sum(d.matrix))
# placing the sum of the squares of the resulting matrix into a
# vector for later use
d2.vector <- append(d2.vector, sum(d.matrix^2))
# zeroing out the matrix
d.matrix[1:numIsolates,1:numIsolates] <- 0
}
# removing the matrix from the namespace as it is no longer needed.
rm(d.matrix)
#--------------------------------------------------------------------------#
# Now to begin the calculations. First, set the variance of D
#--------------------------------------------------------------------------#
varD <- ((sum(D.matrix^2)-((sum(D.matrix))^2)/np))/np
#--------------------------------------------------------------------------#
# Next is to create a vector containing all of the variances of d (there
# will be one for each locus)
#--------------------------------------------------------------------------#
vard.vector <- ((d2.vector-((d.vector^2)/np))/np)
#--------------------------------------------------------------------------#
# Here the roots of the products of the variances are being produced and
# the sum of those values is taken.
#--------------------------------------------------------------------------#
for (b in 1:length(d.vector)){
for (d in 1:length(d.vector)){
            # As pairwise multiplication is required, a pairwise constraint
# loop must be set up.
if(b < d && d <= length(d.vector)){
vardpair <- sqrt(vard.vector[b]*vard.vector[d])
vardpair.vector <- append(vardpair.vector, vardpair)
}
}
}
#--------------------------------------------------------------------------#
# The sum of the variances necessary for the calculation of Ia is calculated
#--------------------------------------------------------------------------#
sigVarj <- sum(vard.vector)
rm(vard.vector)
#--------------------------------------------------------------------------#
# Finally, the Index of Association and the standardized Index of
# Association are calculated.
#--------------------------------------------------------------------------#
Ia <- (varD/sigVarj)-1
rbarD <- (varD - sigVarj)/(2*sum(vardpair.vector))
# Prints to screen as loop progresses
print(paste("File Name:", x[a], sep=" "))
print(paste("Index of Association:", Ia, sep=" "))
print(paste("Standardized Index of Association (rbarD):", rbarD, sep=" "))
# Saves the values of Ia, rbarD, and the filename into datastructures
# that will be listed in a dataframe
file.vector <- append(file.vector, x[a])
Ia.vector <- append(Ia.vector, Ia)
rbarD.vector <- append(rbarD.vector, rbarD)
}
# Creating the data frame for output.
Iout <- list(Ia=Ia.vector, rbarD=rbarD.vector, File=file.vector)
return(as.data.frame(Iout))
}
|
{"hexsha": "9b8f4efcde1b7f1144c47342339e4c49e0378337", "size": 9321, "ext": "r", "lang": "R", "max_stars_repo_path": "the_horror/OLDmultilocusFunctions.r", "max_stars_repo_name": "zkamvar/PiG_Multitool", "max_stars_repo_head_hexsha": "81a0bf7bc7830fac8fab18e548e97a088fe341d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "the_horror/OLDmultilocusFunctions.r", "max_issues_repo_name": "zkamvar/PiG_Multitool", "max_issues_repo_head_hexsha": "81a0bf7bc7830fac8fab18e548e97a088fe341d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "the_horror/OLDmultilocusFunctions.r", "max_forks_repo_name": "zkamvar/PiG_Multitool", "max_forks_repo_head_hexsha": "81a0bf7bc7830fac8fab18e548e97a088fe341d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3857142857, "max_line_length": 79, "alphanum_fraction": 0.5797661195, "num_tokens": 2330}
|
"""Stopping rules are a particular focus of TAR research.
"""
import numpy as np
from tarexp.component.base import Component
from tarexp.ledger import Ledger
from tarexp.util import getOneDimScores
# Note: not importing Workflow to avoid circular dependency
from scipy.stats import hypergeom
def _inferBatchsize(ledger: Ledger, fast=True):
if fast:
return ledger.n_annotated // ledger.n_rounds
batch_size = None
for r in range(1, ledger.n_rounds): # round 0 is the seed set
current_bs = (ledger._record[:, 0] == r).sum()
if batch_size is None:
batch_size = current_bs
assert batch_size == current_bs
return batch_size
class StoppingRule(Component):
def __init__(self, target_recall: float=None):
super().__init__()
self.target_recall = target_recall
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
raise NotImplementedError
class NullStoppingRule(StoppingRule):
def checkStopping(self, *args, **kwargs):
return False
class FixedRoundStoppingRule(StoppingRule):
def __init__(self, max_round, *args, **kwargs):
super().__init__(**kwargs)
assert max_round >= 0
self.max_round = max_round
def checkStopping(self, ledger, *args, **kwargs):
return ledger.n_rounds >= self.max_round
class KneeStoppingRule(StoppingRule):
"""
.. seealso::
.. [1] Gordon V. Cormack, and Maura R. Grossman. "Engineering quality and reliability in technology-assisted review."
*Proceedings of the 39th International ACM SIGIR conference on Research and Development in Information Retrieval.* 2016.
`<https://dl.acm.org/doi/10.1145/2911451.2911510>`__
"""
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
if ledger.n_rounds < 1:
return False
pos_per_round = np.array([ c[1] if 1 in c else 0 for c in ledger.getAnnotationCounts() ])
pos_found = pos_per_round.cumsum()
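        # Knee test: for each candidate round i, compare the slope of the gain
        # curve up to i (positives found per round) against the slope after i;
        # stop once the sharpest ratio clears the 156 - min(pos, 150) bound below.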
rho_s = -1
for i in range(ledger.n_rounds):
rho = (pos_found[i]/(i+1)) / ((1+pos_found[-1]-pos_found[i])/(ledger.n_rounds-i))
rho_s = max(rho_s, rho)
return rho_s >= 156 - min(pos_found[-1], 150)
class BudgetStoppingRule(StoppingRule):
"""
.. seealso::
.. [2] Gordon V. Cormack, and Maura R. Grossman. "Engineering quality and reliability in technology-assisted review."
*Proceedings of the 39th International ACM SIGIR conference on Research and Development in Information Retrieval.* 2016.
`<https://dl.acm.org/doi/10.1145/2911451.2911510>`__
"""
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
if ledger.n_rounds < 1:
return False
batchsize = _inferBatchsize(ledger)
pos_per_round = np.array([ c[1] if 1 in c else 0 for c in ledger.getAnnotationCounts() ])
pos_found = pos_per_round.cumsum()
rho_s = -1
for i in range(ledger.n_rounds):
rho = (pos_found[i]/(i+1)) / ((1+pos_found[-1]-pos_found[i])/(ledger.n_rounds-i))
rho_s = max(rho_s, rho)
return (rho_s >= 6 and batchsize*i+1 >= 10*ledger.n_docs / pos_found[i]) or \
(ledger.n_annotated >= ledger.n_docs*0.75)
class ReviewHalfStoppingRule(StoppingRule):
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
return ledger.n_annotated >= ledger.n_docs // 2
class BatchPrecStoppingRule(StoppingRule):
def __init__(self, prec_cutoff=5/200, slack=1):
super().__init__()
self.prec_cutoff = prec_cutoff
self.slack = slack
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
bprec = np.array([ batch[1] / sum(batch.values()) for batch in ledger.getAnnotationCounts() ])
counter = 0
for prec in bprec:
counter = (counter+1) if prec <= self.prec_cutoff else 0
if counter >= self.slack:
return True
return False
class Rule2399StoppingRule(StoppingRule):
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
return ledger.n_annotated >= 1.2*ledger.n_pos_annotated + 2399
class QuantStoppingRule(StoppingRule):
"""
.. seealso::
.. [3] Eugene Yang, David D. Lewis, and Ophir Frieder. "Heuristic stopping rules for technology-assisted review."
*Proceedings of the 21st ACM Symposium on Document Engineering.* 2021.
`<https://arxiv.org/abs/2106.09871>`__
"""
def __init__(self, target_recall: float, nstd: float = 0):
super().__init__(target_recall=target_recall)
self.nstd = nstd
def checkStopping(self, ledger: Ledger, workflow, **kwargs) -> bool:
if ledger.n_rounds < 2:
return False
scores = getOneDimScores(workflow.latest_scores)
assert (scores <= 1).all() and (scores >= 0).all(), \
"Scores have to be probabilities to use Quant Rule."
# `ps` stands for probability sum
unknown_ps = scores[ ~ledger.annotated ].sum()
known_ps = scores[ ledger.annotated ].sum()
est_recall = (known_ps) / (known_ps + unknown_ps)
if self.nstd == 0:
return est_recall >= self.target_recall
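        # Otherwise subtract a confidence margin: each unjudged document is
        # treated as a Bernoulli draw with variance score * (1 - score), and
        # those variances are propagated into the recall estimate (see Yang
        # et al. for the derivation).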
prod = scores * (1-scores)
all_var = prod.sum()
unknown_var = prod[ ~ledger.annotated ].sum()
est_var = (known_ps**2 / (known_ps + unknown_ps)**4 * all_var) + (1 / (known_ps + unknown_ps)**2 * (all_var-unknown_var))
return est_recall - self.nstd*np.sqrt(est_var) >= self.target_recall
class CHMHeuristicsStoppingRule(StoppingRule):
"""
.. seealso::
.. [4] Max W. Callaghan, and Finn Müller-Hansen. "Statistical stopping criteria for automated screening in systematic reviews."
*Systematic Reviews 9.1* (2020): 1-14.
`<https://pubmed.ncbi.nlm.nih.gov/33248464/>`__
"""
def __init__(self, target_recall: float, alpha=0.05):
super().__init__(target_recall=target_recall)
self.alpha = alpha
def checkStopping(self, ledger: Ledger, *args, **kwargs) -> bool:
if ledger.n_rounds < 2:
return False
counts = ledger.getAnnotationCounts()
pos_found = np.array([ c[1] if 1 in c else 0 for c in counts ]).cumsum()
annotated_cumsum = np.array([ sum(c.values()) for c in counts ]).cumsum()
n_docs = ledger.n_docs
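        # For each past round i, test whether the positives observed since i
        # are improbably few (hypergeometric CDF < alpha) under the hypothesis
        # that enough positives remained after i to keep recall below target.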
for i in range(1, ledger.n_rounds):
if hypergeom.cdf( pos_found[-1] - pos_found[i], # k
n_docs-annotated_cumsum[i], # N
int(pos_found[-1]/self.target_recall - pos_found[i]), # K_tar
annotated_cumsum[-1] - annotated_cumsum[i] # n
) < self.alpha:
return True
return False
# Note: Full CMH rule is actually a two-phase workflow where the poststopping does a random walk
|
{"hexsha": "453daf1dcb32b02983739f42c9b93fd1a787b0aa", "size": 7145, "ext": "py", "lang": "Python", "max_stars_repo_path": "tarexp/component/stopping.py", "max_stars_repo_name": "eugene-yang/tarexp", "max_stars_repo_head_hexsha": "3037462b41ea3a5aa3faf6afa71db0de5c14e88f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2022-02-23T09:39:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T15:34:38.000Z", "max_issues_repo_path": "tarexp/component/stopping.py", "max_issues_repo_name": "eugene-yang/tarexp", "max_issues_repo_head_hexsha": "3037462b41ea3a5aa3faf6afa71db0de5c14e88f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tarexp/component/stopping.py", "max_forks_repo_name": "eugene-yang/tarexp", "max_forks_repo_head_hexsha": "3037462b41ea3a5aa3faf6afa71db0de5c14e88f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6052631579, "max_line_length": 136, "alphanum_fraction": 0.6162351295, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1796}
|
import os
import re
import time
from typing import List
import shutil
from absl import app, flags
import numpy as np
import matplotlib.pyplot as plt
import s3_util
from gui import GUI, AnnotatedImage, Category
IMAGE_DIR_NAME = "images"
ANNOTATION_DIR_NAME = "annotations"
MANIFEST_DIR_NAME = "manifests"
flags.DEFINE_string(
"label_file_path",
"../data/labels.txt",
"Path to the file containing the category labels.",
)
flags.DEFINE_string(
"local_data_dir", "../data", "Local directory of the image files to label."
)
flags.DEFINE_string(
"s3_bucket_name", None, "S3 bucket to retrieve images from and upload manifest to."
)
flags.DEFINE_string("s3_data_dir", "data", "Prefix of the s3 data objects.")
flags.DEFINE_string("image_file_type", "jpg", "File type of the image files")
flags.DEFINE_string("annotation_file_type", "xml", "File type of the annotation files")
flags.DEFINE_string("manifest_file_type", "txt", "File type of the manifest files")
def get_files_from_dir(dir_path: str, file_type: str = None) -> List[str]:
if not os.path.isdir(dir_path):
return []
file_paths = [
f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))
]
if file_type is not None:
file_paths = [f for f in file_paths if f.lower().endswith(file_type.lower())]
return file_paths
def manifest_file_sort(manifest_file) -> int:
match = re.match("[0-9]+", manifest_file)
if not match:
return 0
return int(match[0])
def get_newest_manifest_path() -> str:
manifest_files = get_files_from_dir(
os.path.join(flags.FLAGS.local_data_dir, MANIFEST_DIR_NAME)
)
manifest_files = [
f for f in manifest_files if f.lower().endswith(flags.FLAGS.manifest_file_type)
]
if len(manifest_files) == 0:
return None
newest_manifest_file = sorted(manifest_files, key=manifest_file_sort, reverse=True)[
0
]
return os.path.join(
flags.FLAGS.local_data_dir, MANIFEST_DIR_NAME, newest_manifest_file
)
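# Illustration of the manifest naming convention (assumed from save_outputs
# below, which writes "<start_time>-manifest.<ext>"): manifest_file_sort
# extracts the leading integer, so the newest manifest sorts first:
#
#     sorted(["100-manifest.txt", "250-manifest.txt"],
#            key=manifest_file_sort, reverse=True)[0]   # -> "250-manifest.txt"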
def save_outputs(
annotatedImages: List[AnnotatedImage],
previous_manifest_path: str,
start_time: int,
use_s3: bool,
) -> None:
# create a new manifest file
new_manifest_path = os.path.join(
flags.FLAGS.local_data_dir,
MANIFEST_DIR_NAME,
"%i-manifest.%s" % (start_time, flags.FLAGS.manifest_file_type),
)
if previous_manifest_path is not None:
shutil.copyfile(previous_manifest_path, new_manifest_path)
else:
open(new_manifest_path, "a").close()
new_annotation_filepaths = []
with open(new_manifest_path, "a") as manifest:
for image in annotatedImages:
annotation_filepath = image.write_to_pascal_voc()
image_filename = os.path.basename(image.image_path)
annotation_filename = (
os.path.basename(annotation_filepath)
if annotation_filepath is not None
else "Invalid"
)
if annotation_filepath is not None:
new_annotation_filepaths.append(annotation_filepath)
manifest.write("%s,%s\n" % (image_filename, annotation_filename,))
if use_s3:
s3_util.upload_files(
flags.FLAGS.s3_bucket_name,
new_annotation_filepaths,
flags.FLAGS.s3_data_dir + "/" + ANNOTATION_DIR_NAME,
)
s3_util.upload_files(
flags.FLAGS.s3_bucket_name,
[new_manifest_path],
flags.FLAGS.s3_data_dir + "/" + MANIFEST_DIR_NAME,
)
# ensure that all images have been uploaded
s3_util.upload_files(
flags.FLAGS.s3_bucket_name,
[image.image_path for image in annotatedImages],
flags.FLAGS.s3_data_dir + "/" + IMAGE_DIR_NAME,
)
def create_output_dir(dir_name) -> bool:
    # os.path.isdir() is already False for missing paths, so no separate exists() check is needed
    if not os.path.isdir(dir_name):
print("Creating output directory: %s" % dir_name)
try:
os.makedirs(dir_name)
except OSError:
print("Creation of the directory %s failed" % dir_name)
return False
else:
print("Successfully created the directory %s " % dir_name)
return True
else:
return True
def main(unused_argv):
start_time = time.time()
fig = plt.figure()
gui = GUI(fig)
    use_s3 = flags.FLAGS.s3_bucket_name is not None
if use_s3:
if not s3_util.s3_bucket_exists(flags.FLAGS.s3_bucket_name):
use_s3 = False
print(
"Bucket: %s either does not exist or you do not have access to it"
% flags.FLAGS.s3_bucket_name
)
else:
print(
"Bucket: %s exists and you have access to it"
% flags.FLAGS.s3_bucket_name
)
if use_s3:
# Download new images from s3
s3_images = s3_util.s3_get_object_names_from_dir(
flags.FLAGS.s3_bucket_name,
flags.FLAGS.s3_data_dir + "/" + IMAGE_DIR_NAME,
flags.FLAGS.image_file_type,
)
s3_util.s3_download_files(
flags.FLAGS.s3_bucket_name,
s3_images,
os.path.join(flags.FLAGS.local_data_dir, IMAGE_DIR_NAME),
)
        # Download any new annotation files from s3
s3_annotations = s3_util.s3_get_object_names_from_dir(
flags.FLAGS.s3_bucket_name,
flags.FLAGS.s3_data_dir + "/" + ANNOTATION_DIR_NAME,
flags.FLAGS.annotation_file_type,
)
s3_util.s3_download_files(
flags.FLAGS.s3_bucket_name,
s3_annotations,
os.path.join(flags.FLAGS.local_data_dir, ANNOTATION_DIR_NAME),
)
# Download any new manifests files from s3
s3_manifests = s3_util.s3_get_object_names_from_dir(
flags.FLAGS.s3_bucket_name,
flags.FLAGS.s3_data_dir + "/" + MANIFEST_DIR_NAME,
)
s3_util.s3_download_files(
flags.FLAGS.s3_bucket_name,
s3_manifests,
os.path.join(flags.FLAGS.local_data_dir, MANIFEST_DIR_NAME),
)
if not os.path.isfile(flags.FLAGS.label_file_path):
print("Invalid category labels path.")
return
# read in the category labels
    with open(flags.FLAGS.label_file_path) as label_file:
        category_labels = label_file.read().splitlines()
if len(category_labels) == 0:
print("No label categories found")
return
category_colors = plt.get_cmap("hsv")(np.linspace(0, 0.9, len(category_labels)))
for index, (name, color) in enumerate(zip(category_labels, category_colors)):
gui.add_category(Category(name, tuple(color), str(index)))
if not os.path.isdir(os.path.join(flags.FLAGS.local_data_dir, IMAGE_DIR_NAME)):
print("Invalid input image directory")
return
previous_manifest_file = get_newest_manifest_path()
manifest_images = set()
if previous_manifest_file is not None:
with open(previous_manifest_file, "r") as manifest:
for line in manifest:
manifest_images.add(line.split(",")[0].rstrip())
# read in the names of the images to label
for image_file in os.listdir(
os.path.join(flags.FLAGS.local_data_dir, IMAGE_DIR_NAME)
):
if (
image_file.endswith(flags.FLAGS.image_file_type)
and os.path.basename(image_file) not in manifest_images
):
gui.add_image(
AnnotatedImage(
os.path.join(
flags.FLAGS.local_data_dir, IMAGE_DIR_NAME, image_file
),
os.path.join(flags.FLAGS.local_data_dir, ANNOTATION_DIR_NAME),
)
)
if len(gui.images) == 0:
print("No input images found")
return
if not create_output_dir(
os.path.join(flags.FLAGS.local_data_dir, ANNOTATION_DIR_NAME)
):
print("Cannot create output annotations directory.")
return
if not create_output_dir(
os.path.join(flags.FLAGS.local_data_dir, MANIFEST_DIR_NAME)
):
print("Cannot create output manifests directory")
return
annotated_images = gui.show()
save_outputs(annotated_images, previous_manifest_file, start_time, use_s3)
if __name__ == "__main__":
app.run(main)
|
{"hexsha": "1494829d25f2271596f032d5fc07710a5df1c6d3", "size": 8459, "ext": "py", "lang": "Python", "max_stars_repo_path": "odlu/label.py", "max_stars_repo_name": "BrianOfrim/recyclops-od", "max_stars_repo_head_hexsha": "f2a95cf37e36cd884c2f6b96628d6ed2de4535a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-04T01:00:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-04T01:00:27.000Z", "max_issues_repo_path": "odlu/label.py", "max_issues_repo_name": "BrianOfrim/object-detection-label-utility", "max_issues_repo_head_hexsha": "f2a95cf37e36cd884c2f6b96628d6ed2de4535a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-04T01:02:25.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-07T19:27:05.000Z", "max_forks_repo_path": "odlu/label.py", "max_forks_repo_name": "BrianOfrim/recyclops-od", "max_forks_repo_head_hexsha": "f2a95cf37e36cd884c2f6b96628d6ed2de4535a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5634328358, "max_line_length": 88, "alphanum_fraction": 0.6386097647, "include": true, "reason": "import numpy", "num_tokens": 1905}
|
[STATEMENT]
lemma le_multiset_empty_left[simp]: "M \<noteq> {#} \<Longrightarrow> {#} < M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M \<noteq> {#} \<Longrightarrow> {#} < M
[PROOF STEP]
by (simp add: less_multiset\<^sub>H\<^sub>O)
|
{"llama_tokens": 104, "file": null, "length": 1}
|
import networkx
import sys
import os
import silk
from subprocess import Popen
import string
import random
import sqlparse
from protocol_graph import ProtocolGraph
def id_generator(size=8, chars=string.ascii_letters):
"""
Generate random string.
size: size of the string to be generated.
chars: list of letters for the string.
:return: random string generated.
"""
L = []
for _ in range(size):
L.append(random.choice(chars))
return ''.join(L)
# return ''.join(random.choice(chars) for _ in range(size))
def sql_parse_no_ws(s):
"""
Parse sql statement skipping whitespaces.
:param s: string with one sql statement.
:return: list of sqlparse.tokens for sql statement without the whitespaces.
"""
parsed = sqlparse.parse(s)
tokens = parsed[0].tokens
cleaned = [t for t in tokens if t.ttype != sqlparse.tokens.Token.Text.Whitespace]
return cleaned
def tokens_select_all(tokens):
"""
    Check whether the tokens form a 'select * from table;' statement.
    :param tokens: list of sqlparse.tokens representing the sql statement.
    :return: table name, or empty string if it is not such a statement.
"""
table = ''
token_type = sqlparse.tokens.Token
if tokens[0].ttype == token_type.Keyword.DML and \
tokens[0].value.lower() == 'select':
if tokens[1].ttype == token_type.Wildcard and tokens[1].value == '*':
if tokens[2].ttype == token_type.Keyword and tokens[2].value.lower() == 'from':
if tokens[4].ttype == token_type.Punctuation and tokens[4].value == ';':
table = tokens[3].value
return table
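# Quick illustration (assuming sqlparse's default tokenization, where a bare
# table name becomes an Identifier group): "select * from flows;" reduces to
# [DML 'select', Wildcard '*', Keyword 'from', Identifier 'flows',
# Punctuation ';'] after whitespace removal, so:
#
#     tokens_select_all(sql_parse_no_ws("select * from flows;"))  # -> "flows"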
def tokens_select_frame(tokens):
"""
    Check whether the tokens form a
    'select * from table where (%s<=stime_epoch_secs and stime_epoch_secs<%s);'
    statement.
    :param tokens: list of sqlparse.tokens representing the sql statement.
    :return: table name, or empty string if it is not such a statement; currently
        only checks whether a where clause is present.
"""
table = ''
token_type = sqlparse.tokens.Token
if tokens[0].ttype == token_type.Keyword.DML and \
tokens[0].value.lower() == 'select':
if tokens[1].ttype == token_type.Wildcard and tokens[1].value == '*':
if tokens[2].ttype == token_type.Keyword and tokens[2].value.lower() == 'from':
                # a WHERE clause is parsed as a sqlparse.sql.Where token group (its ttype is None)
                if isinstance(tokens[4], sqlparse.sql.Where):
table = tokens[3].value
return table
class FlowCursor:
"""
Cursor for data access.
"""
_connection = None # database connection
_sql_params = tuple() # tuple of sql parameters
def __init__(self, connection):
self._connection = connection
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def __iter__(self):
return self
def __next__(self):
"""
Next record in iterator.
:return: named record like in silk.
"""
pass
def execute(self, sql, params=tuple()):
"""
        Execute sql statement to select the data.
:param sql: string of sql statement.
:param params: tuple (start, end).
"""
pass
class IPFIXCursor(FlowCursor):
"""
Cursor for data access.
"""
def __init__(self, connection):
super(IPFIXCursor, self).__init__(connection)
    def __next__(self):
        record = self._connection.__next__()
        valid = False
        while record and not valid:
            if self._sql_params:
                (start, end) = self._sql_params
                valid = start <= record.stime_epoch_secs and record.etime_epoch_secs < end
            else:
                valid = True
            if not valid:
                # advance to the next record; otherwise this loop never terminates
                record = self._connection.__next__()
        return record
def execute(self, sql, params=tuple()):
"""
        Execute sql statement to select the data;
currently only fetch_all and fetch_frame implemented.
:param sql: string of sql statement.
:param params: tuple (start, end).
"""
tokens = sql_parse_no_ws(sql)
table = tokens_select_all(tokens)
self._sql_params = tuple()
if not table:
table = tokens_select_frame(tokens)
if table:
self._sql_params = params
class FlowConnection:
"""
Connection to flow data.
"""
_connection = None # connection handle to flow data
_cursor_constructor = None # function to construct cursor object
def __init__(self, cursor_constructor, *args):
"""
Initialize.
"""
self._cursor_constructor = cursor_constructor
def __enter__(self):
"""
Enter with context.
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
Exit with context.
"""
pass
def open(self):
"""
Open the database connection.
"""
pass
def close(self):
"""
Close connection.
"""
pass
def cursor(self):
"""
:return: cursor to access data.
"""
return self._cursor_constructor(self._connection)
class IPFIXConnection(FlowConnection):
"""
Connection to an IPFIX file.
"""
_ipfix_file = '' # name of the ipfix input file
_silk_file = None # silk file pipe
def __init__(self, ipfix_file):
"""
Create a connection to an IPFIX file.
:param ipfix_file: file name.
"""
super(IPFIXConnection, self).__init__(IPFIXCursor)
self._ipfix_file = ipfix_file
def open(self):
super(IPFIXConnection, self).open()
self._silk_file = '/tmp/' + os.path.basename(self._ipfix_file) + id_generator() + '.rw'
# print('Temporary file: ' + self._silk_file)
os.mkfifo(self._silk_file)
Popen(['rwipfix2silk', '--silk-output='+self._silk_file, self._ipfix_file])
self._connection = silk.silkfile_open(self._silk_file, silk.READ)
def close(self):
if self._connection:
# It seems the connection is closed automatically.
# self._connection.close()
# only need to remove the temporary silk file.
os.remove(self._silk_file)
super(IPFIXConnection, self).close()
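# Hedged usage sketch (assumes the SiLK Python bindings and the rwipfix2silk
# tool are installed; "flows.ipfix" is a hypothetical input file):
#
#     conn = IPFIXConnection("flows.ipfix")
#     conn.open()                          # pipes rwipfix2silk into a named fifo
#     cur = conn.cursor()
#     cur.execute("select * from flows;")  # plain select: no time filtering
#     for record in cur:                   # yields silk flow records
#         ...
#     conn.close()                         # removes the temporary fifo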
|
{"hexsha": "124b9b08eda1de9885777c8d66e80eacb93cbcc7", "size": 6313, "ext": "py", "lang": "Python", "max_stars_repo_path": "netdata/importer/ipfix_connection.py", "max_stars_repo_name": "mincode/netdata", "max_stars_repo_head_hexsha": "4369a3bfb473509eff92083e03f214d5b75f6074", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "netdata/importer/ipfix_connection.py", "max_issues_repo_name": "mincode/netdata", "max_issues_repo_head_hexsha": "4369a3bfb473509eff92083e03f214d5b75f6074", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "netdata/importer/ipfix_connection.py", "max_forks_repo_name": "mincode/netdata", "max_forks_repo_head_hexsha": "4369a3bfb473509eff92083e03f214d5b75f6074", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0921658986, "max_line_length": 95, "alphanum_fraction": 0.6093774751, "include": true, "reason": "import networkx", "num_tokens": 1454}
|
import os
import numpy as np
from tensorboardX import SummaryWriter
import copy
import post_process.calculations.calculation_utils as cutils
import post_process.visualization.visualization_utils as vutils
from post_process.calculations.mission_calcs import MissionCalcs
from post_process.calculations.overall_stat import OverallStat
from matplotlib import pyplot as plt
# importing library
import csv
BLACK = "#000000"
DARK_ORANGE = "#ff8c00"
VIVID_ORANGE = "#FF5E0E"
RED_ORANGE = "#FF4500"
LIGHT_ORANGE = "#ffa500"
VERY_LIGHT_ORANGE = "#ffddb3"
DARK_GRAY = "#595959"
MEDIUM_GRAY = "#999999"
LIGHT_GRAY = "#b9b9b9"
VERY_LIGHT_GRAY = "#cecece"
VERY_VERY_LIGHT_GRAY = "#eaeaea"
DARK_BLUE = "#42838f"
LIGHT_BLUE = "#aaccff"
DARK_NEON_BLUE = "#0066ff"
NEON_BLUE = "#00ccff"
DARK_GREEN = "#006400"
LIGHT_GREEN = "#90EE90"
LIGHT_GREEN_BLUE = "#87decd"
# DARK_GREEN_V2 = "#4b742f"
# DARK_GREEN_V2 = "#548235"
DARK_GREEN_V2 = "#658f49"
LIGHT_GREEN_V2 = "#a9c09a"
PURPLE = "#9900cc"
SLAC = "#947F73"
LILAC = "#7E7193"
def add_mission_calc_to_tensorboard(path):
# this function adds not_mission info to Tensorboard for all simulations in path
subfolders = cutils.get_subfolders(path)
for subfolder in subfolders:
print("running add_mission_calc_to_tensorboard for subfolder = ", subfolder)
mission_calcs = MissionCalcs(path, subfolder)
mission_calcs.mission_not_accomplished()
mission_calcs.add_to_tensorboard()
# mission_calcs.debug_plot_results()
def get_overall_stats(path, n=None, single=False, subfolders=None, episode_duration=16):
# this function compares all the runs in path by averaging them over the last n episodes
if subfolders is None:
if single:
subfolders = [os.path.split(path)[1]]
path = os.path.split(path)[0]
else:
subfolders = cutils.get_subfolders(path)
overall_stats = {"paths": [],
"average_episode_length": [],
"average_episode_reward": [],
"average_mission_time": [],
"average_speed_all": [],
"average_speed_controlled": [],
"average_speed_human": [],
"reward_params": [],
"total_not_mission": [],
"total_crash": [],
"crash_episodes": [],
"crash_episodes_flag_all": [],
"average_distance_travelled": [],
"average_distance_travelled_human": [],
"average_distance_travelled_controlled": [],
"average_distance_travelled_stat": [],
"average_distance_travelled_human_stat": [],
"average_distance_travelled_controlled_stat": [],
"average_episode_reward_stat": [],
"not_mission_episodes_flag_all": []
}
for subfolder in subfolders:
if subfolder == "test":
continue
print("running get_overall_stats for subfolder = ", subfolder)
        full_path = os.path.join(path, subfolder)
overall_stat = OverallStat(path, subfolder, n,episode_duration=episode_duration)
overall_stat.get_overall_stat()
overall_stats["paths"].append(full_path)
overall_stats["average_episode_length"]. \
append(overall_stat.average_episode_length)
overall_stats["average_episode_reward"]. \
append(overall_stat.average_episode_reward)
overall_stats["average_mission_time"]. \
append(overall_stat.average_mission_time)
overall_stats["average_speed_all"]. \
append(overall_stat.average_speed_all)
overall_stats["average_speed_controlled"]. \
append(overall_stat.average_speed_controlled)
overall_stats["average_speed_human"]. \
append(overall_stat.average_speed_human)
overall_stats["total_not_mission"]. \
append(overall_stat.total_not_mission)
overall_stats["total_crash"]. \
append(overall_stat.total_crash)
overall_stats["crash_episodes"]. \
append(overall_stat.crash_episodes)
overall_stats["crash_episodes_flag_all"]. \
append(overall_stat.crash_episodes_flag_all)
overall_stats["average_distance_travelled"]. \
append(overall_stat.average_speed_all * overall_stat.average_episode_length)
overall_stats["average_distance_travelled_human"]. \
append(overall_stat.average_speed_human * overall_stat.average_episode_length)
overall_stats["average_distance_travelled_controlled"]. \
append(overall_stat.average_speed_controlled * overall_stat.average_episode_length)
overall_stats["average_distance_travelled_stat"]. \
append(overall_stat.average_distance_travelled_stat)
overall_stats["average_distance_travelled_controlled_stat"]. \
append(overall_stat.average_distance_travelled_controlled_stat)
overall_stats["average_distance_travelled_human_stat"]. \
append(overall_stat.average_distance_travelled_human_stat)
overall_stats["average_episode_reward_stat"]. \
append(overall_stat.average_episode_reward_stat)
overall_stats["not_mission_episodes_flag_all"]. \
append(overall_stat.not_mission_episodes_flag_all)
return overall_stats
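# Typical pipeline (a sketch; the paths, metric names and n are placeholders):
#
#     overall_stats = get_overall_stats("/path/to/simulations", n=2000)
#     overall_stats = append_from_json(overall_stats)   # defined below
#     compare_folder(overall_stats, metrics=["total_crash"],
#                    plots_output_path="/path/to/plots", n=2000)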
def compare_stats_action_hist(stats, metrics=None, x_name=None, location_x=None, y_name=None, location_y=None,
plots=None, location_plot=None, base_dict=None, fig_out_path=None):
figsize = [20, 3]
for z_name in metrics:
x_vals = []
y_vals = []
plots_vals = []
metric = []
dict_map = {
"binary": 0,
"xy_discrete": 1,
"discrete": 2
}
if base_dict:
location_x = location_y = location_plot = base_dict
for i, value in enumerate(stats[z_name]):
# if stats[condition][condition_location] != condition_value:
# continue
metric.append(value)
val1 = stats[location_x][i][x_name]
if isinstance(val1, list):
val1 = val1[0]
x_vals.append(val1)
val2 = stats[location_y][i][y_name]
if isinstance(val2, list):
val2 = val2[0]
val2 = dict_map[val2]
y_vals.append(val2)
plots_vals. \
append(stats[location_plot][i][plots])
# TODO: clean this up and put them in a single 3D array
x_vals = np.array(x_vals)
y_vals = np.array(y_vals)
plots_vals = np.array(plots_vals)
metric = np.array(metric)
max_z = max(metric)
# max_z = 100
plot_vals_set = set(plots_vals)
axs = []
if len(plot_vals_set) == 2:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
ax1 = fig.add_subplot(121, projection='3d')
axs.append(ax1)
ax2 = fig.add_subplot(122, projection='3d')
axs.append(ax2)
elif len(plot_vals_set) == 3:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
            # axs starts empty, so append (indexing axs[0] here would raise IndexError)
            axs.append(fig.add_subplot(131, projection='3d'))
            axs.append(fig.add_subplot(132, projection='3d'))
            axs.append(fig.add_subplot(133, projection='3d'))
# fig, axs = plt.subplots(1,len(plot_vals_set), figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
for idx, val in enumerate(plot_vals_set):
# filter based on cooperative_rewards
x = x_vals[np.where(plots_vals == val)]
y = y_vals[np.where(plots_vals == val)]
z = metric[np.where(plots_vals == val)]
plt_file_name = os.path.split(fig_out_path)[1]
plot_name = plots + " = " + str(val) + " - " + plt_file_name
tmp_info = []
for idx2, path in enumerate(stats["paths"]):
if plots_vals[idx2] == val:
scenario_name = path.split("_")[-1]
tmp_info.append(scenario_name)
fig1 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, show=True, info=tmp_info)
print(">>>>>>>>>>>>>>>>>>>>>>> INFO: ", plot_name, x_name, x, y_name, y, z_name, z)
if len(plot_vals_set) > 1:
fig2 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, ax=axs[idx])
if fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_" + plots + "=" + str(val)
fig1.savefig(fig_out_path_final + ".png", dpi=300)
if len(plot_vals_set) > 1 and fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_subplots_" + plots
fig.savefig(fig_out_path_final + ".png", dpi=300)
print("end")
def compare_stats_obs_features(stats, metrics=None, x_name=None, location_x=None, y_name=None, location_y=None,
plots=None, location_plot=None, base_dict=None, fig_out_path=None):
figsize = [20, 3]
for z_name in metrics:
x_vals = []
y_vals = []
plots_vals = []
metric = []
dict_mission_vehicle_observation = {
False: 0,
True: 1,
}
dict_features = {
tuple(["presence", "x", "y", "vx", "vy", "is_controlled"]): 0,
tuple(["presence", "x", "y", "vx", "vy"]): 1,
tuple(["x", "y", "vx", "vy", "is_controlled"]): 2
}
dict_order = {
'sorted': 0,
'sorted_by_id': 1,
'shuffled': 2
}
if base_dict:
location_x = location_y = location_plot = base_dict
# location_x = location_y = location_plot= "observation"
# x_name = "features"
# y_name = "order"
# plots = "mission_vehicle_observation"
for i, value in enumerate(stats[z_name]):
# if stats[condition][condition_location] != condition_value:
# continue
metric.append(value)
val1 = stats[location_x][i][x_name]
val1 = dict_features[tuple(val1)]
if isinstance(val1, list):
val1 = val1[0]
x_vals.append(val1)
val2 = stats[location_y][i][y_name]
if isinstance(val2, list):
val2 = val2[0]
val2 = dict_order[val2]
y_vals.append(val2)
val3 = stats[location_plot][i][plots]
plots_vals.append(dict_mission_vehicle_observation[val3])
# TODO: clean this up and put them in a single 3D array
x_vals = np.array(x_vals)
y_vals = np.array(y_vals)
plots_vals = np.array(plots_vals)
metric = np.array(metric)
max_z = max(metric)
# max_z = 100
plot_vals_set = set(plots_vals)
axs = []
if len(plot_vals_set) == 2:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
ax1 = fig.add_subplot(121, projection='3d')
axs.append(ax1)
ax2 = fig.add_subplot(122, projection='3d')
axs.append(ax2)
elif len(plot_vals_set) == 3:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
            # axs starts empty, so append (indexing axs[0] here would raise IndexError)
            axs.append(fig.add_subplot(131, projection='3d'))
            axs.append(fig.add_subplot(132, projection='3d'))
            axs.append(fig.add_subplot(133, projection='3d'))
# fig, axs = plt.subplots(1,len(plot_vals_set), figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
for idx, val in enumerate(plot_vals_set):
# filter based on cooperative_rewards
x = x_vals[np.where(plots_vals == val)]
y = y_vals[np.where(plots_vals == val)]
z = metric[np.where(plots_vals == val)]
plt_file_name = os.path.split(fig_out_path)[1]
plot_name = plots + " = " + str(val) + " - " + plt_file_name
tmp_info = []
for idx2, path in enumerate(stats["paths"]):
if plots_vals[idx2] == val:
scenario_name = path.split("_")[-1]
tmp_info.append(scenario_name)
fig1 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, show=True, info=tmp_info)
print(">>>>>>>>>>>>>>>>>>>>>>> INFO: ", plot_name, x_name, x, y_name, y, z_name, z)
if len(plot_vals_set) > 1:
fig2 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, ax=axs[idx])
if fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_" + plots + "=" + str(val)
fig1.savefig(fig_out_path_final + ".png", dpi=300)
if len(plot_vals_set) > 1 and fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_subplots_" + plots
fig.savefig(fig_out_path_final + ".png", dpi=300)
print("end")
def compare_stats(stats, metrics=None, x_name=None, location_x=None, y_name=None, location_y=None, plots=None,
location_plot=None, base_dict=None, fig_out_path=None, condition_value=None, condition_name=None):
'''
[in] stats should be created by
overall_stats = get_overall_stats(path, 2000)
overall_stats = append_from_json(overall_stats)
'''
figsize = [20, 3]
for z_name in metrics:
x_vals = []
y_vals = []
plots_vals = []
metric = []
if base_dict:
location_x = location_y = location_plot = base_dict
path_ok = []
for i, value in enumerate(stats[z_name]):
if condition_name:
if stats[location_x][i][condition_name] != condition_value:
continue
metric.append(value)
val1 = stats[location_x][i][x_name]
if isinstance(val1, list):
val1 = val1[0]
x_vals.append(val1)
val2 = stats[location_y][i][y_name]
if isinstance(val2, list):
val2 = val2[0]
y_vals.append(val2)
tmp_val = stats[location_plot][i][plots]
if isinstance(tmp_val, list):
tmp_val = tmp_val[0]
plots_vals. \
append(tmp_val)
path_ok.append(stats["paths"][i])
# TODO: clean this up and put them in a single 3D array
x_vals = np.array(x_vals)
y_vals = np.array(y_vals)
plots_vals = np.array(plots_vals)
metric = np.array(metric)
max_z = max(metric)
# max_z = 100
plot_vals_set = set(plots_vals)
axs = []
if len(plot_vals_set) == 2:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
ax1 = fig.add_subplot(121, projection='3d')
axs.append(ax1)
ax2 = fig.add_subplot(122, projection='3d')
axs.append(ax2)
elif len(plot_vals_set) == 3:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
axs.append(ax1)
axs.append(ax2)
axs.append(ax3)
elif len(plot_vals_set) == 4:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
ax1 = fig.add_subplot(141, projection='3d')
ax2 = fig.add_subplot(142, projection='3d')
ax3 = fig.add_subplot(143, projection='3d')
ax4 = fig.add_subplot(144, projection='3d')
axs.append(ax1)
axs.append(ax2)
axs.append(ax3)
axs.append(ax4)
elif len(plot_vals_set) == 5:
fig = plt.figure(figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
            # axs starts empty, so append (indexing axs[0] here would raise IndexError)
            axs.append(fig.add_subplot(151, projection='3d'))
            axs.append(fig.add_subplot(152, projection='3d'))
            axs.append(fig.add_subplot(153, projection='3d'))
            axs.append(fig.add_subplot(154, projection='3d'))
            axs.append(fig.add_subplot(155, projection='3d'))
# fig, axs = plt.subplots(1,len(plot_vals_set), figsize=(figsize[0], figsize[1] * len(plot_vals_set)))
for idx, val in enumerate(plot_vals_set):
# filter based on cooperative_rewards
x = x_vals[np.where(plots_vals == val)]
y = y_vals[np.where(plots_vals == val)]
z = metric[np.where(plots_vals == val)]
plt_file_name = os.path.split(fig_out_path)[1]
plot_name = plots + " = " + str(val) + " - " + plt_file_name
tmp_info = []
for idx2, path in enumerate(path_ok):
if plots_vals[idx2] == val:
scenario_name = path.split("_")[-1]
tmp_info.append(scenario_name)
fig1 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, show=False, info=tmp_info)
if len(plot_vals_set) > 1:
fig2 = vutils.simple_3d_bar_plot(x, y, z,
x_name,
y_name,
z_name,
plot_name, zlim3d=max_z, ax=axs[idx], info=tmp_info)
if fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_" + plots + "=" + str(val)
if condition_name:
fig_out_path_final += "_" + condition_name + str(condition_value)
fig1.savefig(fig_out_path_final + ".png", dpi=300)
if len(plot_vals_set) > 1 and fig_out_path:
fig_out_path_final = fig_out_path + "_" + z_name + "_subplots_" + plots
if condition_name:
fig_out_path_final += "_" + condition_name + str(condition_value)
fig.savefig(fig_out_path_final + ".png", dpi=300)
print("end")
def append_from_json(stats):
'''
[in] stats: this should be created overall_stats = get_overall_stats(path, 2000)
reads the json and append the stats with reward_params and configs of merging vehicle
'''
stats['merging_vehicle'] = []
stats['observation'] = []
stats['metadata'] = []
for path in stats["paths"]:
metadata = cutils.read_metadata_file(path)
reward_params = metadata['env']['reward']
stats['reward_params'].append(reward_params)
merging_vehicle = metadata['env']['merging_vehicle']
stats['merging_vehicle'].append(merging_vehicle)
observation = metadata['env']['observation']
stats['observation'].append(observation)
stats['metadata'].append(metadata)
return stats
def add_total_distance_travelled(stats):
stats['total_distance_travelled'] = []
# for path in stats["paths"]:
#
#
return stats
# output {'exp_number': [metric1_val, metric2_val,...] ; ....}
def get_experiments(stats, metrics=None, train=True, n=900, max_distance=400):
exp_dict= {}
exp_dict["metrics"] = metrics
for i, path in enumerate(stats["paths"]):
        exp = path.split("_")[-1]
        if not train:
            exp = exp.split("-")[0]
metric_vals = []
for metric in metrics:
            if metric != "average_distance_travelled":
                val = stats[metric][i] / n * 100
else:
# val = stats[metric][i] / max_distance * 100
val = stats[metric][i]
metric_vals.append(val)
exp_dict[exp] = metric_vals
return exp_dict
def compare_folder(stats, metrics=None, plots_output_path=None, n=900, percentage=True, set_limit=True):
figsize = [10, 6]
# plt.rcdefaults()
    if percentage:
        x_limit = 100
    else:
        x_limit = n
header = ['Name', 'metric']
for z_name in metrics:
# metric = []
all_stats_non_aggressive = []
legend_non_aggressive = []
paths = []
for i, value in enumerate(stats[z_name]):
# metric.append(value)
            if percentage:
                # value = value/n*100
                if z_name != "average_distance_travelled":
                    value = float(value) / n * 100
                else:
                    value = float(value) / 400 * 100
all_stats_non_aggressive.append(value)
# legend_name = "plt_" + stats["paths"][i].split("_")[-1]
legend_name = "plt_" + stats["paths"][i]
legend_non_aggressive.append(legend_name)
paths.append(stats["paths"][i].split("_")[-1])
order = legend_non_aggressive
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt_name = plots_output_path.split("_")[-1]
plt_name = os.path.split(plots_output_path)[-1]
plt.ioff()
# fig, ax = plt.subplots(figsize=figsize)
# y_pos = np.arange(len(all_stats_aggressive_order))
# ax.barh(y_pos, all_stats_aggressive_order, align='center')
# ax.set_yticks(y_pos)
# ax.set_yticklabels(order)
# ax.invert_yaxis() # labels read top-to-bottom
# ax.set_xlabel(z_name)
# ax.set_title('Aggressive')
# print(order)
# print('Aggressive', z_name, all_stats_aggressive_order)
# plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
# out_file = plt_name + 'Aggressive' + z_name
# fig_out_path = os.path.join(plots_output_path, out_file)
# fig.savefig(fig_out_path + ".png", dpi=300)
fig, ax = plt.subplots(figsize=figsize)
y_pos = np.arange(len(all_stats_non_aggressive))
ax.barh(y_pos, all_stats_non_aggressive, align='center')
for i, v in enumerate(all_stats_non_aggressive):
ax.text(v + 3, i + .25, str(v), color='blue', fontweight='bold')
if set_limit:
ax.set_xlim(0, x_limit)
ax.set_yticks(y_pos)
ax.set_yticklabels(order)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel(z_name)
ax.set_title(plt_name)
print(order)
print(plt_name, z_name, all_stats_non_aggressive)
plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
out_file = plt_name + "_" + z_name
fig_out_path = os.path.join(plots_output_path, out_file)
data = [legend_non_aggressive, all_stats_non_aggressive]
datat = np.array(data).T
with open(fig_out_path + '.csv', 'w') as f:
writer = csv.writer(f)
            writer.writerow(header)
writer.writerows(datat)
# file = open(fig_out_path + '.csv', 'w', newline='')
# writer = csv.DictWriter(file, fieldnames=header)
# writer.writeheader()
fig.savefig(fig_out_path + ".png", dpi=300)
# plt.show()
def compare_behavior(stats, metrics=None, plots_output_path=None, condition_name=None, condition_value=None):
figsize = [10, 6]
# plt.rcdefaults()
for z_name in metrics:
# metric = []
all_stats_aggressive = []
legend_aggressive = []
all_stats_non_aggressive = []
legend_non_aggressive = []
paths = []
for i, value in enumerate(stats[z_name]):
# metric.append(value)
if condition_name and condition_value:
if stats["merging_vehicle"][i][condition_name][1] != condition_value:
continue
if stats["merging_vehicle"][i]["vehicles_type"] == "highway_env.vehicle.behavior.CustomVehicleAggressive":
all_stats_aggressive.append(value)
legend_name = "coop_" + str(stats["reward_params"][i]["cooperative_flag"]) + "_symp_" + str(
stats["reward_params"][i][
"sympathy_flag"]) + "_percep_" + str(stats["observation"][i]["cooperative_perception"])
legend_aggressive.append(legend_name)
paths.append(stats["paths"][i].split("_")[-1])
else:
all_stats_non_aggressive.append(value)
legend_name = "coop_" + str(stats["reward_params"][i]["cooperative_flag"]) + "_symp_" + str(
stats["reward_params"][i][
"sympathy_flag"]) + "_percep_" + str(stats["observation"][i]["cooperative_perception"])
legend_non_aggressive.append(legend_name)
paths.append(stats["paths"][i].split("_")[-1])
all_stats_aggressive_order = []
all_stats_non_aggressive_order = []
if len(legend_non_aggressive) == 4:
order = ['coop_False_symp_False_percep_False', 'coop_False_symp_False_percep_True',
'coop_True_symp_False_percep_True', 'coop_True_symp_True_percep_True']
else:
order = ['coop_False_symp_False_percep_True',
'coop_True_symp_False_percep_True', 'coop_True_symp_True_percep_True']
order_name = copy.deepcopy(order)
for ido, o in enumerate(order):
if legend_aggressive:
idx1 = [idx for idx, l in enumerate(legend_aggressive) if l == o]
all_stats_aggressive_order.append(all_stats_aggressive[idx1[0]])
if legend_non_aggressive:
idx1 = [idx for idx, l in enumerate(legend_non_aggressive) if l == o]
all_stats_non_aggressive_order.append(all_stats_non_aggressive[idx1[0]])
order_name[ido] = order[ido] + paths[idx1[0]]
order = order_name
if legend_aggressive and legend_non_aggressive:
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
plt.ioff()
fig, ax = plt.subplots(figsize=figsize)
y_pos = np.arange(len(all_stats_aggressive_order))
ax.barh(y_pos, all_stats_aggressive_order, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(order)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel(z_name)
ax.set_title('Aggressive')
print(order)
print('Aggressive', z_name, all_stats_aggressive_order)
plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
            plt_name = os.path.split(plots_output_path)[-1]
out_file = plt_name + 'Aggressive' + z_name
fig_out_path = os.path.join(plots_output_path, out_file)
fig.savefig(fig_out_path + ".png", dpi=300)
fig, ax = plt.subplots(figsize=figsize)
y_pos = np.arange(len(all_stats_non_aggressive_order))
ax.barh(y_pos, all_stats_non_aggressive_order, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(order)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel(z_name)
ax.set_title('Non Aggressive')
print(order)
print('Non Aggressive', z_name, all_stats_non_aggressive_order)
plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
out_file = plt_name + 'NonAgressive_' + z_name
fig_out_path = os.path.join(plots_output_path, out_file)
fig.savefig(fig_out_path + ".png", dpi=300)
if not legend_aggressive:
# figManager = plt.get_current_fig_manager()
# figManager.window.showMaximized()
# plt_name = plots_output_path.split("_")[-1]
plt_name = os.path.split(plots_output_path)[-1]
plt.ioff()
# fig, ax = plt.subplots(figsize=figsize)
# y_pos = np.arange(len(all_stats_aggressive_order))
# ax.barh(y_pos, all_stats_aggressive_order, align='center')
# ax.set_yticks(y_pos)
# ax.set_yticklabels(order)
# ax.invert_yaxis() # labels read top-to-bottom
# ax.set_xlabel(z_name)
# ax.set_title('Aggressive')
# print(order)
# print('Aggressive', z_name, all_stats_aggressive_order)
# plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
# out_file = plt_name + 'Aggressive' + z_name
# fig_out_path = os.path.join(plots_output_path, out_file)
# fig.savefig(fig_out_path + ".png", dpi=300)
fig, ax = plt.subplots(figsize=figsize)
y_pos = np.arange(len(all_stats_non_aggressive_order))
ax.barh(y_pos, all_stats_non_aggressive_order, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(order)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel(z_name)
ax.set_title(plt_name)
print(order)
print(plt_name, z_name, all_stats_non_aggressive_order)
plt.subplots_adjust(left=0.3, bottom=0.1, right=0.95, top=0.9)
# plt.margins(0.9)
out_file = plt_name + "_" + z_name
if condition_name and condition_value:
out_file += "_" + condition_name + "_" + str(condition_value)
fig_out_path = os.path.join(plots_output_path, out_file)
fig.savefig(fig_out_path + ".png", dpi=300)
# plt.show()
def avg_every_n_episodes(simulation_path_base, n=20, span=100, std=True, span_std=20, subfolders=None,
metric="crash_episodes_flag_all"):
overall_stats = get_overall_stats(simulation_path_base, subfolders=subfolders)
overall_stats = append_from_json(overall_stats)
avg_crashes = []
lens = []
for idx, crashes in enumerate(overall_stats["crash_episodes_flag_all"]):
avg_crash = cutils.average_binary_array(crashes, n)
avg_crashes.append(avg_crash)
lens.append(len(avg_crash))
pathx = overall_stats["paths"][idx]
l = min(lens)
avg_crashes_filter = [avg_crash_filter[0:l - 1] for avg_crash_filter in avg_crashes]
alpha = 2 / (span + 1)
avg_crashes_filter_std = [np.std(cutils.rolling_window(np.array(data), span_std), 1) for data in avg_crashes_filter]
avg_crashes_filter_ema = [cutils.ewma_vectorized_safe(data, alpha) for data in avg_crashes_filter]
avg_crashes_np = np.array(avg_crashes_filter_ema)
    if std:
        avg_crashes_filter_std_zeros = np.zeros(np.shape(avg_crashes_filter_ema))
        avg_crashes_filter_std_zeros[:, span_std - 1:] = avg_crashes_filter_std
        return avg_crashes_np, np.array(avg_crashes_filter_std_zeros)
    return avg_crashes_np
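# Note on the smoothing above: alpha = 2 / (span + 1) is the standard
# exponential-moving-average weight for a given span (the convention used by
# pandas' ewm(span=...)), i.e. ema[t] = alpha * x[t] + (1 - alpha) * ema[t-1].
# A minimal reference version of what cutils.ewma_vectorized_safe is assumed
# to compute (just non-vectorized):
#
#     def ewma(x, alpha):
#         out = [x[0]]
#         for v in x[1:]:
#             out.append(alpha * v + (1 - alpha) * out[-1])
#         return out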
|
{"hexsha": "60b5e080de82abb9bc4d2736dfd64fe705063ffc", "size": 32292, "ext": "py", "lang": "Python", "max_stars_repo_path": "post_process/applications/applications.py", "max_stars_repo_name": "rvalienter90/rl-agents", "max_stars_repo_head_hexsha": "ad6be08f9a7e2f0ec0daf6f557bd9f476bb9e4da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "post_process/applications/applications.py", "max_issues_repo_name": "rvalienter90/rl-agents", "max_issues_repo_head_hexsha": "ad6be08f9a7e2f0ec0daf6f557bd9f476bb9e4da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "post_process/applications/applications.py", "max_forks_repo_name": "rvalienter90/rl-agents", "max_forks_repo_head_hexsha": "ad6be08f9a7e2f0ec0daf6f557bd9f476bb9e4da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4, "max_line_length": 120, "alphanum_fraction": 0.573702465, "include": true, "reason": "import numpy", "num_tokens": 7512}
|
import multiprocessing
import argparse
import os
import shutil
import numpy as np
import pandas as pd
import torch
from allennlp.common.params import Params
from allennlp.training.learning_rate_schedulers import LearningRateScheduler
from allennlp.training.optimizers import Optimizer
from torch.nn import DataParallel
from torch.nn.modules import BatchNorm2d
from tqdm import tqdm
import config
import gc
gc.collect()
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# from dataloaders.vcr import VCR, VCRLoader
from dataloaders.vcr_crf import VCR, VCRLoader
from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \
restore_checkpoint, print_para, restore_best_checkpoint
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
# os.environ["CUDA_VISIBLE_DEVICES"]="2"
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG)
# This is needed to make the imports work
from allennlp.models import Model
import models
seed = 522
torch.manual_seed(seed)
np.random.seed(seed)
def _init_fn(worker_id):
np.random.seed(seed)
#################################
#################################
######## Data loading stuff
#################################
#################################
parser = argparse.ArgumentParser(description='train')
parser.add_argument(
'-params',
dest='params',
help='Params location',
type=str,
)
parser.add_argument(
'-rationale',
action="store_true",
help='use rationale',
)
parser.add_argument(
'-folder',
dest='folder',
help='folder location',
type=str,
)
parser.add_argument(
'-vcr_data',
dest='vcr_data',
help='vcr data location',
type=str,
)
parser.add_argument(
'-no_tqdm',
dest='no_tqdm',
action='store_true',
)
parser.add_argument(
'-aug_flag',
dest='aug_flag',
action='store_true',
)
parser.add_argument(
'-att_reg',
type=float,
dest='att_reg'
)
args = parser.parse_args()
config.VCR_ANNOTS_DIR = args.__dict__['vcr_data']
print('vcr annots dir', config.VCR_ANNOTS_DIR)
params = Params.from_file(args.params)
train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'),
only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', True), aug_flag=args.aug_flag)
#NUM_GPUS = torch.cuda.device_count()
#NUM_CPUS = multiprocessing.cpu_count()
NUM_GPUS = 2
NUM_CPUS = 8
print('number gpus: ', NUM_GPUS)
if NUM_GPUS == 0:
raise ValueError("you need gpus!")
def _to_gpu(td):
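    # When NUM_GPUS > 1 the batch is intentionally returned on the CPU:
    # torch.nn.DataParallel scatters inputs to the GPUs itself, so only the
    # single-GPU path below moves tensors (and dicts of tensors) to CUDA.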
if NUM_GPUS > 1:
return td
for k in td:
if k != 'metadata':
            if isinstance(td[k], dict):
                td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()}
            else:
                td[k] = td[k].cuda(non_blocking=True)
# td[k] = {k2: v.cuda(async=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[k].cuda(
# async=True)
return td
# num_workers = (4 * NUM_GPUS if NUM_CPUS == 32 else 2*NUM_GPUS)-1
num_workers = 8
batch_size = 24
print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True)
# loader_params = {'batch_size': batch_size// NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers, 'worker_init_fn': _init_fn}
loader_params = {'batch_size': batch_size// NUM_GPUS, 'num_gpus':NUM_GPUS, 'num_workers':num_workers}
train_loader = VCRLoader.from_dataset(train, **loader_params)
val_loader = VCRLoader.from_dataset(val, **loader_params)
# test_loader = VCRLoader.from_dataset(test, **loader_params)
ARGS_RESET_EVERY = 100
print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True)
print(str(params['model']))
model = Model.from_params(vocab=train.vocab, params=params['model'])
if config.double_flag:
model.double()
print('*'*100)
for submodule in model.detector.backbone.modules():
if isinstance(submodule, BatchNorm2d):
submodule.track_running_stats = False
for p in submodule.parameters():
p.requires_grad = False
att_flag = model.att_flag
multi_flag = model.multi_flag
wo_qa_flag = model.wo_qa
wo_qr_flag = model.wo_qr
print('att flag: ', att_flag)
print('multi flag: ', multi_flag)
print('qa flag: ', wo_qa_flag)
print('qr flag: ', wo_qr_flag)
model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda()
# model = model.cuda()
optimizer = Optimizer.from_params([x for x in model.named_parameters() if x[1].requires_grad],
params['trainer']['optimizer'])
lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None)
scheduler = LearningRateScheduler.from_params(optimizer, lr_scheduler_params) if lr_scheduler_params else None
if os.path.exists(args.folder):
print("Found folder! restoring "+args.folder, flush=True)
start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=args.folder,
learning_rate_scheduler=scheduler)
else:
print("Making directories: ", args.folder)
os.makedirs(args.folder, exist_ok=True)
start_epoch, val_metric_per_epoch = 0, []
shutil.copy2(args.params, args.folder)
param_shapes = print_para(model)
num_batches = 0
multi_task_lambda = 1.0
for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch):
train_results = []
# norms = []
model.train()
for b, (time_per_batch, batch) in enumerate(time_batch(train_loader if args.no_tqdm else tqdm(train_loader), reset_every=ARGS_RESET_EVERY)):
batch = _to_gpu(batch)
optimizer.zero_grad()
output_dict = model(**batch)
loss = output_dict['loss'].mean() + output_dict['cnn_regularization_loss'].mean()
# loss = output_dict['loss'].mean() + output_dict['cnn_regularization_loss'].mean() + output_dict['answer_gd_loss'].mean() + output_dict['rationale_gd_loss'].mean()
if(multi_flag):
if not wo_qa_flag:
loss += multi_task_lambda*output_dict['answer_loss'].mean()
if not wo_qr_flag:
loss += multi_task_lambda*output_dict['rationale_loss'].mean()
# loss += output_dict['answer_loss'].mean() + output_dict['rationale_loss'].mean()
# if(att_flag):
# loss += kl_lambda*output_dict['kl_loss'].mean()
loss.backward()
num_batches += 1
if scheduler:
scheduler.step_batch(num_batches)
clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
        # norms.append(
        #     clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False)
        # )
optimizer.step()
# torch.cuda.empty_cache()
temp_dict ={'loss': output_dict['loss'].detach().mean().item(),
'crl': output_dict['cnn_regularization_loss'].detach().mean().item(),
'accuracy': (model.module if NUM_GPUS > 1 else model).get_metrics(
reset=(b % ARGS_RESET_EVERY) == 0)[
'accuracy'],
'sec_per_batch': time_per_batch,
'hr_per_epoch': len(train_loader) * time_per_batch / 3600,
}
# if(att_flag):
# temp_dict['kl_loss'] = output_dict['kl_loss'].detach().mean().item()
if multi_flag:
if not wo_qa_flag:
temp_dict['answer_loss'] = output_dict['answer_loss'].detach().mean().item()
if not wo_qr_flag:
temp_dict['rationale_loss'] = output_dict['rationale_loss'].detach().mean().item()
train_results.append(pd.Series(temp_dict))
del batch, output_dict, loss
print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean()))
val_probs = []
val_labels = []
val_loss_sum = 0.0
model.eval()
model.training = False
for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)):
with torch.no_grad():
batch = _to_gpu(batch)
output_dict = model(**batch)
val_probs.append(output_dict['label_logits'].detach().cpu().numpy())
val_labels.append(batch['label'].detach().cpu().numpy())
val_loss_sum += output_dict['loss'].detach().mean().item() * batch['label'].shape[0]
del batch, output_dict
val_labels = np.concatenate(val_labels, 0)
val_probs = np.concatenate(val_probs, 0)
val_loss_avg = val_loss_sum / val_labels.shape[0]
# np.save(os.path.join(args.folder, f'temp_question.npy'), val_test)
# np.save(os.path.join(args.folder, f'temp_preds.npy'), val_probs)
val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1))))
if scheduler:
scheduler.step(val_metric_per_epoch[-1], epoch_num)
print("Val epoch {} has acc {:.3f} and loss {:.3f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg),
flush=True)
if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']):
print("Stopping at epoch {:2d}".format(epoch_num))
break
save_checkpoint(model, optimizer, args.folder, epoch_num, val_metric_per_epoch,
is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1))
model.training = True
|
{"hexsha": "6a74b91d80df770f6de05ff5b84f0002697dd6ac", "size": 10034, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/crf_train.py", "max_stars_repo_name": "yangshao/vcr", "max_stars_repo_head_hexsha": "88513d6958d93bd7845d532d5b83744a678fc980", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/crf_train.py", "max_issues_repo_name": "yangshao/vcr", "max_issues_repo_head_hexsha": "88513d6958d93bd7845d532d5b83744a678fc980", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/crf_train.py", "max_forks_repo_name": "yangshao/vcr", "max_forks_repo_head_hexsha": "88513d6958d93bd7845d532d5b83744a678fc980", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6600790514, "max_line_length": 173, "alphanum_fraction": 0.6362367949, "include": true, "reason": "import numpy", "num_tokens": 2333}
|
module Not-named-according-to-the-Haskell-lexical-syntax where
postulate
IO : Set -> Set
{-# BUILTIN IO IO #-}
{-# COMPILED_TYPE IO IO #-}
postulate
return : {A : Set} -> A -> IO A
{-# COMPILED return (\_ -> return :: a -> IO a) #-}
{-# COMPILED_EPIC return (u1 : Unit, a : Any) -> Any = ioreturn(a) #-}
data Unit : Set where
unit : Unit
{-# COMPILED_DATA Unit () () #-}
|
{"hexsha": "1e6f9021050f6c43f707e07aa9114e849856d993", "size": 383, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "examples/compiler/Not-named-according-to-the-Haskell-lexical-syntax.agda", "max_stars_repo_name": "redfish64/autonomic-agda", "max_stars_repo_head_hexsha": "c0ae7d20728b15d7da4efff6ffadae6fe4590016", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-27T04:41:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-27T04:41:05.000Z", "max_issues_repo_path": "examples/compiler/Not-named-according-to-the-Haskell-lexical-syntax.agda", "max_issues_repo_name": "masondesu/agda", "max_issues_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/compiler/Not-named-according-to-the-Haskell-lexical-syntax.agda", "max_forks_repo_name": "masondesu/agda", "max_forks_repo_head_hexsha": "70c8a575c46f6a568c7518150a1a64fcd03aa437", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-12T11:35:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T11:35:18.000Z", "avg_line_length": 20.1578947368, "max_line_length": 70, "alphanum_fraction": 0.5953002611, "num_tokens": 118}
|
import numpy as np
from ddpg_agent.contrib.physics_sim import PhysicsSim
from ddpg_agent.quadcopter_environment import QuadcopterState
class Task():
"""Task (environment) that defines the goal and provides feedback to the agent."""
def __init__(self, init_pose=None, init_velocities=None,
init_angle_velocities=None, runtime=5., target_pos=None):
"""Initialize a Task object.
Params
======
init_pose: initial position of the quadcopter in (x,y,z) dimensions and the Euler angles
init_velocities: initial velocity of the quadcopter in (x,y,z) dimensions
init_angle_velocities: initial radians/second for each of the three Euler angles
runtime: time limit for each episode
target_pos: target/goal (x,y,z) position for the agent
"""
# Simulation
self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime)
self.action_repeat = 3
self.state_size = self.action_repeat * 6
self.action_low = 0
self.action_high = 900
self.action_size = 4
# Goal
self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.])
# self.state_size = 18
# self.observation_space = Space(
# np.hstack(( self.sim.lower_bounds, [-np.pi]*3, [float('-inf')]*6, [float('-inf')]*6)),
# np.hstack(( self.sim.upper_bounds, [np.pi]*3, [float('inf')]*6, [float('inf')]*6)) )
        # state_size and action_size were already set above
        self.observation_space = Space(
            list(list(self.sim.lower_bounds) + [-np.pi]*3) * self.action_repeat,
            list(list(self.sim.upper_bounds) + [np.pi]*3) * self.action_repeat)
        self.action_space = Space([0, 0, 0, 0], [900, 900, 900, 900])
def get_reward(self):
reward = 0
#Reward for horizontal distance to goal
horiz_dist, vert_dist = self.get_horiz_vert_distance_from_goal()
if vert_dist<10 and horiz_dist<10:
reward += 10-vert_dist
reward += .1*(10-horiz_dist)
return reward
def get_full_state(self):
return QuadcopterState( *self.sim.pose, *self.sim.v, *self.sim.angular_v,
*self.sim.linear_accel, *self.sim.angular_accels )
def get_horiz_vert_distance_from_goal(self):
horiz_dist = np.sqrt((self.sim.pose[0]-self.target_pos[0])**2 +(self.sim.pose[1]-self.target_pos[1])**2)
vert_dist = np.abs(self.target_pos[2]-self.sim.pose[2])
return horiz_dist, vert_dist
def step(self, rotor_speeds):
"""Uses action to obtain next state, reward, done."""
reward = 0
pose_all = []
for _ in range(self.action_repeat):
done = self.sim.next_timestep(rotor_speeds) # update the sim pose and velocities
reward += self.get_reward()
pose_all.append(self.sim.pose)
next_state = np.concatenate(pose_all)
return next_state, reward, done, None
def reset(self):
"""Reset the sim to start a new episode."""
self.sim.reset()
# Add some noise to the starting position
self.sim.pose[:3] += np.random.normal(0,3,3)
state = np.concatenate([self.sim.pose] * self.action_repeat)
return state
class Space():
def __init__(self, low, high):
low = np.array(low)
high = np.array(high)
assert low.shape == high.shape,\
"Expected bounds to be of same shape."
self.low = low
self.high = high
self.shape = low.shape
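# Hedged usage sketch (an illustrative random rollout, not part of the module):
#
#     task = Task(target_pos=np.array([0., 0., 10.]))
#     state = task.reset()
#     done = False
#     while not done:
#         rotor_speeds = np.random.uniform(task.action_low, task.action_high, 4)
#         state, reward, done, _ = task.step(rotor_speeds)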
|
{"hexsha": "5ef9a21cb4cf93f0908907e647d50d0b1aea7c6a", "size": 3644, "ext": "py", "lang": "Python", "max_stars_repo_path": "task.py", "max_stars_repo_name": "samhiatt/ddpg_agent", "max_stars_repo_head_hexsha": "1b96c36d184c810e7188dcc41752fab3f3739d2f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-13T16:01:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T18:50:54.000Z", "max_issues_repo_path": "task.py", "max_issues_repo_name": "samhiatt/ddpg_agent", "max_issues_repo_head_hexsha": "1b96c36d184c810e7188dcc41752fab3f3739d2f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:50:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:13:55.000Z", "max_forks_repo_path": "task.py", "max_forks_repo_name": "samhiatt/ddpg_agent", "max_forks_repo_head_hexsha": "1b96c36d184c810e7188dcc41752fab3f3739d2f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4888888889, "max_line_length": 112, "alphanum_fraction": 0.6196487377, "include": true, "reason": "import numpy", "num_tokens": 883}
|
C Simplified version of AXIAL, used to expose a bug in
C transformers_inter_full, most likely linked to bourdoncle
C restructuration.
SUBROUTINE UNSTRUC19
* ( DIRR, NUMIRR , NUMMAT , IBUR)
LOGICAL FINDMO
Real XIR (1)
2 FINDMO=.FALSE.
4 IF (FINDMO) THEN
IR=IRINF
5 CONTINUE
IF (ITYP.EQ.2 .AND. IR.LE.IRSUP) THEN
XIR(IR)=AFLOT
IR=IR+1
GOTO 5
ENDIF
GOTO 2
ENDIF
END
|
{"hexsha": "9d08fc2200b96a03a22960ca8303c6b1c9a2b675", "size": 519, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/Semantics/unstruc19.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/Semantics/unstruc19.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/Semantics/unstruc19.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 20.76, "max_line_length": 63, "alphanum_fraction": 0.5298651252, "num_tokens": 164}
|
#!/usr/bin/env python
# for stage-4 world
from __future__ import print_function
from enum import IntEnum
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
from scipy import ndimage, interpolate
import pdb
import glob
import os
import errno
import re
import time
import random
import cv2
from recordtype import recordtype
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
from torchvision import transforms
# from logger import Logger
from copy import deepcopy
import argparse
import copy
import math
from datetime import datetime
from gym_dal.maze import generate_map
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from a2c_ppo_acktr import arguments
from resnet_pm import resnet18, resnet34, resnet50, resnet101, resnet152
from torchvision.models.resnet import resnet18 as resnet18s
from torchvision.models.resnet import resnet34 as resnet34s
from torchvision.models.resnet import resnet50 as resnet50s
from torchvision.models.resnet import resnet101 as resnet101s
from torchvision.models.resnet import resnet152 as resnet152s
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
from networks import intrinsic_model
from networks import policy_A3C
def shift(grid, d, axis=None, fill = 0.5):
grid = np.roll(grid, d, axis=axis)
if axis == 0:
if d > 0:
grid[:d,:] = fill
elif d < 0:
grid[d:,:] = fill
elif axis == 1:
if d > 0:
grid[:,:d] = fill
elif d < 0:
grid[:,d:] = fill
return grid
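# Example of the fill semantics above (illustrative): shifting a 3x3 grid one
# row down discards the wrapped-around row and replaces it with `fill`.
#   g = np.arange(9.).reshape(3, 3)
#   shift(g, 1, axis=0, fill=0.5)[0]   # -> array([0.5, 0.5, 0.5])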
def softmax(w, t = 1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def softermax(w, t = 1.0):
w = np.array(w)
w = w - w.min() + np.exp(1)
e = np.log(w)
dist = e / np.sum(e)
return dist
def to_index(a, N, mm):
a_max = mm[1]
a_min = mm[0]
return int(np.floor(N*(a_max-a)/(a_max-a_min)))
def to_real(i, mm, n):
u = (mm[1]-mm[0])/n
u0 = u/2
return mm[1]-u*i - u0
def wrap(phase):
# wrap into [-pi, pi]
phase = ( phase + np.pi) % (2 * np.pi ) - np.pi
return phase
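# e.g. wrap(1.5 * np.pi) == -0.5 * np.pi, and wrap(np.pi) maps to -np.pi,
# since the wrapped interval is [-pi, pi).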
def normalize(x):
if x.min() == x.max():
return 0.0*x
x = x-x.min()
x = x/x.max()
return x
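# Round-trip sketch for the grid/world helpers above (illustrative): to_index
# maps a real coordinate into one of N bins over the interval mm (largest
# values land in bin 0), and to_real returns a bin's center, so
#   to_real(to_index(x, N, mm), mm, N)
# recovers an in-range x up to half a cell width, (mm[1] - mm[0]) / N / 2.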
Pose2d = recordtype("Pose2d", "theta x y")
Grid = recordtype("Grid", "head row col")
class Lidar():
def __init__(self, ranges, angle_min, angle_max,
range_min, range_max):
# self.ranges = np.clip(ranges, range_min, range_max)
self.ranges = ranges
self.angle_min = angle_min
self.angle_max = angle_max
num_data = len(self.ranges)
        self.angle_increment = (self.angle_max-self.angle_min)/num_data  # angular step between consecutive rays
class DalEnv(gym.Env):
class Actions(IntEnum):
left = 0
right = 1
forward = 2
hold = 3
def __init__(self):
seed = 1337
self.args = arguments.get_args_iko()
self.rl_test = False
self.start_time = time.time()
self.actions = DalEnv.Actions
self.action_space = spaces.Discrete(len(self.actions))
        if self.args.use_gpu > 0 and torch.cuda.is_available():
            self.device = torch.device("cuda")
torch.set_default_tensor_type(torch.cuda.FloatTensor)
else:
self.device = torch.device("cpu")
torch.set_default_tensor_type(torch.FloatTensor)
self.init_fig = False
self.n_maze_grids = None
self.grid_rows = self.args.map_size * self.args.sub_resolution
self.grid_cols = self.args.map_size * self.args.sub_resolution
self.grid_dirs = self.args.n_headings
self.collision_radius = self.args.collision_radius
num_dirs = 1
num_classes = self.args.n_lm_grids ** 2 * num_dirs
final_num_classes = num_classes
if self.args.n_pre_classes is not None:
num_classes = self.args.n_pre_classes
else:
num_classes = final_num_classes
self.map_rows, self.map_cols = 224, 224
if self.args.pm_net == "none":
self.perceptual_model = None
elif self.args.pm_net == "densenet121":
self.perceptual_model = densenet121(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1024
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet169":
self.perceptual_model = densenet169(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1664
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet201":
self.perceptual_model = densenet201(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 1920
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "densenet161":
self.perceptual_model = densenet161(pretrained = self.args.use_pretrained, drop_rate = self.args.drop_rate)
num_ftrs = self.perceptual_model.classifier.in_features # 2208
self.perceptual_model.classifier = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18s":
self.perceptual_model = resnet18s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet34s":
self.perceptual_model = resnet34s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet50s":
self.perceptual_model = resnet50s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet101s":
self.perceptual_model = resnet101s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet152s":
self.perceptual_model = resnet152s(pretrained=self.args.use_pretrained)
num_ftrs = self.perceptual_model.fc.in_features
self.perceptual_model.fc = nn.Linear(num_ftrs, num_classes)
elif self.args.pm_net == "resnet18":
self.perceptual_model = resnet18(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet34":
self.perceptual_model = resnet34(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet50":
self.perceptual_model = resnet50(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet101":
self.perceptual_model = resnet101(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features
elif self.args.pm_net == "resnet152":
self.perceptual_model = resnet152(num_classes = num_classes)
num_ftrs = self.perceptual_model.fc.in_features # 2048
else:
raise Exception('pm-net required: resnet or densenet')
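        # All branches above follow the same head-swap pattern; a generic
        # sketch (illustrative, assuming a torchvision-style model whose
        # classification head is exposed as `.fc`):
        #   model = resnet18s(pretrained=True)
        #   model.fc = nn.Linear(model.fc.in_features, num_classes)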
# if self.args.RL_type == 0:
# self.policy_model = policy_A3C(self.args.n_state_grids, 2+self.args.n_state_dirs, num_actions = self.args.num_actions)
# elif self.args.RL_type == 1:
# self.policy_model = policy_A3C(self.args.n_state_grids, 1+self.args.n_state_dirs, num_actions = self.args.num_actions)
# elif self.args.RL_type == 2:
# self.policy_model = policy_A3C(self.args.n_state_grids, 2*self.args.n_state_dirs, num_actions = self.args.num_actions, add_raw_map_scan = True)
self.intri_model = intrinsic_model(self.grid_rows)
self.max_scan_range = 3.5
self.min_scan_range = 0.1
self.manhattans = []
self.manhattan = 0
self.rewards = []
self.reward = 0
self.done = 0
self.step_count = 0
self.step_max = self.args.num[2]
self.map_2d = None
# self.laser_1d = None
self.xlim = (-3.0, 3.0)
self.ylim = (-3.0, 3.0)
if self.args.thickness == 0.0:
self.radius = 0.5*(self.xlim[1]-self.xlim[0])/self.args.map_size/2*0.9
else:
self.radius = (self.xlim[1]-self.xlim[0])/self.args.map_size/2*self.args.thickness
self.longest = float(self.grid_dirs/2 + self.grid_rows-1 + self.grid_cols-1) #longest possible manhattan distance
self.cell_size = (self.xlim[1]-self.xlim[0])/self.grid_rows
self.heading_resol = 2*np.pi/self.grid_dirs
self.fwd_step = self.cell_size*self.args.fwd_step
self.collision = False
self.sigma_xy = self.cell_size * 0.1
self.sigma_theta = self.heading_resol * 0.1
self.scans_over_map = np.zeros((self.grid_rows,self.grid_cols,360))
self.scans_over_map_high = np.zeros((self.map_rows, self.map_cols, 360))
self.scan_2d = np.zeros((self.map_rows, self.map_cols))
self.scan_2d_low = np.zeros((self.grid_rows, self.grid_cols))
self.map_2d = np.zeros((self.map_rows, self.map_cols))
self.map_design = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_design_tensor = torch.zeros((1,self.grid_rows, self.grid_cols),device=torch.device(self.device))
self.data_cnt = 0
self.bel_ent = np.log(1.0/(self.grid_dirs*self.grid_rows*self.grid_cols))
self.likelihood = torch.ones((self.grid_dirs,self.grid_rows, self.grid_cols),device=torch.device(self.device))
self.likelihood = self.likelihood / self.likelihood.sum()
# self.gt_likelihood_high = np.ones((self.grid_dirs, self.map_rows, self.map_cols))
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
# self.belief = torch.ones((self.grid_dirs,self.map_rows, self.map_cols),device=torch.device(self.device))
self.belief = torch.ones((self.grid_dirs, self.grid_rows, self.grid_cols), device=torch.device(self.device))
self.belief = self.belief / self.belief.sum()
self.loss_policy = 0
self.loss_value = 0
# self.turtle_loc = np.zeros((self.map_rows,self.map_cols))
self.turtle_loc = np.zeros((self.grid_rows, self.grid_cols))
# what to do
# current pose: where the robot really is. motion incurs errors in pose
self.current_pose = Pose2d(0,0,0)
self.goal_pose = Pose2d(0,0,0)
self.last_pose = Pose2d(0,0,0)
self.perturbed_goal_pose = Pose2d(0,0,0)
self.start_pose = Pose2d(0,0,0)
#grid pose
self.true_grid = Grid(head=0,row=0,col=0)
self.bel_grid = Grid(head=0,row=0,col=0)
self.reward_block_penalty = 0
self.reward_bel_gt = 0
self.reward_bel_gt_nonlog = 0
self.reward_infogain = 0
self.reward_bel_ent = 0
self.reward_hit = 0
self.reward_dist = 0
self.reward_inv_dist = 0
        self.actions_space = ["turn_left", "turn_right", "go_fwd", "hold"]
self.action_name = 'none'
self.current_state = "new_env_pose"
self.state = np.zeros((6, self.grid_rows, self.grid_cols), dtype=np.float32)
self.observation_space = spaces.Box(
low=0,
high=1,
shape=self.state.shape,
dtype='float32'
)
# Initialize the RNG
self.seed(seed=seed)
# Initialize the state
self.reset()
#end of init
def seed(self, seed=None):
self.np_random, _ = seeding.np_random(seed)
return [seed]
def reset(self):
self.clear_objects()
self.set_walls()
self.place_turtle()
self.get_lidar()
self.get_scan_2d()
self.step_count = 0
self.gt_likelihood_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.gt_likelihood_high = self.gt_likelihood_high / self.gt_likelihood_high.sum()
self.gt_likelihood_unnormalized = np.ones((self.grid_dirs,self.grid_rows,self.grid_cols))
self.gt_likelihood_unnormalized_high = np.ones((self.grid_dirs, self.grid_rows, self.grid_cols))
self.rewards = []
self.manhattans=[]
self.reward = 0
self.explored_space = np.zeros((self.grid_dirs,self.grid_rows, self.grid_cols),dtype='float')
self.bel_list = []
self.state[0,:,:] = self.map_design
        belief_np = self.belief.detach().cpu().numpy()
        self.state[1:5,:,:] = belief_np  # assumes grid_dirs == 4 belief channels
self.state[5,:,:] = self.scan_2d_low
return self.state
    @property
    def steps_remaining(self):
        return self.step_max - self.step_count
    def reset_pose(self):
        self.place_turtle()
        self.step_count = 0
        self.bel_ent = np.log(1.0/(self.grid_dirs*self.grid_rows*self.grid_cols))
        # reset belief to uniform
        self.belief[:,:,:] = 1.0
        self.belief /= self.belief.sum()
def step(self, action):
# start = time.time()
done = False
reward = 0
if self.step_count == 0:
self.get_synth_scan()
self.step_count = self.step_count + 1
self.action = action
self.action_name = self.actions_space[action]
self.update_target_pose()
self.collision_check()
self.execute_action_teleport()
self.transit_belief()
if self.collision == False:
self.update_true_grid()
self.get_lidar()
self.update_explored()
self.scan_2d, self.scan_2d_low = self.get_scan_2d_n_headings()
# self.get_scan_2d()
# self.generate_map_trans()
self.compute_gtl()
# self.likelihood = self.update_likelihood_rotate(self.map_2d, self.scan_2d)
self.likelihood = self.gt_likelihood_high
self.likelihood = self.likelihood/(torch.sum(self.likelihood))
if self.args.mask:
self.mask_likelihood()
self.product_belief() # likelihood x belief
self.update_bel_list()
self.get_reward()
self.state[0,:,:] = self.map_design
        belief_np = self.belief.detach().cpu().numpy()
        self.state[1:5,:,:] = belief_np  # assumes grid_dirs == 4 belief channels
self.state[5,:,:] = self.scan_2d_low
if self.step_count >= self.step_max:
done = True
obs = self.state
reward = self.reward
return obs, reward, done, {'reward_block_penalty': self.reward_block_penalty,
'reward_bel_gt': self.reward_bel_gt,
'reward_bel_gt_nonlog': self.reward_bel_gt_nonlog,
'reward_infogain': self.reward_infogain,
'reward_bel_ent': self.reward_bel_ent,
'reward_hit': self.reward_hit,
'reward_dist': self.reward_dist,
'reward_inv_dist': self.reward_inv_dist }
def update_explored(self):
if self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] == 0.0:
self.new_pose = True
else:
self.new_pose = False
self.explored_space[self.true_grid.head,self.true_grid.row, self.true_grid.col] = 1.0
return
def compute_gtl(self):
if self.args.gtl_src == 'hd-corr':
self.get_gt_likelihood_corr(clip=0)
elif self.args.gtl_src == 'hd-corr-clip':
self.get_gt_likelihood_corr(clip=1)
elif self.args.gtl_src == 'hd-cos':
self.get_gt_likelihood_cossim()
else:
            raise Exception('GTL source required: --gtl-src= [hd-corr, hd-corr-clip, hd-cos]')
self.normalize_gtl()
    def normalize_gtl(self):
        # bring the ground-truth likelihood onto the CPU as a plain numpy array
        if torch.is_tensor(self.gt_likelihood_high):
            gt_high = self.gt_likelihood_high.cpu().numpy()
        else:
            gt_high = self.gt_likelihood_high
#self.gt_likelihood_unnormalized = np.copy(self.gt_likelihood)
if self.args.gtl_output == "softmax":
gt_high = softmax(gt_high, self.args.temperature)
elif self.args.gtl_output == "softermax":
gt_high = softermax(gt_high.cpu())
elif self.args.gtl_output == "linear":
gt_high = np.clip(gt_high.cpu(), 1e-5, 1.0)
gt_high = gt_high / gt_high.sum()
self.gt_likelihood_high = torch.tensor(gt_high).float().to(self.device)
def get_gt_likelihood_cossim(self):
offset = 360/self.grid_dirs
y= np.array(self.scan_data_at_unperturbed.ranges)[::self.args.pm_scan_step]
# y= np.array(self.scan_data.ranges)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
# y[y==np.inf]= self.max_scan_range
for heading in range(self.grid_dirs): ## that is, each direction
#compute cosine similarity at each loc
            # only self.scans_over_map is populated by get_synth_scan (the
            # high-resolution fill there is commented out)
            X = np.roll(self.scans_over_map, int(-offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
                    if (i_ld*self.grid_cols+j_ld == self.taken).any():
self.gt_likelihood_high[heading,i_ld,j_ld]= 0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
self.gt_likelihood_high[heading,i_ld,j_ld]= self.get_cosine_sim(x,y)
def get_cosine_sim(self,x,y):
# numpy arrays.
return sum(x*y)/np.linalg.norm(y,2)/np.linalg.norm(x,2)
    def get_gt_likelihood_corr(self, clip=0):  # clip is passed by compute_gtl
offset = 360/self.grid_dirs
y= np.array(self.scan_data_at_unperturbed.ranges)[::self.args.pm_scan_step]
# y= np.array(self.scan_data.ranges)[::self.args.pm_scan_step]
y = np.clip(y, self.min_scan_range, self.max_scan_range)
# y[y==np.inf]= self.max_scan_range
for heading in range(self.grid_dirs): ## that is, each direction
        #compute correlation at each loc
            X = np.roll(self.scans_over_map, int(-offset*heading), axis=2)[:,:,::self.args.pm_scan_step]
for i_ld in range(self.grid_rows):
for j_ld in range(self.grid_cols):
if (i_ld*self.grid_cols+j_ld == self.taken).any():
                        self.gt_likelihood_high[heading,i_ld,j_ld]= 0.0
else:
x = X[i_ld,j_ld,:]
x = np.clip(x, self.min_scan_range, self.max_scan_range)
                        self.gt_likelihood_high[heading,i_ld,j_ld]= self.get_corr(x,y)
def get_synth_scan(self):
# place sensor at a location, then reach out in 360 rays all around it and record when each ray gets hit.
n_places=self.grid_rows * self.grid_cols
for i_place in range(n_places):
row_ld = i_place // self.grid_cols
col_ld = i_place % self.grid_cols
x_real = to_real(row_ld, self.xlim, self.grid_rows ) # from low-dim location to real
y_real = to_real(col_ld, self.ylim, self.grid_cols ) # from low-dim location to real
scan = self.get_a_scan(x_real, y_real,scan_step=self.args.pm_scan_step)
self.scans_over_map[row_ld, col_ld,:] = np.clip(scan, 1e-10, self.max_scan_range)
# if i_place%10==0: print ('.')
## Uncomment the following if you want scans_over_map at high resolution.
# n_places = self.map_rows * self.map_cols
# for i_place in range(n_places):
# row_ld = i_place // self.map_rows
# col_ld = i_place % self.map_cols
# x_real = to_real(row_ld, self.xlim, self.map_rows ) # from low-dim location to real
# y_real = to_real(col_ld, self.ylim, self.map_cols ) # from low-dim location to real
# scan = self.get_a_scan(x_real, y_real,scan_step=self.args.pm_scan_step)
# self.scans_over_map_high[row_ld, col_ld,:] = np.clip(scan, 1e-10, self.max_scan_range)
# if i_place%100==0: print ('.')
def update_true_grid(self):
self.true_grid.row=to_index(self.current_pose.x, self.grid_rows, self.xlim)
self.true_grid.col=to_index(self.current_pose.y, self.grid_cols, self.ylim)
heading = self.current_pose.theta
self.true_grid.head = self.grid_dirs * wrap(heading + np.pi/self.grid_dirs) / 2.0 / np.pi
self.true_grid.head = int(self.true_grid.head % self.grid_dirs)
def teleport_turtle(self):
# if self.args.perturb > 0:
self.current_pose.x = self.perturbed_goal_pose.x
self.current_pose.y = self.perturbed_goal_pose.y
self.current_pose.theta = self.perturbed_goal_pose.theta
def set_walls(self):
if self.args.test_mode:
map_file = os.path.join(self.args.test_data_path, "map-design-%05d.npy"%self.env_count)
maze = np.load(map_file)
else:
if self.args.random_rm_cells[1]>0:
low=self.args.random_rm_cells[0]
high=self.args.random_rm_cells[1]
num_cells_to_delete = np.random.randint(low, high)
else:
num_cells_to_delete = self.args.rm_cells
maze = generate_map(self.args.map_size, num_cells_to_delete)
self.map_design = np.zeros((self.grid_rows, self.grid_cols))
for i in range(self.args.map_size):
for j in range(self.args.map_size):
if i < self.args.map_size-1:
if maze[i,j]==1 and maze[i+1,j]==1:
#place vertical
self.set_a_wall([i,j],[i+1,j],self.args.map_size,horizontal=False)
if j < self.args.map_size-1:
if maze[i,j]==1 and maze[i,j+1] ==1:
#place horizontal wall
self.set_a_wall([i,j],[i,j+1],self.args.map_size,horizontal=True)
if i>0 and i<self.args.map_size-1 and j>0 and j<self.args.map_size-1:
if maze[i,j]==1 and maze[i-1,j] == 0 and maze[i+1,j]==0 and maze[i,j-1]==0 and maze[i,j+1]==0:
self.set_a_pillar([i,j], self.args.map_size)
# self.map_design = maze
self.map_design_tensor[0,:,:] = torch.tensor(self.map_design).float().to(self.device)
self.taken = np.arange(self.map_design.size)[self.map_design.flatten()==1]
def clear_objects(self):
self.map_2d = np.zeros((self.map_rows, self.map_cols))
self.map_design = np.zeros((self.grid_rows, self.grid_cols),dtype='float')
self.map_design_tensor = torch.zeros((1,self.grid_rows, self.grid_cols),device=torch.device(self.device))
def set_a_pillar(self, a, grids):
x=to_real(a[0], self.xlim, grids)
y=to_real(a[1], self.ylim, grids)
#rad = self.radius
if self.args.backward_compatible_maps:
rad = 0.15
elif self.args.random_thickness:
rad = np.random.normal(loc=self.radius, scale=self.radius*0.1)
rad = np.clip(rad, 0, self.radius*.95)
else:
rad = self.radius
corner0 = [x+rad,y+rad]
corner1 = [x-rad,y-rad]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
dx = to_real(ir, self.xlim, self.map_rows) - x
dy = to_real(ic, self.ylim, self.map_cols) - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad:
self.map_2d[ir,ic]=1.0
x0 = to_index(corner0[0], self.grid_rows, self.xlim)
y0 = to_index(corner0[1], self.grid_cols, self.ylim)
x1 = to_index(corner1[0], self.grid_rows, self.xlim)
y1 = to_index(corner1[1], self.grid_cols, self.ylim)
corners = [(0,0), (-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)]
half_cell = 0.5*(self.xlim[1]-self.xlim[0])/self.grid_rows
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
for con in corners:
dx = to_real(ir, self.xlim, self.grid_rows) + con[0]*half_cell - x
dy = to_real(ic, self.ylim, self.grid_cols) + con[1]*half_cell - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad:
self.map_design[ir,ic]=1.0
break
def set_a_wall(self,a,b,grids,horizontal=True):
ax = to_real(a[0], self.xlim, grids)
ay = to_real(a[1], self.ylim, grids)
bx = to_real(b[0], self.xlim, grids)
by = to_real(b[1], self.ylim, grids)
if self.args.backward_compatible_maps:
rad = 0.1*np.ones(4)
elif self.args.random_thickness:
rad = np.random.normal(loc=self.radius, scale=self.radius*0.1, size=4)
rad = np.clip(rad, 0, self.radius*0.95)
else:
rad = self.radius*np.ones(4)
corner0 = [ax+rad[0],ay+rad[1]]
corner1 = [bx-rad[2],by-rad[3]]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
if self.args.backward_compatible_maps:
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
else:
x1 = to_index(corner1[0], self.map_rows, self.xlim)+1
y1 = to_index(corner1[1], self.map_cols, self.ylim)+1
self.map_2d[x0:x1, y0:y1]=1.0
x0 = to_index(corner0[0], self.grid_rows, self.xlim)
y0 = to_index(corner0[1], self.grid_cols, self.ylim)
x1 = to_index(corner1[0], self.grid_rows, self.xlim)+1
y1 = to_index(corner1[1], self.grid_cols, self.ylim)+1
self.map_design[x0:x1, y0:y1]=1.0
def place_turtle(self):
# new turtle location (random)
turtle_can = [i for i in range(self.grid_rows*self.grid_cols) if i not in self.taken]
        turtle_bin = int(np.random.choice(turtle_can))
        self.true_grid.row = turtle_bin // self.grid_cols
        self.true_grid.col = turtle_bin % self.grid_cols
self.true_grid.head = np.random.randint(self.grid_dirs)
self.goal_pose.x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
self.goal_pose.y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
self.goal_pose.theta = wrap(self.true_grid.head*self.heading_resol)
# if self.args.perturb>0:
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
delta_x = (0.5-np.random.rand())*(self.xlim[1]-self.xlim[0])/self.grid_rows
delta_y = (0.5-np.random.rand())*(self.ylim[1]-self.ylim[0])/self.grid_cols
else:
delta_x=0
delta_y=0
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
delta_theta = (0.5-np.random.rand())*self.heading_resol
else:
delta_theta=0
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = self.goal_pose.theta+delta_theta
if self.args.test_mode:
pg_pose_file = os.path.join(self.args.test_data_path, "pg-pose-%05d.npy"%self.env_count)
g_pose_file = os.path.join(self.args.test_data_path, "g-pose-%05d.npy"%self.env_count)
pg_pose = np.load(pg_pose_file)
g_pose = np.load(g_pose_file)
self.goal_pose.theta = g_pose[0]
self.goal_pose.x = g_pose[1]
self.goal_pose.y = g_pose[2]
if self.args.init_error == "XY" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.x = pg_pose[1]
self.perturbed_goal_pose.y = pg_pose[2]
else:
self.perturbed_goal_pose.x = g_pose[1]
self.perturbed_goal_pose.y = g_pose[2]
if self.args.init_error == "THETA" or self.args.init_error == "BOTH":
self.perturbed_goal_pose.theta = pg_pose[0]
else:
self.perturbed_goal_pose.theta = g_pose[0]
self.teleport_turtle()
self.update_true_grid()
# self.update_current_pose()
def generate_map_trans(self):
self.grid_center = ((self.grid_rows-1)/2, (self.grid_cols-1)/2)
self.map_trans = self.map_design
self.map_trans = shift(self.map_trans, int(self.grid_center[0]-self.true_grid.row), axis = 0, fill=1.0)
self.map_trans = shift(self.map_trans, int(self.grid_center[1]-self.true_grid.col), axis = 1, fill=1.0)
self.map_trans = np.rot90(self.map_trans, -self.true_grid.head)
def get_scan_2d(self):
data = self.scan_data
        if self.map_rows is None or self.map_cols is None:
            return
N=self.map_rows
M=self.map_cols
self.scan_2d = np.zeros(shape=(N,M))
x_max = self.xlim[1] # map height/2 in meters
x_min = self.xlim[0]
y_max = self.ylim[1]# map width/2 in meters
y_min = self.ylim[0]
resol0=min((x_max-x_min)/N,(y_max-y_min)/M)
angle = data.angle_min
for i,dist in enumerate(data.ranges):
resol=resol0
            if not np.isinf(dist):
over = 0
while True:
x = (dist+over)*np.cos(angle)
y = (dist+over)*np.sin(angle)
n = to_index(x, N, self.xlim)
m = to_index(y, M, self.ylim)
                    if not (0 <= n < N and 0 <= m < M): break
self.scan_2d[n,m] = 1.0
over += resol
angle += data.angle_increment
def get_a_scan(self, x_real, y_real, offset=0, scan_step=1, noise=0, sigma=0.05):
#class member variables: map_rows, map_cols, xlim, ylim, min_scan_range, max_scan_range, map_2d
row_hd = to_index(x_real, self.map_rows, self.xlim) # from real to hd
col_hd = to_index(y_real, self.map_cols, self.ylim) # from real to hd
scan = np.zeros(360)
missing = np.random.choice(360, noise, replace=False)
gaussian_noise = np.random.normal(scale=sigma, size=360)
for i_ray in range(0,360, scan_step):
theta = math.radians(i_ray)+offset
if i_ray in missing:
dist = np.inf
else:
dist = self.min_scan_range
while True:
if dist >= self.max_scan_range:
dist = np.inf
break
x_probe = x_real + dist * np.cos(theta)
y_probe = y_real + dist * np.sin(theta)
# see if there's something
i_hd_prb = to_index(x_probe, self.map_rows, self.xlim)
j_hd_prb = to_index(y_probe, self.map_cols, self.ylim)
if i_hd_prb < 0 or i_hd_prb >= self.map_rows:
dist = np.inf
break
if j_hd_prb < 0 or j_hd_prb >= self.map_cols:
dist = np.inf
break
if self.map_2d[i_hd_prb, j_hd_prb] >= 0.5:
break
dist += 0.01+0.01*(np.random.rand())
scan[i_ray]=dist+gaussian_noise[i_ray]
return scan
def get_scan_2d_n_headings(self):
data = self.scan_data
        if self.map_rows is None or self.map_cols is None:
            return
O=self.grid_dirs
N=self.map_rows
M=self.map_cols
self.scan_2d = np.zeros(shape=(O,N,M))
# self.scan_2d_rotate = np.zeros(shape=(O,N,M))
angles = np.linspace(data.angle_min, data.angle_max, data.ranges.size, endpoint=False)
for i,dist in enumerate(data.ranges):
for rotate in range(O):
offset = 2*np.pi/O*rotate
angle = offset + angles[i]
if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]):
continue
                if not np.isinf(dist):
x = (dist)*np.cos(angle)
y = (dist)*np.sin(angle)
n = to_index(x, N, self.xlim)
m = to_index(y, M, self.ylim)
                    if 0 <= n < N and 0 <= m < M:
self.scan_2d[rotate,n,m] = 1.0
rows1 = self.args.n_state_grids
cols1 = self.args.n_state_grids
rows2 = self.args.n_local_grids
cols2 = rows2
center=self.args.n_local_grids/2
if self.args.binary_scan:
self.scan_2d_low = np.ceil(normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA)))
else:
self.scan_2d_low = normalize(cv2.resize(self.scan_2d[0,:,:], (rows1, cols1),interpolation=cv2.INTER_AREA))
return self.scan_2d, self.scan_2d_low
def get_scan_2d_noshade(self):
data = self.scan_data
        if self.map_rows is None or self.map_cols is None:
            return
N=self.map_rows
M=self.map_cols
self.scan_2d = np.zeros(shape=(N,M))
angle = data.angle_min
for i,dist in enumerate(data.ranges):
if angle > math.radians(self.args.fov[0]) and angle < math.radians(self.args.fov[1]):
angle += data.angle_increment
continue
            if not np.isinf(dist):
x = (dist)*np.cos(angle)
y = (dist)*np.sin(angle)
n = to_index(x, N, self.xlim)
m = to_index(y, M, self.ylim)
                if 0 <= n < N and 0 <= m < M:
self.scan_2d[n,m] = 1.0
angle += data.angle_increment
def mask_likelihood(self):
the_mask = torch.tensor(np.ones([self.grid_dirs, self.grid_rows, self.grid_cols])).float().to(self.device)
for i in range(self.grid_rows):
for j in range(self.grid_cols):
if (i*self.grid_cols+j==self.taken).any():
the_mask[:,i,j]=0.0
self.likelihood = self.likelihood * the_mask
self.likelihood = self.likelihood/self.likelihood.sum()
def product_belief(self):
if type(self.belief) is np.ndarray:
self.belief = torch.from_numpy(self.belief).float().to(self.device)
self.belief = self.belief * (self.likelihood)
#normalize belief
self.belief /= self.belief.sum()
#update bel_grid
guess = np.unravel_index(np.argmax(self.belief.cpu().detach().numpy(), axis=None), self.belief.shape)
self.bel_grid = Grid(head=guess[0],row=guess[1],col=guess[2])
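        # np.unravel_index turns the flat argmax back into (head, row, col);
        # e.g. for a belief of shape (4, 11, 11), flat index 132 -> (1, 1, 0).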
def update_target_pose(self):
self.last_pose.x = self.perturbed_goal_pose.x
self.last_pose.y = self.perturbed_goal_pose.y
self.last_pose.theta = self.perturbed_goal_pose.theta
self.start_pose.x = self.perturbed_goal_pose.x
self.start_pose.y = self.perturbed_goal_pose.y
self.start_pose.theta = self.perturbed_goal_pose.theta
offset = self.heading_resol*self.args.rot_step
if self.action_name == "turn_right":
self.goal_pose.theta = wrap(self.start_pose.theta-offset)
self.goal_pose.x = self.start_pose.x
self.goal_pose.y = self.start_pose.y
elif self.action_name == "turn_left":
self.goal_pose.theta = wrap(self.start_pose.theta+offset)
self.goal_pose.x = self.start_pose.x
self.goal_pose.y = self.start_pose.y
elif self.action_name == "go_fwd":
self.goal_pose.x = self.start_pose.x + math.cos(self.start_pose.theta)*self.fwd_step
self.goal_pose.y = self.start_pose.y + math.sin(self.start_pose.theta)*self.fwd_step
self.goal_pose.theta = self.start_pose.theta
elif self.action_name == "hold":
return
else:
print('undefined action name %s'%self.action_name)
exit()
delta_x, delta_y = 0,0
delta_theta = 0
if self.args.process_error:
delta_x, delta_y = np.random.normal(scale=self.args.process_error[0],size=2)
delta_theta = np.random.normal(scale=self.args.process_error[1])
self.perturbed_goal_pose.x = self.goal_pose.x+delta_x
self.perturbed_goal_pose.y = self.goal_pose.y+delta_y
self.perturbed_goal_pose.theta = wrap(self.goal_pose.theta+delta_theta)
def collision_check(self):
row=to_index(self.perturbed_goal_pose.x, self.grid_rows, self.xlim)
col=to_index(self.perturbed_goal_pose.y, self.grid_cols, self.ylim)
x = self.perturbed_goal_pose.x
y = self.perturbed_goal_pose.y
rad = self.collision_radius
        if self.args.collision_from == "scan" and self.action_name == "go_fwd":
self.collision = self.collision_fnc(0, 0, 0, self.scan_2d_slide)
elif self.args.collision_from == "map":
self.collision = self.collision_fnc(x,y,rad, self.map_2d)
else:
self.collision = False
if self.collision:
#undo update target
self.perturbed_goal_pose.x = self.last_pose.x
self.perturbed_goal_pose.y = self.last_pose.y
self.perturbed_goal_pose.theta = self.last_pose.theta
def collision_fnc(self, x, y, rad, img):
corner0 = [x+rad,y+rad]
corner1 = [x-rad,y-rad]
x0 = to_index(corner0[0], self.map_rows, self.xlim)
y0 = to_index(corner0[1], self.map_cols, self.ylim)
x1 = to_index(corner1[0], self.map_rows, self.xlim)
y1 = to_index(corner1[1], self.map_cols, self.ylim)
if x0 < 0 :
return True
if y0 < 0:
return True
if x1 >= self.map_rows:
return True
if y1 >= self.map_cols:
return True
        if rad == 0:
            return img[x0, y0] > 0.5
for ir in range(x0,x1+1):
for ic in range(y0,y1+1):
dx = to_real(ir, self.xlim, self.map_rows) - x
dy = to_real(ic, self.ylim, self.map_cols) - y
dist = np.sqrt(dx**2+dy**2)
if dist <= rad and img[ir,ic]==1.0:
return True
return False
def get_lidar(self):
ranges = self.get_a_scan(self.current_pose.x, self.current_pose.y,
offset=self.current_pose.theta,
noise=self.args.lidar_noise,
sigma=self.args.lidar_sigma)
bearing_deg = np.arange(360.0)
mindeg=0
maxdeg=359
incrementdeg=1
params = {'ranges': ranges,
'angle_min': math.radians(mindeg),
'angle_max': math.radians(maxdeg),
'range_min': self.min_scan_range,
'range_max': self.max_scan_range}
self.scan_data = Lidar(**params)
## scan_data @ unperturbed pose
x = to_real(self.true_grid.row, self.xlim, self.grid_rows)
y = to_real(self.true_grid.col, self.ylim, self.grid_cols)
offset = self.heading_resol*self.true_grid.head
# ranges = self.get_a_scan(x, y, offset=offset, noise=0, sigma=0)
ranges = self.get_a_scan(x, y, offset=offset, noise=0, sigma=0.03)
params = {'ranges': ranges,
'angle_min': math.radians(mindeg),
'angle_max': math.radians(maxdeg),
'range_min': self.min_scan_range,
'range_max': self.max_scan_range}
self.scan_data_at_unperturbed = Lidar(**params)
def fwd_clear(self):
robot_width = 0.3
angles=math.degrees(np.arctan2(0.5*robot_width, self.fwd_step))
ranges = self.scan_data.ranges
if min(ranges[0:int(angles)]) < 1.5*self.fwd_step or min(ranges[-int(angles):]) < 1.5*self.fwd_step:
return False
else:
return True
def execute_action_teleport(self):
if self.collision:
return False
self.teleport_turtle()
return True
    def transit_belief(self):
        # the shift/roll operations in trans_bel() run in numpy
        self.belief = self.belief.cpu().detach().numpy()
        if self.collision:
            self.prior = np.copy(self.belief)
            self.belief = torch.from_numpy(self.belief).float().to(self.device)
            return
        self.belief = self.trans_bel()
        self.prior = np.copy(self.belief)
        self.belief = torch.from_numpy(self.belief).float().to(self.device)
def trans_bel(self):
rotation_step = self.args.rot_step
if self.action_name == "turn_right":
self.belief=np.roll(self.belief,-rotation_step, axis=0)
elif self.action_name == "turn_left":
self.belief=np.roll(self.belief, rotation_step, axis = 0)
elif self.action_name == "go_fwd":
if self.args.trans_belief == "roll":
self.belief[0,:,:]=np.roll(self.belief[0,:,:], -1, axis=0)
self.belief[1,:,:]=np.roll(self.belief[1,:,:], -1, axis=1)
self.belief[2,:,:]=np.roll(self.belief[2,:,:], 1, axis=0)
self.belief[3,:,:]=np.roll(self.belief[3,:,:], 1, axis=1)
elif self.args.trans_belief == "stoch-shift" or self.args.trans_belief == "shift":
prior = self.belief.min()
for i in range(self.grid_dirs):
theta = i * self.heading_resol
fwd_dist = self.args.fwd_step
dx = fwd_dist*np.cos(theta+np.pi)
dy = fwd_dist*np.sin(theta+np.pi)
# simpler way:
DX = np.round(dx)
DY = np.round(dy)
shft_hrz = shift(self.belief[i,:,:], int(DY), axis=1, fill=prior)
self.belief[i,:,:]=shift(shft_hrz, int(DX), axis=0, fill=prior)
if self.args.trans_belief == "stoch-shift" and self.action_name != "hold":
for ch in range(self.grid_dirs):
self.belief[ch,:,:] = ndimage.gaussian_filter(self.belief[ch,:,:], sigma=self.sigma_xy)
n_dir = self.grid_dirs//4
p_roll = 0.20
roll_n = []
roll_p = []
for r in range(1, n_dir):
if roll_n == [] and roll_p == []:
                        roll_n.append(p_roll*np.roll(self.belief,-1,axis=0))
                        roll_p.append(p_roll*np.roll(self.belief, 1,axis=0))
else:
roll_n.append(p_roll*np.roll(roll_n[-1],-1,axis=0))
roll_p.append(p_roll*np.roll(roll_p[-1], 1,axis=0))
self.belief = sum(roll_n + roll_p)+self.belief
self.belief /= np.sum(self.belief)
return self.belief
def get_reward(self):
self.manhattan = self.get_manhattan(self.belief.cpu().detach().numpy()) #manhattan distance between gt and belief.
self.manhattans.append(self.manhattan)
self.reward = 0.0
# if self.args.penalty_for_block and self.action_name == "go_fwd_blocked":
if self.collision == True:
self.reward_block_penalty = -1.0
else:
self.reward_block_penalty = 0.0
self.reward_bel_gt = torch.log(self.belief[self.true_grid.head,self.true_grid.row,self.true_grid.col]).cpu().detach().numpy()
self.reward_bel_gt_nonlog = self.belief[self.true_grid.head,self.true_grid.row,self.true_grid.col].cpu().detach().numpy()
bel = torch.clamp(self.belief, 1e-9, 1.0)
# info gain = p*log(p) - q*log(q)
infogain = (bel * torch.log(bel)).sum().detach() - self.bel_ent
self.bel_ent = (bel * torch.log(bel)).sum().detach()
self.reward_infogain = infogain.cpu().detach().numpy()
bel=self.belief
self.reward_bel_ent = (bel * torch.log(bel)).sum().cpu().detach().numpy()
if self.manhattan == 0:
self.reward_hit = 1
else:
self.reward_hit = 0
self.reward_dist = (self.longest-self.manhattan)/self.longest
self.reward_inv_dist = 1.0/(self.manhattan+1.0)
if self.args.penalty_for_block:
self.reward += self.reward_block_penalty
if self.args.rew_bel_gt:
self.reward += self.reward_bel_gt
if self.args.rew_bel_gt_nonlog:
self.reward += self.reward_bel_gt_nonlog
if self.args.rew_infogain:
self.reward += self.reward_infogain
if self.args.rew_bel_ent:
self.reward += self.reward_bel_ent
if self.args.rew_hit:
self.reward += self.reward_hit
if self.args.rew_dist:
self.reward += self.reward_dist
if self.args.rew_inv_dist:
self.reward += self.reward_inv_dist
self.rewards.append(self.reward)
def get_manhattan(self, bel):
guess = (self.bel_grid.head, self.bel_grid.row, self.bel_grid.col)
        e_dir = abs(guess[0]-self.true_grid.head)
        e_dir = min(self.grid_dirs-e_dir, e_dir)  # wrap-around distance over headings
return float(e_dir+abs(guess[1]-self.true_grid.row)+abs(guess[2]-self.true_grid.col))
def update_bel_list(self):
guess = self.bel_grid
if guess not in self.bel_list:
self.new_bel = True
self.bel_list.append(guess)
else:
self.new_bel = False
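# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of stepping this environment through gym. The id 'dal-v0'
# is an assumption for illustration; the actual name is registered in the
# package's __init__, which is not shown here.
#   import gym, gym_dal
#   env = gym.make('dal-v0')
#   obs = env.reset()
#   for _ in range(10):
#       obs, reward, done, info = env.step(env.action_space.sample())
#       if done:
#           obs = env.reset()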
|
{"hexsha": "a06e8e9a115d28581c42cfe9dc2ed7479d3b75e3", "size": 40320, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_dal/envs/dal_env.py", "max_stars_repo_name": "montrealrobotics/dal", "max_stars_repo_head_hexsha": "4c83d3b118a1d61306f2f2ecc6d900f4be12bb7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2019-03-07T23:48:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-28T09:26:19.000Z", "max_issues_repo_path": "gym_dal/envs/dal_env.py", "max_issues_repo_name": "montrealrobotics/dal", "max_issues_repo_head_hexsha": "4c83d3b118a1d61306f2f2ecc6d900f4be12bb7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-04-19T06:03:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-22T07:12:54.000Z", "max_forks_repo_path": "gym_dal/envs/dal_env.py", "max_forks_repo_name": "montrealrobotics/dal", "max_forks_repo_head_hexsha": "4c83d3b118a1d61306f2f2ecc6d900f4be12bb7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-03-11T00:46:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T13:19:49.000Z", "avg_line_length": 33.7405857741, "max_line_length": 148, "alphanum_fraction": 0.702281746, "include": true, "reason": "import numpy,from scipy", "num_tokens": 12040}
|
"""
===================================================================
Recognizing hand-written digits using Fastfood kernel approximation
===================================================================
This shows how the Fastfood kernel approximation compares to a dual and primal
support vector classifier. It is based on the plot_digits_classification
example of scikit-learn. The idea behind Fastfood is to map the data into a
feature space (approximation) and then run a linear classifier on the mapped
data.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Modified By: Felix Maximilian Möller
# License: Simplified BSD
# Standard scientific Python imports
import numpy as np
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
from sklearn_extra.kernel_approximation import Fastfood
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits;
# let's have a look at the first 4 images, stored in the `images`
# attribute of the dataset. If we were working from image files, we
# could load them using pylab.imread. For these images we know which
# digit they represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)):
    if index > 3:
        break
    pl.subplot(2, 4, index + 1)
    pl.axis('off')
    pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
    pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
gamma = .001
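# For an RBF kernel exp(-gamma * ||x - y||^2), the equivalent Gaussian
# bandwidth is sigma = sqrt(1 / (2 * gamma)), which Fastfood takes directly.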
sigma = np.sqrt(1 / (2 * gamma))
number_of_features_to_generate = 1000
train__idx = range(n_samples // 2)
test__idx = range(n_samples // 2, n_samples)
# map data into featurespace
rbf_transform = Fastfood(
sigma=sigma, n_components=number_of_features_to_generate)
data_transformed_train = rbf_transform.fit_transform(data[train__idx])
data_transformed_test = rbf_transform.transform(data[test__idx])
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=gamma)
linear_classifier = svm.LinearSVC()
linear_classifier_transformation = svm.LinearSVC()
# We learn the digits on the first half of the digits
classifier.fit(data[train__idx], digits.target[train__idx])
linear_classifier.fit(data[train__idx], digits.target[train__idx])
# Run the linear classifier on the mapped data.
linear_classifier_transformation.fit(
data_transformed_train, digits.target[train__idx])
# Now predict the value of the digit on the second half:
expected = digits.target[test__idx]
predicted = classifier.predict(data[test__idx])
predicted_linear = linear_classifier.predict(data[test__idx])
predicted_linear_transformed = linear_classifier_transformation.predict(
data_transformed_test)
print("Classification report for dual classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Classification report for primal linear classifier %s:\n%s\n"
% (linear_classifier,
metrics.classification_report(expected, predicted_linear)))
print(
"Classification report for primal transformation classifier %s:\n%s\n"
% (linear_classifier_transformation,
metrics.classification_report(expected, predicted_linear_transformed)))
print("Confusion matrix for dual classifier:\n%s"
% metrics.confusion_matrix(expected, predicted))
print("Confusion matrix for primal linear classifier:\n%s"
% metrics.confusion_matrix(expected, predicted_linear))
print("Confusion matrix for for primal transformation classifier:\n%s"
% metrics.confusion_matrix(expected, predicted_linear_transformed))
for index, (image, prediction) in enumerate(
        zip(digits.images[test__idx], predicted)):
    if index > 3:
        break
    # plot the predictions on the second row, below the training examples
    pl.subplot(2, 4, index + 5)
    pl.axis('off')
    pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
    pl.title('Prediction: %i' % prediction)
pl.show()
|
{"hexsha": "91ed389e9cb9f3fda673156c484286e7022e1cce", "size": 4153, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/plot_digits_classification_fastfood.py", "max_stars_repo_name": "zdog234/scikit-learn-extra", "max_stars_repo_head_hexsha": "edfa77fd5d684ffc98151c0e93c3c2bd117627cd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/plot_digits_classification_fastfood.py", "max_issues_repo_name": "zdog234/scikit-learn-extra", "max_issues_repo_head_hexsha": "edfa77fd5d684ffc98151c0e93c3c2bd117627cd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/plot_digits_classification_fastfood.py", "max_forks_repo_name": "zdog234/scikit-learn-extra", "max_forks_repo_head_hexsha": "edfa77fd5d684ffc98151c0e93c3c2bd117627cd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1009174312, "max_line_length": 78, "alphanum_fraction": 0.7399470262, "include": true, "reason": "import numpy", "num_tokens": 916}
|
[STATEMENT]
lemma pairwise_disjnt_quadruple_sum:
"pairwise disjnt ((\<lambda> x. {(a, b, c, d) | a b c d. a \<in> A \<and> b \<in> A \<and> c \<in> A \<and> d \<in> A \<and> additive_quadruple a b c d \<and> a \<oplus> b = x \<and> c \<oplus> d = x}) ` (sumset A A))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pairwise disjnt ((\<lambda>x. {(a, b, c, d) |a b c d. a \<in> A \<and> b \<in> A \<and> c \<in> A \<and> d \<in> A \<and> additive_quadruple a b c d \<and> a \<oplus> b = x \<and> c \<oplus> d = x}) ` sumset A A)
[PROOF STEP]
unfolding disjnt_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pairwise (\<lambda>A B. A \<inter> B = {}) ((\<lambda>x. {(a, b, c, d) |a b c d. a \<in> A \<and> b \<in> A \<and> c \<in> A \<and> d \<in> A \<and> additive_quadruple a b c d \<and> a \<oplus> b = x \<and> c \<oplus> d = x}) ` sumset A A)
[PROOF STEP]
by (intro pairwiseI) (auto)
|
{"llama_tokens": 410, "file": "Balog_Szemeredi_Gowers_Additive_Combinatorics_Preliminaries", "length": 2}
|
from torchvision import transforms, utils
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch
import os, cv2
class SaliconDataset(Dataset):  # map-style dataset: implements __getitem__/__len__
def __init__(self, img_dir, gt_dir, fix_dir, img_ids, exten='.png'):
self.img_dir = img_dir
self.gt_dir = gt_dir
self.fix_dir = fix_dir
self.img_ids = img_ids
self.exten = exten
self.img_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
def __getitem__(self, idx):
img_id = self.img_ids[idx]
img_path = os.path.join(self.img_dir, img_id + self.exten)
gt_path = os.path.join(self.gt_dir, img_id + self.exten)
fix_path = os.path.join(self.fix_dir, img_id + self.exten)
img = Image.open(img_path).convert('RGB')
gt = np.array(Image.open(gt_path).convert('L'))
gt = gt.astype('float')
gt = cv2.resize(gt, (256,256))
fixations = np.array(Image.open(fix_path).convert('L'))
fixations = fixations.astype('float')
img = self.img_transform(img)
if np.max(gt) > 1.0:
gt = gt / 255.0
fixations = (fixations > 0.5).astype('float')
assert np.min(gt)>=0.0 and np.max(gt)<=1.0
assert np.min(fixations)==0.0 and np.max(fixations)==1.0
return img, torch.FloatTensor(gt), torch.FloatTensor(fixations)
def __len__(self):
return len(self.img_ids)
class TestLoader(Dataset):
def __init__(self, img_dir, img_ids):
self.img_dir = img_dir
self.img_ids = img_ids
self.img_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
def __getitem__(self, idx):
img_id = self.img_ids[idx]
img_path = os.path.join(self.img_dir, img_id)
img = Image.open(img_path).convert('RGB')
sz = img.size
img = self.img_transform(img)
return img, img_id, sz
def __len__(self):
return len(self.img_ids)
class MITDataset(Dataset):
def __init__(self, img_dir, gt_dir, fix_dir, img_ids, exten='.png', val=False):
self.img_dir = img_dir
self.gt_dir = gt_dir
self.fix_dir = fix_dir
self.img_ids = img_ids
self.val = val
self.exten = exten
self.img_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
def __getitem__(self, idx):
img_id = self.img_ids[idx]
img_path = os.path.join(self.img_dir, img_id + '.png')
gt_path = os.path.join(self.gt_dir, img_id + self.exten)
fix_path = os.path.join(self.fix_dir, img_id + self.exten)
img = Image.open(img_path).convert('RGB')
gt = np.array(Image.open(gt_path).convert('L'))
gt = gt.astype('float')
gt = cv2.resize(gt, (256,256))
fixations = np.array(Image.open(fix_path).convert('L'))
fixations = fixations.astype('float')
img = self.img_transform(img)
if np.max(gt) > 1.0:
gt = gt / 255.0
fixations = (fixations > 0.5).astype('float')
assert np.min(gt)>=0.0 and np.max(gt)<=1.0
assert np.min(fixations)==0.0 and np.max(fixations)==1.0
if self.val:
return img, torch.FloatTensor(gt), torch.FloatTensor(fixations)
else:
return img, torch.FloatTensor(gt), torch.FloatTensor(gt)
def __len__(self):
return len(self.img_ids)
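# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of wrapping SaliconDataset in a torch DataLoader; the
# directory paths and id list below are placeholders, not real defaults.
#   train_ids = [line.strip() for line in open('train_ids.txt')]
#   dataset = SaliconDataset('images/', 'maps/', 'fixations/', train_ids)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
#   for img, gt, fixations in loader:
#       ...  # img: (B, 3, 256, 256); gt: (B, 256, 256); fixations keep native size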
|
{"hexsha": "e4c0e915aaed81b01b7b6ed9f809119631c08074", "size": 3975, "ext": "py", "lang": "Python", "max_stars_repo_path": "SimpleNet/dataloader.py", "max_stars_repo_name": "chhanganivarun/saliency", "max_stars_repo_head_hexsha": "a9edbd7d89d1e170bfb5056eb48e7a103d489995", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-03-15T12:06:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T09:40:48.000Z", "max_issues_repo_path": "SimpleNet/dataloader.py", "max_issues_repo_name": "chhanganivarun/saliency", "max_issues_repo_head_hexsha": "a9edbd7d89d1e170bfb5056eb48e7a103d489995", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-03-18T07:26:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:44:07.000Z", "max_forks_repo_path": "SimpleNet/dataloader.py", "max_forks_repo_name": "chhanganivarun/saliency", "max_forks_repo_head_hexsha": "a9edbd7d89d1e170bfb5056eb48e7a103d489995", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-03-15T12:07:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T14:42:59.000Z", "avg_line_length": 34.2672413793, "max_line_length": 83, "alphanum_fraction": 0.5637735849, "include": true, "reason": "import numpy", "num_tokens": 1027}
|
__author__ = 'Alexendar Perez'
#####################
# #
# Introduction #
# #
#####################
"""CRISPR Specificity Correction
National Cancer Institute, National Institutes of Health, United States of America
Developer: Alexendar R. Perez M.D., Ph.D
Primary Investigator: Joana A. Vidigal Ph.D
Laboratory: Vidigal Laboratory, 2020
"""
#################
# #
# Libraries #
# #
#################
import sys
import argparse
import numpy as np
import pickle as pl
import pandas as pd
from math import sqrt
from pyearth import Earth
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from pkg_resources import resource_exists, resource_filename
#########################
# #
# Auxillary Function #
# #
#########################
def arg_parser():
parser = argparse.ArgumentParser()
hamming_data = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-i','--infile',help='absolute filepath to input file',required=True)
hamming_data.add_argument('-l',dest='library',help='avana, brunello, geckov1, geckov2, tkov: default is None',default=None)
hamming_data.add_argument('-g',dest='Hamming',help='absolute filepath to CSC Hamming pickle file: default is None',default=None)
args = parser.parse_args()
in_file = args.infile
library = args.library
genome = args.Hamming
return in_file,library,genome
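# Example invocations (illustrative; the entry-point name and file paths are
# placeholders for however this module is installed):
#   python csc.py -i /path/to/screen_results.csv -l avana
#   python csc.py -i /path/to/screen_results.csv -g /path/to/custom_hamming.pl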
def writeout(df, hamming_string_dict, outfile):
"""write out
:param df: input dataframe that is made from input file
:param hamming_string_dict: dictionary object created from hamming string pickle
:param outfile: opened outfile object
:return: output file
"""
df_v = np.asarray(df)
for i in range(len(df_v)):
grna = df_v[i][0]
for j in df_v[i]:
outfile.write('%s,' % j)
try:
for jj in hamming_string_dict[grna]:
outfile.write('%s,' % jj)
specific, h0 = float(hamming_string_dict[grna][0]), float(hamming_string_dict[grna][1])
if specific >= 0.16 and h0 == 1:
c = 'above GuideScan specificity threshold'
else:
c = 'below GuideScan specificity threshold'
outfile.write('%s\n' % c)
except KeyError:
sys.stderr.write('\n%s not found in selected library: passing\n' % grna)
outfile.write('\n%s not present in library\n' % grna)
def specificity_metrics(outdir, filename, df, hamming_string_dict):
"""
:param outdir: absolute filepath to output directory
:param filename: name of input file to be used as part of output filename
:param df: pandas dataframe with first column as gRNA
:param hamming_string_dict: CSC onboard dictionary object with key as gRNA and value as Hamming metrics
:return: file with gRNA and specificity metrics
"""
with open('%s/%s_CSC_gRNA_Hamming_neighborhood.csv' % (outdir, filename), 'w') as outfile:
outfile.write('%s,%s,%s,%s,%s,%s,%s\n' % (
'gRNA', 'specificity', 'h0', 'h1', 'h2', 'h3', 'classification'))
writeout(df, hamming_string_dict, outfile)
sys.stdout.write('write out complete\n%s/%s_CSC_gRNA_Hamming_neighborhood.csv' % (outdir, filename))
def csc(df, hamming_string_dict, outdir, filename):
"""CRISPR Specificity Correction
:param df: pandas dataframe with first column as gRNA and second column as logFC/metric
:param hamming_string_dict: CSC onboard dictionary object with key as gRNA and value as Hamming metrics
:param outdir: absolute filepath to output directory
:param filename: name of input file to be used as part of output filename
:return: CSC adjustment
"""
# MARS compatible file
df_mars_lst = []
df_v = np.asarray(df)
for i in range(len(df_v)):
row_lst = []
grna, metric = df_v[i][0], df_v[i][1]
try:
metric = float(metric)
except ValueError:
sys.stdout.write('WARNING: encountered %s which is not float compatible, skipping\n' % metric)
continue
row_lst.append(grna)
try:
for jj in hamming_string_dict[grna]:
row_lst.append(jj)
row_lst.append(metric)
df_mars_lst.append(row_lst)
except KeyError:
sys.stdout.write('\n%s not found in selected library: passing\n' % grna)
continue
df = pd.DataFrame(df_mars_lst, columns=['gRNA', 'specificity', 'h0', 'h1', 'h2', 'h3', 'original_value'])
# exclude infinte specificity non-target gRNAs
df = df[df['h0'] != 0]
# isolate pertinent confounder variables
df_confounders = df[['specificity', 'h0', 'h1', 'h2', 'h3']]
    # candidate knot locations (kept for reference; Earth places knots automatically)
    knots = df['original_value'].quantile([0.25, 0.5, 0.75, 1])
# training and testing data
train_x, test_x, train_y, test_y = train_test_split(df_confounders, df['original_value'], test_size=0.10,
random_state=1)
# Fit an Earth model
model = Earth(feature_importance_type='gcv')
try:
model.fit(train_x, train_y)
except ValueError:
sys.stdout.write('\nValue Error encountered. Model unable to be trained. Exiting CSC Novo\n')
model_processed = 'F'
sys.stdout.write('training input x data\n %s\ntraining input y data\n %s\n' % (train_x,train_y))
return model_processed
# Print the model
print(model.trace())
print(model.summary())
print(model.summary_feature_importances())
# Plot the model
y_hat = model.predict(test_x)
# calculating RMSE values
rms1 = sqrt(mean_squared_error(test_y, y_hat))
print('\n\nRMSE on Predictions\n\n')
print(rms1)
# calculating R^2 for training
print('\n\nR^2 on Training Data\n\n')
print(model.score(train_x, train_y))
# calculating R^2 for testing
print('\n\nR^2 on Testing Data\n\n')
print(model.score(test_x, test_y))
# write out model metrics
with open('%s/csc_model_metrics_%s.txt' % (outdir, filename), 'w') as outfile:
outfile.write('%s\n%s\n%s\nRMSE on Predictions\n%s' % (
model.trace(), model.summary(), model.summary_feature_importances(), rms1))
if rms1 <= 1.0:
#model processed
model_processed = 'T'
# full data prediction
df['earth_adjustment'] = model.predict(df_confounders)
# CSC correction
df['earth_corrected'] = df['original_value'] - df['earth_adjustment']
# main write out
df.to_csv('%s/csc_output_%s_earth_patched.csv' % (outdir, filename))
# pickle write out
        with open('%s/csc_output_%s_earth_model.pl' % (outdir, filename), 'wb') as model_file:
            pl.dump(model, model_file)
sys.stdout.write('\nCSC adjustment complete\n')
sys.stdout.write('\nCSC output files written to %s\n' % outdir)
return model_processed
else:
        sys.stdout.write('\nCSC adjustment not computed as model root-mean-squared error exceeds 1.0\n')
model_processed = 'F'
return model_processed
def read_in(in_file):
"""multiple attempt read in for generic file
:param in_file: absolute filepath to input file
:return: opened file, classification of opening method
"""
    classification = '.csv'
    try:
        with open(in_file) as check_handle:
            if '\t' in check_handle.readline():
                classification = '.txt'
    except UnicodeDecodeError:
        # binary input (e.g. Excel workbooks); the read_excel attempt below handles it
        pass
try:
infile = pd.read_excel(in_file)
sys.stdout.write('file read in as Excel\n')
    except Exception:
try:
if classification == '.csv':
infile = pd.read_csv(in_file)
sys.stdout.write('file read in as csv\n')
else:
infile = pd.read_csv(in_file, sep='\t')
sys.stdout.write('file read in as txt\n')
        except Exception:
infile = pd.DataFrame(open(in_file, 'r'))
sys.stdout.write('file read in with python open function and cast as pandas DataFrame\n')
return infile
def csc_processing(in_file, hamming_string_dict):
"""control function that assessed if CSC adjustment/model deployed or if specificity metrics only are given
:param in_file: absolute filepath to input file
:param hamming_string_dict: dictionary object with gRNA as key and hamming string as value
:return: CSC adjustment or specificity metric output
"""
# read in file
df = read_in(in_file)
filename, outdir = in_file.split('/')[-1].split('.')[0], '/'.join(in_file.split('/')[:-1])
columns, rows = len(df.columns), df.shape[0]
# ensure columns named correctly
if columns > 1:
sys.stdout.write(
'\n%s columns detected\nfirst two columns will be used\n---column one = gRNA---\n---column two = value---\n' % columns)
df = df.iloc[:, 0:2]
df.columns = ['gRNA', 'original_value']
model_processed = csc(df, hamming_string_dict, outdir, filename)
if model_processed == 'T':
pass
else:
specificity_metrics(outdir, filename, df, hamming_string_dict)
elif columns == 1:
sys.stdout.write('\nfile determined to have only one column\n---column one = gRNA---\n')
specificity_metrics(outdir, filename, df, hamming_string_dict)
else:
sys.stdout.write('\nfile determined to have no columns. Unable to process\n')
sys.exit(1)
def load_pickle(f):
"""load pickle file and generate dictionary
:param f: absolute filepath to CSC library pickle files
:return: dictionary object (Pandas)
"""
with open(f, 'rb') as infile:
        pickle_dataframe = pl.load(infile, encoding='latin1')
try:
pickle_dictionary = pickle_dataframe.set_index('gRNA').to_dict()
return pickle_dictionary
except AttributeError:
if type(pickle_dataframe) == dict:
sys.stdout.write('\n%s is a dictionary object\n' % f)
pickle_dictionary = pickle_dataframe
return pickle_dictionary
else:
sys.stderr.write('\n%s is incompatible pickle file\nHave pickle file be dictionary with gRNA as key and specificity string as value\n' % f)
sys.exit(1)
def file_load(infile):
"""input parameter selections
:param infile: name of screen
:return: filepath for Hamming and correction factor pickles for library
"""
if infile == 'avana':
infile_h = 'screen_models/Hamming/avana_patched_Hamming_string.pl'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'brunello':
infile_h = 'screen_models/Hamming/brunello_patch_format_screen_Hamming_string.pl'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'geckov1':
infile_h = 'screen_models/Hamming/geckov1_patch_format_screen_Hamming_string.pl'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'geckov2':
infile_h = 'screen_models/Hamming/geckov2_patch_format_screen_Hamming_string.pl'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'tkov':
infile_h = 'screen_models/Hamming/tkov_patch_format_screen_Hamming_string.pl'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'example_grna_logfc':
infile_h = 'screen_models/examples/avana_patched_sample_gRNA_lognorm_lnfc.csv'
h = resource_filename(__name__, infile_h)
return h
elif infile == 'example_grna':
infile_h = 'screen_models/examples/avana_patched_sample_gRNA.csv'
h = resource_filename(__name__, infile_h)
return h
else:
        sys.stderr.write('%s not a recognized screen\n' % infile)
        sys.exit(1)
def processing(in_file,screen,classification):
"""core processing function
:param in_file: absolute filepath to input file
:param screen: string value corresponding to screen name
:param classification: deploy lite or novo
:return:
"""
    # ensure screen name is lowercase
    screen = screen.lower()
    # supported screens
    support_screens = ['avana', 'brunello', 'geckov1', 'geckov2']
    if classification == 'l':
        sys.stdout.write('\nCSC Lite deployed\n')
elif classification == 'g':
sys.stdout.write('\nCSC Novo deployed\n')
# check if support screen queried
if screen in support_screens:
sys.stdout.write('loading %s screen data\n' % screen)
h = file_load(screen)
# load pickle and generate dictionaries
hamming_dict = load_pickle(h)
# translate hamming string
hamming_string_dict = {}
for key in hamming_dict['Hamming_string'].keys():
float_casted = [float(i) for i in hamming_dict['Hamming_string'][key].split('_')]
hamming_string_dict[key] = float_casted
csc_processing(in_file, hamming_string_dict)
elif screen == 'example':
if in_file == 'example_grna_logfc':
in_file = file_load('example_grna_logfc')
elif in_file == 'example_grna':
in_file = file_load('example_grna')
else:
sys.stderr.write('ENTER\n"csc_process -i example_grna_logfc -l example"\nOR\n"csc_process -i example_grna -l example"\n')
sys.exit(1)
sys.stdout.write('Example\n')
h = file_load('avana')
# load pickle and generate dictionaries
hamming_dict = load_pickle(h)
# translate hamming string
hamming_string_dict = {}
        for key in hamming_dict['Hamming_string'].keys():
            float_casted = [float(i) for i in hamming_dict['Hamming_string'][key].split('_')]
            hamming_string_dict[key] = float_casted
csc_processing(in_file, hamming_string_dict)
else:
if screen == 'tkov':
sys.stdout.write('tkov screen\n')
h = file_load('tkov')
# load pickle and generate dictionaries
hamming_dict = load_pickle(h)
else:
sys.stdout.write('\nscreen selection of %s is novel; will attempt load into memory\n' % screen)
# load pickle and generate dictionaries
hamming_dict = load_pickle(screen)
# translate hamming string
hamming_string_dict = {}
try:
for key in hamming_dict['Hamming_string'].keys():
float_casted = [float(i) for i in hamming_dict['Hamming_string'][key].split('_')]
hamming_string_dict[key] = float_casted
except KeyError:
for key in hamming_dict.keys():
float_casted = [float(i) for i in hamming_dict[key].split('_')]
hamming_string_dict[key] = float_casted
csc_processing(in_file, hamming_string_dict)
#####################
# #
# Main Function #
# #
#####################
def main():
# user inputs
in_file,library,genome = arg_parser()
if library:
screen = library
classification = 'l'
else:
screen = genome
classification = 'g'
# processing
processing(in_file,screen,classification)
# user end message
sys.stdout.write('\nprocessing complete\n')
if __name__ == '__main__':
main()
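# Usage sketch (`csc_process` is the console entry point referenced in the
# error messages above; the first input path is hypothetical):
#   csc_process -i /path/to/screen_logfc.csv -l avana
#   csc_process -i example_grna_logfc -l example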
|
{"hexsha": "c072693be60323f39a9b914477adacc8222da9d8", "size": 15410, "ext": "py", "lang": "Python", "max_stars_repo_path": "csc_v2/csc_lite.py", "max_stars_repo_name": "xerezLA/CSC-crispr", "max_stars_repo_head_hexsha": "c45a4c1fded9bef81d3625cf6958491b181cf236", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "csc_v2/csc_lite.py", "max_issues_repo_name": "xerezLA/CSC-crispr", "max_issues_repo_head_hexsha": "c45a4c1fded9bef81d3625cf6958491b181cf236", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "csc_v2/csc_lite.py", "max_forks_repo_name": "xerezLA/CSC-crispr", "max_forks_repo_head_hexsha": "c45a4c1fded9bef81d3625cf6958491b181cf236", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7199124726, "max_line_length": 155, "alphanum_fraction": 0.6284231019, "include": true, "reason": "import numpy", "num_tokens": 3631}
|
[STATEMENT]
lemma tab\<^sub>1_in_hom [intro]:
assumes "ide r"
shows "\<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>"
and "\<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright> &&& \<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
2. \<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>
[PROOF STEP]
show "\<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>
[PROOF STEP]
using assms rep_props left_adjoint_is_ide
[PROOF STATE]
proof (prove)
using this:
ide r
ide ?r \<Longrightarrow> \<guillemotleft>rep ?r : tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<Rightarrow> ?r\<guillemotright>
ide ?r \<Longrightarrow> local.iso (rep ?r)
ide ?r \<Longrightarrow> ?r \<cong> VVV.iso_class_rep \<lbrakk>?r\<rbrakk>
ide ?r \<Longrightarrow> isomorphic_rep ?r (tab\<^sub>0 ?r) (tab\<^sub>1 ?r)
ide ?r \<Longrightarrow> tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<cong> ?r
is_left_adjoint ?f \<Longrightarrow> ide ?f
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<guillemotleft>tab\<^sub>1 r : tab\<^sub>1 r \<Rightarrow> tab\<^sub>1 r\<guillemotright>
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
have "trg (tab\<^sub>1 r) = trg r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trg (tab\<^sub>1 r) = trg r
[PROOF STEP]
using assms rep_props
[PROOF STATE]
proof (prove)
using this:
ide r
ide ?r \<Longrightarrow> \<guillemotleft>rep ?r : tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<Rightarrow> ?r\<guillemotright>
ide ?r \<Longrightarrow> local.iso (rep ?r)
ide ?r \<Longrightarrow> ?r \<cong> VVV.iso_class_rep \<lbrakk>?r\<rbrakk>
ide ?r \<Longrightarrow> isomorphic_rep ?r (tab\<^sub>0 ?r) (tab\<^sub>1 ?r)
ide ?r \<Longrightarrow> tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<cong> ?r
goal (1 subgoal):
1. trg (tab\<^sub>1 r) = trg r
[PROOF STEP]
by (metis ideD(1) isomorphic_implies_hpar(1) isomorphic_implies_hpar(4) trg_hcomp)
[PROOF STATE]
proof (state)
this:
trg (tab\<^sub>1 r) = trg r
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
trg (tab\<^sub>1 r) = trg r
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
have "src (tab\<^sub>0 r) = src (tab\<^sub>1 r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
[PROOF STEP]
using assms rep_props
[PROOF STATE]
proof (prove)
using this:
ide r
ide ?r \<Longrightarrow> \<guillemotleft>rep ?r : tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<Rightarrow> ?r\<guillemotright>
ide ?r \<Longrightarrow> local.iso (rep ?r)
ide ?r \<Longrightarrow> ?r \<cong> VVV.iso_class_rep \<lbrakk>?r\<rbrakk>
ide ?r \<Longrightarrow> isomorphic_rep ?r (tab\<^sub>0 ?r) (tab\<^sub>1 ?r)
ide ?r \<Longrightarrow> tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<cong> ?r
goal (1 subgoal):
1. src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
trg (tab\<^sub>1 r) = trg r
src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
[PROOF STEP]
show "\<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>"
[PROOF STATE]
proof (prove)
using this:
trg (tab\<^sub>1 r) = trg r
src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
using assms rep_props left_adjoint_is_ide
[PROOF STATE]
proof (prove)
using this:
trg (tab\<^sub>1 r) = trg r
src (tab\<^sub>0 r) = src (tab\<^sub>1 r)
ide r
ide ?r \<Longrightarrow> \<guillemotleft>rep ?r : tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<Rightarrow> ?r\<guillemotright>
ide ?r \<Longrightarrow> local.iso (rep ?r)
ide ?r \<Longrightarrow> ?r \<cong> VVV.iso_class_rep \<lbrakk>?r\<rbrakk>
ide ?r \<Longrightarrow> isomorphic_rep ?r (tab\<^sub>0 ?r) (tab\<^sub>1 ?r)
ide ?r \<Longrightarrow> tab\<^sub>1 ?r \<star> (tab\<^sub>0 ?r)\<^sup>* \<cong> ?r
is_left_adjoint ?f \<Longrightarrow> ide ?f
goal (1 subgoal):
1. \<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
[PROOF STEP]
by (intro in_hhomI, auto)
[PROOF STATE]
proof (state)
this:
\<guillemotleft>tab\<^sub>1 r : src (tab\<^sub>0 r) \<rightarrow> trg r\<guillemotright>
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2280, "file": "Bicategory_BicategoryOfSpans", "length": 16}
|
[STATEMENT]
lemma (in is_cat_limit) cat_lim_unique':
assumes "u' : r' <\<^sub>C\<^sub>F\<^sub>.\<^sub>l\<^sub>i\<^sub>m \<FF> : \<JJ> \<mapsto>\<mapsto>\<^sub>C\<^bsub>\<alpha>\<^esub> \<CC>"
shows
"\<exists>!f'. f' : r' \<mapsto>\<^bsub>\<CC>\<^esub> r \<and> (\<forall>j\<in>\<^sub>\<circ>\<JJ>\<lparr>Obj\<rparr>. u'\<lparr>NTMap\<rparr>\<lparr>j\<rparr> = u\<lparr>NTMap\<rparr>\<lparr>j\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>!f'. f' : r' \<mapsto>\<^bsub>\<CC>\<^esub> r \<and> (\<forall>j\<in>elts (\<JJ>\<lparr>Obj\<rparr>). u'\<lparr>NTMap\<rparr>\<lparr>j\<rparr> = u\<lparr>NTMap\<rparr>\<lparr>j\<rparr> \<circ>\<^sub>A\<^bsub>\<CC>\<^esub> f')
[PROOF STEP]
by (intro cat_lim_unique_cone'[OF is_cat_limitD(1)[OF assms]])
|
{"llama_tokens": 378, "file": "CZH_Universal_Constructions_czh_ucategories_CZH_UCAT_Limit", "length": 1}
|
#pragma once
#include <algorithm>
#include <limits>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include <boost/optional.hpp>
#include <boost/range/algorithm/find_if.hpp>
#include <elle/Defaulted.hh>
#include <elle/Error.hh>
#include <elle/assert.hh>
#include <elle/find.hh>
#include <elle/log.hh>
#include <elle/make-vector.hh>
#include <elle/range.hh>
#include <elle/serialization/json.hh>
#include <elle/das/named.hh>
namespace elle
{
namespace das
{
namespace cli
{
/// Create a command line interface for a function using symbols.
///
/// @code{.cc}
///
/// ELLE_DAS_CLI_SYMBOL(prefix, 'p', "The prefix", false);
/// ELLE_DAS_CLI_SYMBOL(body, 'b', "The body", false);
///
/// ...
///
/// auto func = [] (std::string const& foo, std::string const& bar)
/// {
/// return foo + "_" + bar;
/// };
/// auto const proto = elle::das::named::prototype(prefix, body);
/// assert(
/// elle::das::cli::call(proto, func,
/// {"--prefix", "bad", "--body", "karma"})
/// == "bad_karma");
///
/// assert(
/// elle::sprintf("%s", elle::das::cli::help(proto))
/// == " -p, --prefix arg The prefix\n"
/// " -b, --body arg The body\n");
///
/// @endcode
/*---------.
| Errors. |
`---------*/
/// Command line error
class Error
: public elle::Error
{
public:
using elle::Error::Error;
};
/// Error on a specific option
class OptionError
: public virtual Error
{
public:
OptionError(std::string option)
: Error("") // Never called (virtual)
, _option(std::move(option))
{}
/// The erroneous option
ELLE_ATTRIBUTE_R(std::string, option);
};
      /// Error on a value, e.g., an unexpected argument.
class ValueError
: public virtual Error
{
public:
ValueError(std::string value)
: Error("") // Never called (virtual)
, _value(std::move(value))
{}
/// The erroneous value.
ELLE_ATTRIBUTE_R(std::string, value);
};
/// Error on the value given to an option, e.g., invalid type.
class OptionValueError
: public OptionError
, public ValueError
{
public:
OptionValueError(std::string option,
std::string value,
std::string message)
: Error(elle::sprintf("invalid value \"%s\" for option --%s: %s",
value, option, message))
, OptionError(option)
, ValueError(value)
{}
/// The erroneous option
ELLE_ATTRIBUTE_R(std::string, value);
};
#define ELLE_DAS_CLI_OPTION_ERROR(Name, Description) \
class Name ## Option \
: public OptionError \
{ \
public: \
Name ## Option(std::string option) \
: Error(elle::sprintf(Description ": --%s", option)) \
, OptionError(option) \
{} \
} \
#define ELLE_DAS_CLI_VALUE_ERROR(Name, Description) \
class Name ## Value \
: public ValueError \
{ \
public: \
Name ## Value(std::string value) \
: Error(elle::sprintf(Description ": %s", value)) \
, ValueError(value) \
{} \
} \
/// Unrecognized option
ELLE_DAS_CLI_OPTION_ERROR(Unknown, "unknown option");
/// Missing mandatory option
ELLE_DAS_CLI_OPTION_ERROR(Missing, "missing option");
/// Duplicate option
ELLE_DAS_CLI_OPTION_ERROR(Duplicate, "duplicate option");
/// Option used as a flag and passed arguments
ELLE_DAS_CLI_OPTION_ERROR(
Mixed, "option can't be used both as a flag and with arguments");
/// Option without argument.
ELLE_DAS_CLI_OPTION_ERROR(Valueless, "option requires an argument");
/// Unrecognized left over value
ELLE_DAS_CLI_VALUE_ERROR(Unrecognized, "extra unrecognized argument");
#undef ELLE_DAS_CLI_OPTION_ERROR
#undef ELLE_DAS_CLI_VALUE_ERROR
/// Base tag for cli symbols
class CLI_Symbol
{};
/*--------.
| Options |
`--------*/
struct Option
{
Option(char short_name = 0,
std::string help = "",
bool positional = false)
: short_name(short_name)
, help(std::move(help))
, positional(positional)
{}
char short_name;
std::string help;
bool positional;
};
using Options = std::unordered_map<std::string, Option>;
inline
std::string
option_name_from_c(std::string n)
{
std::replace(n.begin(), n.end(), '_', '-');
return n;
}
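      // For instance, option_name_from_c("dry_run") yields "dry-run"
      // ("dry_run" is a hypothetical name, shown only to illustrate the mapping).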
namespace _details
{
inline
std::string
strip_dashes(std::string const& a)
{
auto s = a.size();
if (s == 2 && a[0] == '-' && std::isalpha(a[1]))
return a.substr(1);
else if (2 < a.size() && a[0] == '-' && a[1] == '-')
return a.substr(2);
else
return a;
}
class IsOption
{
public:
IsOption(std::string a, Options const& opts)
          : _option((a.size() > 2 && a[0] == '-' && a[1] == '-')
                    || (a.size() == 2 && a[0] == '-' && std::isalpha(a[1])))
, _arg(std::move(a))
{}
/// Whether this is the option corresponding to Formal F.
template <typename F>
bool
is(Options const& opts)
{
if (this->_option)
{
if (this->_arg[0] == '-' && this->_arg[1] == '-')
return this->_arg.substr(2) == option_name_from_c(F::name());
else
{
using Formal = named::make_formal<F>;
auto res =
elle::meta::static_if<std::is_base_of<CLI_Symbol,
Formal>::value>(
[this] (auto&& formal)
{
return this->_arg[1] == formal.short_name();
},
[](auto&&) { return false; })(Formal{});
if (auto it = find(opts, F::name()))
res = this->_arg[1] == it->second.short_name;
return res;
}
}
else
return false;
}
operator bool() const
{
return this->_option;
}
/// Whether is an option.
ELLE_ATTRIBUTE(bool, option);
/// The long option name (without the `--`).
ELLE_ATTRIBUTE(std::string, arg);
};
}
static inline
_details::IsOption
is_option(std::string const& a, Options const& opts = Options())
{
return _details::IsOption(a, opts);
}
namespace _details
{
/// A unique "impossible" string that will replace parsed
/// arguments.
static inline std::string const&
nothing()
{
// A marker, quite unlikely to be an effective argument.
static auto const res = '{' + std::string(1, 0) + '}';
return res;
}
/// Type of default values for flags that don't have a default value.
struct NoDefault
{
friend std::ostream& operator<<(std::ostream& os, NoDefault)
{
return os << "NoDefault";
};
};
/// A value not found on the CLI
template <typename Default = NoDefault>
class Value
{
public:
/// Whether a default value was specified.
static bool constexpr default_has
= !std::is_same<Default, NoDefault>::value;
ELLE_LOG_COMPONENT("das.cli");
/// An option with a value.
///
/// @param set whether the option was passed on the command line.
Value(Default const& d,
std::string option,
bool positional,
std::vector<std::string>& args,
std::vector<std::string> value,
int& remaining,
bool set)
: _def(d)
, _option(std::move(option))
, _values(std::move(value))
, _flag(false)
, _positional(positional)
, _args(args)
, _remaining(remaining)
, _set(set)
{}
/// An option whose value was not given from the CLI, aka a flag.
Value(Default const& d,
std::string option,
bool positional,
std::vector<std::string>& args,
int& remaining,
bool set)
: Value(d, std::move(option), positional, args, {},
remaining, set)
{
this->_flag = true;
}
template <typename I>
static
std::enable_if_t<std::is_signed<I>::value, I>
to_int(std::string const& v, std::string const& option)
{
std::size_t end;
auto i = std::stoll(v, &end);
if (end != v.size())
throw OptionValueError(option, v, "invalid integer");
else if (i < std::numeric_limits<I>::min()
|| i > std::numeric_limits<I>::max())
throw OptionValueError(option, v, "integer out of range");
else
return i;
}
template <typename I>
static
std::enable_if_t<!std::is_signed<I>::value, I>
to_int(std::string const& v, std::string const& option)
{
// Beware: `std::stoull` underflows instead of throwing out_of_range,
// which is UTTER BULLSHIT. Check manually for negative numbers.
for (auto c: v)
{
if (c == '-')
throw OptionValueError(option, v, "integer out of range");
// "Discards any whitespace characters (as identified by calling
// isspace()) until the first non-whitespace character is found"
if (!std::isspace(c))
break;
}
std::size_t end;
auto i = std::stoull(v, &end);
if (end != v.size())
throw OptionValueError(option, v, "invalid integer");
else if (i > std::numeric_limits<I>::max())
throw OptionValueError(option, v, "integer out of range");
else
return i;
}
template <typename I>
std::enable_if_t<std::is_same<I, bool>::value, I>
convert(std::string const& v, int) const
{
if (v == "true")
return true;
else if (v == "false")
return false;
else
throw OptionValueError(this->_option, v, "invalid boolean");
}
template <typename I>
std::enable_if_t<std::is_same<I, std::string>::value, I>
convert(std::string const& v, int) const
{
return v;
}
template <typename I>
std::enable_if_t<std::is_base_of<boost::optional_detail::optional_tag, I>::value, I>
convert(std::string const& v, int) const
{
if (this->_values.empty())
return boost::none;
else if (this->_values.size() > 1)
throw DuplicateOption(this->_option);
else
return convert<typename I::value_type>(this->_values[0], 0);
}
template <typename I>
std::enable_if_t<
std::is_integral<I>::value && !std::is_same<I, bool>::value, I>
convert(std::string const& v, int) const
{
try
{
return to_int<I>(v, this->_option);
}
          catch (std::invalid_argument const&)
{
throw OptionValueError(this->_option, v, "invalid integer");
}
          catch (std::out_of_range const&)
{
throw OptionValueError(this->_option, v, "integer out of range");
}
}
template <typename I>
I
convert(std::string const& v, ...) const
{
elle::serialization::json::SerializerIn s(elle::json::Json(v), false);
return s.deserialize<I>();
}
template <typename T>
std::enable_if_t<default_has, T>
missing() const
{
ELLE_TRACE("use default value: %s", this->_def);
return this->_def;
}
template <typename T>
std::enable_if_t<!default_has && !std::is_same<T, bool>::value, T>
missing() const
{
ELLE_TRACE("raise missing error");
throw MissingOption(this->_option);
}
template <typename T>
std::enable_if_t<!default_has && std::is_same<T, bool>::value, bool>
missing() const
{
return false;
}
template <typename I>
I
convert() const
{
ELLE_TRACE("convert: %s", this);
if (this->_flag)
throw ValuelessOption(this->_option);
else if (this->_values.empty())
{
if (this->_positional)
{
ELLE_DEBUG("looking for positional arguments");
auto it = this->_args.begin();
while (it != this->_args.end())
{
ELLE_DUMP("evaluating %s", *it);
// Skip option and possible argument.
if (is_option(*it))
{
++it;
if (it != this->_args.end() && *it != nothing())
++it;
}
else if (*it == nothing())
++it;
else
// A genuine argument.
break;
}
if (it != this->_args.end() && *it != nothing())
{
ELLE_TRACE("use next positional value: %s", *it);
this->_values.emplace_back(std::move(*it));
*it = nothing();
++it;
}
else
return this->missing<I>();
}
else
return this->missing<I>();
}
if (this->_values.size() > 1)
throw DuplicateOption(this->_option);
else
return convert<I>(this->_values[0], 0);
}
template <typename I>
operator I() const
{
ELLE_TRACE_SCOPE(
"convert %s to %s", this->_option, elle::type_info<I>());
auto const res = this->convert<I>();
this->_check_remaining();
return res;
}
operator bool() const
{
ELLE_TRACE_SCOPE("convert %s to boolean", this->_option);
auto const res = this->_flag || this->convert<bool>();
this->_check_remaining();
return res;
}
template <typename T>
operator std::vector<T>() const
{
ELLE_TRACE_SCOPE(
"convert %s to %s", this->_option, elle::type_info(std::vector<T>{}));
if (this->_values.empty() && this->_positional)
{
// Take the first contiguous sequence of arguments.
auto const begin = boost::find_if(this->_args,
[] (auto const& a)
{
return a != nothing();
});
auto const end = std::find(begin, this->_args.end(), nothing());
this->_values.insert(
std::end(this->_values),
std::make_move_iterator(begin),
std::make_move_iterator(end));
for (auto& s: as_range(begin, end))
s = nothing();
}
this->_check_remaining();
return elle::make_vector(this->_values,
[&] (std::string const& v)
{
return convert<T>(v, 0);
});
}
/// A conversion that allows to know whether we have the
/// option's default value, or a user defined one.
template <typename I>
operator elle::Defaulted<I>() const
{
ELLE_TRACE_SCOPE("convert %s to %s",
this->_option,
elle::type_info<elle::Defaulted<I>>());
auto const res = this->operator I();
ELLE_TRACE_SCOPE(
"converted %s to %s (%s)",
this->_option, res, this->_set ? "explicit" : "implicit");
return {res, this->_set};
}
/// Check if there are still arguments to process.
///
        /// Must be called once per formal option, as it decrements a
/// counter of the number of options left to check.
void
_check_remaining() const
{
ELLE_DUMP("%s: checking what remains: %s and %s",
this, this->_remaining, this->_args);
if (!--this->_remaining)
{
// This was the last option to be parsed. Check that
// there's nothing left to parse.
auto it = boost::find_if(this->_args,
[](auto const& arg)
{
return arg != nothing();
});
if (it != this->_args.end())
{
if (is_option(*it))
throw UnknownOption(strip_dashes(*it));
else
throw UnrecognizedValue(*it);
}
}
}
friend
std::ostream&
operator <<(std::ostream& out, Value<Default> const& v)
{
elle::fprintf(
out,
"Value(\"%s\", flag=%s, value=%s, def=%s, args=%s,"
" remaining=%s, set=%s)",
v.option(), v.flag(), v.values(), v.def(), v._args,
v._remaining, v.set());
return out;
}
private:
ELLE_ATTRIBUTE_R(Default const&, def);
ELLE_ATTRIBUTE_R(std::string, option);
ELLE_ATTRIBUTE_R(std::vector<std::string>, values, mutable);
/// Whether had no value given on the CLI.
/// E.g. `--foo` => is-flag and value = true,
/// but `--foo=true` => not-is-flag, and value = true.
ELLE_ATTRIBUTE_R(bool, flag);
ELLE_ATTRIBUTE(bool, positional);
/// Arguments not processed so far.
ELLE_ATTRIBUTE(std::vector<std::string>&, args);
/// Number of options that are still to process.
/// Initialized to the number of (formal) options to
/// process, and decreased for each option processed.
ELLE_ATTRIBUTE(int&, remaining);
/// Whether the option was given on the command line.
ELLE_ATTRIBUTE_R(bool, set);
};
/// Invoked once for each Formal type.
template <typename Formal>
struct parse_arg
{
/// The type of the default value.
using Default
= typename named::DefaultStore<Formal>::template DefaultFor<Formal>;
using Symbol = named::make_formal<Formal>;
/// Invoked once for each Formal type.
static inline
auto
value(Default const& d,
std::vector<std::string>& args,
Options const& opts,
int& counter)
{
ELLE_LOG_COMPONENT("das.cli");
bool flag = false;
bool pos = false;
bool set = false;
auto value = std::vector<std::string>{};
auto next_option = [&](auto i) {
return std::find_if(i, args.end(), [](auto const& arg) {
return is_option(arg);
});
};
ELLE_TRACE_SCOPE("parsing option %s", Symbol::name());
ELLE_DUMP("remaining arguments: %s", args);
for (auto it = next_option(args.begin()); it != args.end();
it = next_option(it + 1))
{
// There's a possible explicit argument after `=`.
auto const eq = it->find('=');
auto const option = it->substr(0, eq);
auto o = is_option(option, opts);
assert(o);
if (o.template is<Formal>(opts))
{
// This is the option we are looking for.
set = true;
auto argument_set = [&](auto const& arg) {
ELLE_DEBUG(
"found \"%s\" on the command line with argument \"%s\"",
option, arg);
if (flag)
throw MixedOption(Symbol::name());
value.emplace_back(arg);
};
if (eq == it->npos)
{
// `--foo`: no `=`.
*it = nothing();
if (it+1 != args.end() && !is_option(*(it+1), opts) && *(it+1) != nothing())
{
++it;
argument_set(*it);
*it = nothing();
}
else
{
ELLE_DEBUG("found \"%s\" on the command line", *it);
if (!value.empty())
throw MixedOption(Symbol::name());
if (flag)
throw DuplicateOption(Symbol::name());
flag = true;
}
}
else
{
// `--foo=bar`: explicit argument.
argument_set(it->substr(eq + 1));
*it = nothing();
}
}
}
elle::meta::static_if<std::is_base_of<CLI_Symbol, Symbol>::value>(
[] (auto& pos, auto t)
{
pos = decltype(t)::type::positional();
})(pos, elle::meta::Identity<Symbol>());
{
auto it = opts.find(Symbol::name());
if (it != opts.end())
pos = it->second.positional;
}
return elle::meta::static_if<Default::has>(
[&] (auto const& d)
{
using V = Value<
typename std::remove_cv_reference_t<decltype(d)>::type>;
if (flag)
return V(d.value, Symbol::name(), pos, args, counter, set);
else
{
if (value.empty())
ELLE_DEBUG("no occurences, default value is %s", d.value);
return V(d.value, Symbol::name(), pos, args,
std::move(value), counter, set);
}
},
[&] (auto)
{
using V = Value<NoDefault>;
if (flag)
return V(NoDefault{}, Symbol::name(), pos, args, counter, set);
else
{
if (value.empty())
ELLE_DEBUG("no occurences and no default value");
return V(NoDefault{}, Symbol::name(), pos, args,
std::move(value), counter, set);
}
})(d);
}
};
}
/// The type of the object functions in charge of parsing
/// arguments of type `Formal`.
template <typename Formal>
using ParseArg
= _details::parse_arg<std::remove_cv_reference_t<Formal>>;
template <typename F, typename ... Formals, typename ... Raw>
auto
_call(named::Prototype<Formals...> const& p,
F const& f,
std::vector<std::string>& args,
Raw&& ... raw,
Options const& opts = Options())
{
ELLE_LOG_COMPONENT("das.cli");
int counter = sizeof ... (Formals);
return f(ParseArg<Formals>::value(p.defaults, args, opts, counter)...,
std::forward<Raw>(raw)...);
}
template <typename F,
typename ... Formals,
typename ... Raw>
auto
call(named::Prototype<Formals...> const& p,
F const& f,
std::vector<std::string>& args,
Raw&& ... raw,
Options const& opts = Options())
{
return _call(p, f, args, std::forward<Raw>(raw)..., opts);
}
template <typename F,
typename ... Formals,
typename ... Args,
typename ... Raw>
auto
call(named::Prototype<Formals...> const& p,
F const& f,
std::vector<std::string> const& args,
Raw&& ... raw,
Options const& opts = Options())
{
auto copy = args;
return _call(p, f, copy, std::forward<Raw>(raw)..., opts);
}
template <typename ... T,
typename ... Raw>
auto
call(named::Function<T...> const& f,
std::vector<std::string>& args,
Raw&& ... raw,
Options const& opts = Options())
{
return _call(f.prototype(), f.function(), args,
std::forward<Raw>(raw)..., opts);
}
template <typename ... T,
typename ... Raw>
auto
call(named::Function<T...> const& f,
std::vector<std::string> const& args,
Raw&& ... raw,
Options const& opts = Options())
{
auto copy = args;
return _call(f.prototype(), f.function(), copy,
std::forward<Raw>(raw)..., opts);
}
inline
void
print_help(std::ostream& s,
std::string const& name,
bool with_argument,
char short_name = 0,
std::string const& help = {})
{
if (short_name)
elle::fprintf(s, " -%s, ", short_name);
else
elle::fprintf(s, " ");
elle::fprintf(s, "--%-18s",
cli::option_name_from_c(name)
+ (with_argument ? " arg" : ""));
if (!help.empty())
elle::fprintf(s, " %s", help);
}
template <typename Formal, typename Default>
struct help_map
{
using type = bool;
static
bool
value(std::ostream& s, Options const& opts, Default const& def)
{
using Symbol = named::make_formal<Formal>;
// Whether expects an argument.
bool with_argument
= elle::meta::static_if<Default::has>
([] (auto const& def)
{
auto const& v = def.value;
return !std::is_same<decltype(v), bool const&>::value;
},
[] (auto const&)
{
return true;
})(def);
// Print the option's help string.
auto opt = opts.find(Symbol::name());
if (opt == opts.end())
elle::meta::static_if<std::is_base_of<CLI_Symbol, Symbol>::value>(
[&s, with_argument] (auto formal) {
using formal_t = typename decltype(formal)::type;
print_help(s,
formal_t::name(), with_argument,
formal_t::short_name(), formal_t::help());
},
[&s, with_argument] (auto formal) {
using formal_t = typename decltype(formal)::type;
print_help(s, option_name_from_c(formal_t::name()), with_argument);
})(elle::meta::Identity<Symbol>{});
else
print_help(s, Symbol::name(), with_argument,
opt->second.short_name, opt->second.help);
elle::meta::static_if<Default::has>
([&s] (auto const& def)
{
auto const& v = def.value;
if (!std::is_same<decltype(v), bool const&>::value
&& !std::is_same<decltype(v), boost::none_t const&>::value)
elle::fprintf(s, " (default: %s)", v);
})(def);
elle::fprintf(s, "\n");
return true;
}
};
template <typename ... T>
class Help
: public elle::Printable::as<Help<T ...>>
{
public:
Help(named::Prototype<T...> const& p,
Options const& opts = Options())
: _prototype(p)
, _options(opts)
{}
template <typename>
using make_bool = bool;
void
print(std::ostream& s) const
{
std::tuple<make_bool<T>...>{
help_map<T, typename named::Prototype<T...>::DefaultStore::template DefaultFor<T>>::value(
s, this->_options, this->_prototype.defaults)...
};
}
ELLE_ATTRIBUTE(named::Prototype<T...> const&, prototype);
ELLE_ATTRIBUTE(Options, options);
};
template <typename ... T>
auto
help(named::Prototype<T...> const& p, Options const& opts = Options())
{
return Help<T...>(p, opts);
}
template <typename ... T>
auto
help(named::Function<T...> const& f, Options const& opts = Options())
{
return help(f.prototype(), opts);
}
}
}
}
#define ELLE_DAS_CLI_SYMBOL(Name, ...) ELLE_DAS_CLI_SYMBOL_NAMED(Name, Name, __VA_ARGS__)
#define ELLE_DAS_CLI_SYMBOL_NAMED(Name, CName, Short, Help, Pos) \
ELLE_DAS_SYMBOL_TYPE_NAMED(Name, CName); \
class CS_##Name \
: public _Symbol_##Name<CS_##Name> \
, public ::elle::das::cli::CLI_Symbol \
{ \
public: \
using Super = _Symbol_##Name<CS_##Name>; \
using Super::operator=; \
constexpr \
CS_##Name() \
{} \
\
static constexpr \
char \
short_name() \
{ \
return Short; \
} \
\
static constexpr \
char const* \
help() \
{ \
return Help; \
} \
\
static constexpr \
bool \
positional() \
{ \
return Pos; \
} \
}; \
constexpr static CS_##Name CName = {};
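// Usage sketch, mirroring the example in the header comment above
// (`verbose` is a hypothetical option used only for illustration):
//
//   ELLE_DAS_CLI_SYMBOL(verbose, 'v', "Enable verbose output", false);
//
// declares a `verbose` symbol whose short_name() is 'v', whose help() is the
// given string, and whose positional() is false.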
|
{"hexsha": "a3f41f3b90d8ca68d5a2f3b268a8737ff9ba8669", "size": 33839, "ext": "hh", "lang": "C++", "max_stars_repo_path": "src/elle/das/cli.hh", "max_stars_repo_name": "infinitio/elle", "max_stars_repo_head_hexsha": "d9bec976a1217137436db53db39cda99e7024ce4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/elle/das/cli.hh", "max_issues_repo_name": "infinitio/elle", "max_issues_repo_head_hexsha": "d9bec976a1217137436db53db39cda99e7024ce4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/elle/das/cli.hh", "max_forks_repo_name": "infinitio/elle", "max_forks_repo_head_hexsha": "d9bec976a1217137436db53db39cda99e7024ce4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3225469729, "max_line_length": 102, "alphanum_fraction": 0.4225006649, "num_tokens": 6675}
|
/*
* File: cql_exponential_reconnection_policy_t.hpp
* Author: mc
*
* Created on September 26, 2013, 8:49 AM
*/
#ifndef CQL_EXPONENTIAL_RECONNECTION_POLICY_T_HPP_
#define CQL_EXPONENTIAL_RECONNECTION_POLICY_T_HPP_
#include <boost/date_time/posix_time/posix_time.hpp>
#include "cql/policies/cql_reconnection_policy.hpp"
namespace cql {
class cql_exponential_reconnection_schedule_t;
class CQL_EXPORT cql_exponential_reconnection_policy_t
: public cql_reconnection_policy_t
{
public:
inline boost::posix_time::time_duration
base_delay() const { return _base_delay; }
inline boost::posix_time::time_duration
max_delay() const { return _max_delay; }
virtual boost::shared_ptr<cql_reconnection_schedule_t>
new_schedule();
cql_exponential_reconnection_policy_t(
const boost::posix_time::time_duration& base_delay,
const boost::posix_time::time_duration& max_delay);
private:
boost::posix_time::time_duration _base_delay;
boost::posix_time::time_duration _max_delay;
};
class CQL_EXPORT cql_exponential_reconnection_schedule_t
: public cql_reconnection_schedule_t
{
public:
virtual boost::posix_time::time_duration
get_delay();
private:
cql_exponential_reconnection_schedule_t(
const cql_exponential_reconnection_policy_t& policy)
: _policy(policy), _attempts(0),
_last_delay(boost::posix_time::microseconds(0)) { }
friend class cql_exponential_reconnection_policy_t;
const cql_exponential_reconnection_policy_t _policy;
int _attempts;
boost::posix_time::time_duration _last_delay;
};
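// Usage sketch (illustrative only; the actual progression of delays is defined
// by the out-of-line new_schedule()/get_delay() implementations):
//
//   cql_exponential_reconnection_policy_t policy(
//       boost::posix_time::seconds(1),    // base delay
//       boost::posix_time::minutes(10));  // maximum delay
//   boost::shared_ptr<cql_reconnection_schedule_t> schedule = policy.new_schedule();
//   boost::posix_time::time_duration next_wait = schedule->get_delay();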
}
#endif /* CQL_EXPONENTIAL_RECONNECTION_POLICY_T_HPP_ */
|
{"hexsha": "4aad5f6ffa1c408f9c07905012aed687b4b27757", "size": 1866, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/cql/policies/cql_exponential_reconnection_policy_t.hpp", "max_stars_repo_name": "ncbi/cassandra-cpp-driver", "max_stars_repo_head_hexsha": "b2259e9b13849c98fc6b6485f2433c97c1fa4b9f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-02-24T09:22:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T03:04:21.000Z", "max_issues_repo_path": "include/cql/policies/cql_exponential_reconnection_policy_t.hpp", "max_issues_repo_name": "ncbi/cassandra-cpp-driver", "max_issues_repo_head_hexsha": "b2259e9b13849c98fc6b6485f2433c97c1fa4b9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/cql/policies/cql_exponential_reconnection_policy_t.hpp", "max_forks_repo_name": "ncbi/cassandra-cpp-driver", "max_forks_repo_head_hexsha": "b2259e9b13849c98fc6b6485f2433c97c1fa4b9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2015-04-26T07:16:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-23T06:31:07.000Z", "avg_line_length": 29.15625, "max_line_length": 68, "alphanum_fraction": 0.6939978564, "num_tokens": 425}
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax_cfd.equations."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jax_cfd.base import advection
from jax_cfd.base import equations
from jax_cfd.base import finite_differences as fd
from jax_cfd.base import funcutils
from jax_cfd.base import grids
from jax_cfd.base import pressure
from jax_cfd.base import test_util
import numpy as np
def sinusoidal_field(grid):
"""Returns a divergence-free flow on `grid`."""
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
vs = tuple(jnp.sin(2. * np.pi * g / s)
for g, s in zip(grid.mesh(), mesh_size))
return tuple(grids.AlignedArray(v, o)
for v, o in zip(vs[1:] + vs[:1], grid.cell_faces))
def gaussian_field(grid):
"""Returns a 'Gaussian-shaped' field in the 'x' direction."""
mesh = grid.mesh()
mesh_size = jnp.array(grid.shape) * jnp.array(grid.step)
offsets = grid.cell_faces
v = [grids.AlignedArray(
jnp.exp(-sum([jnp.square(x / s - .5)
for x, s in zip(mesh, mesh_size)]) * 100.),
offsets[0])]
for j in range(1, grid.ndim):
v.append(grids.AlignedArray(jnp.zeros(grid.shape), offsets[j]))
return tuple(v)
def zero_field(grid):
"""Returns an all-zero field."""
return tuple(grids.AlignedArray(jnp.zeros(grid.shape), o)
for o in grid.cell_faces)
def momentum(v, density, grid):
"""Returns the momentum due to velocity field `v`."""
return jnp.array([u.data for u in v]).sum() * density * jnp.array(
grid.step).prod()
def _convect_upwind(v, g):
return tuple(advection.advect_upwind(u, v, g) for u in v)
class SemiImplicitNavierStokesTest(test_util.TestCase):
@parameterized.named_parameters(
dict(testcase_name='sinusoidal_velocity_base',
velocity=sinusoidal_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=None,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=2e-3),
dict(testcase_name='gaussian_force_upwind',
velocity=zero_field,
forcing=lambda v, g: gaussian_field(g),
shape=(40, 40, 40),
step=(1., 1., 1.),
density=1.,
viscosity=0,
convect=_convect_upwind,
pressure_solve=pressure.solve_cg,
dt=1e-3,
time_steps=100,
divergence_atol=1e-4,
momentum_atol=1e-4),
dict(testcase_name='sinusoidal_velocity_fast_diag',
velocity=sinusoidal_field,
forcing=None,
shape=(100, 100),
step=(1., 1.),
density=1.,
viscosity=1e-4,
convect=advection.convect_linear,
pressure_solve=pressure.solve_fast_diag,
dt=1e-3,
time_steps=1000,
divergence_atol=1e-3,
momentum_atol=1e-3),
)
def test_divergence_and_momentum(
self, velocity, forcing, shape, step, density, viscosity, convect,
pressure_solve, dt, time_steps, divergence_atol, momentum_atol,
):
grid = grids.Grid(shape, step)
navier_stokes = equations.semi_implicit_navier_stokes(
density,
viscosity,
dt,
grid,
convect=convect,
pressure_solve=pressure_solve,
forcing=forcing)
v_initial = velocity(grid)
v_final = funcutils.repeated(navier_stokes, time_steps)(v_initial)
divergence = fd.divergence(v_final, grid)
self.assertLess(jnp.max(divergence.data), divergence_atol)
initial_momentum = momentum(v_initial, density, grid)
final_momentum = momentum(v_final, density, grid)
if forcing is not None:
expected_change = (
jnp.array([f_i.data for f_i in forcing(v_initial, grid)]).sum() *
jnp.array(grid.step).prod() * dt * time_steps)
else:
expected_change = 0
self.assertAllClose(
initial_momentum + expected_change, final_momentum, atol=momentum_atol)
if __name__ == '__main__':
absltest.main()
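# Runs under absltest; e.g. `python -m jax_cfd.base.equations_test`
# (module path inferred from this file's location in the repository).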
|
{"hexsha": "540ed05f2a6039b362deedc6841ed2b6829093ea", "size": 4741, "ext": "py", "lang": "Python", "max_stars_repo_path": "jax_cfd/base/equations_test.py", "max_stars_repo_name": "dionhaefner/jax-cfd", "max_stars_repo_head_hexsha": "a387152efa580592134a98b50f95055882568447", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jax_cfd/base/equations_test.py", "max_issues_repo_name": "dionhaefner/jax-cfd", "max_issues_repo_head_hexsha": "a387152efa580592134a98b50f95055882568447", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jax_cfd/base/equations_test.py", "max_forks_repo_name": "dionhaefner/jax-cfd", "max_forks_repo_head_hexsha": "a387152efa580592134a98b50f95055882568447", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8187919463, "max_line_length": 79, "alphanum_fraction": 0.650495676, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 1209}
|
# ------------------------------------
# set volume
# ------------------------------------
function set_volume(nodes, cellxmax, cellymax, volume)
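    # vec_r1 and vec_r2 below are the two diagonals of the quadrilateral cell;
    # the area of a (possibly skewed) quadrilateral is |d1 x d2| / 2.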
for j in 1:cellymax
for i in 1:cellxmax
vec_r1x = nodes[i+1,j+1,1] - nodes[i,j,1]
vec_r1y = nodes[i+1,j+1,2] - nodes[i,j,2]
vec_r2x = nodes[i,j+1,1] - nodes[i+1,j,1]
vec_r2y = nodes[i,j+1,2] - nodes[i+1,j,2]
volume[i,j] = abs(vec_r1x*vec_r2y - vec_r1y*vec_r2x) /2
end
end
return volume
end
# ------------------------------------
# set cellcenter
# ------------------------------------
function set_cellcenter(cellcenter, nodes, cellxmax, cellymax)
for l in 1:2
for j in 1:cellymax
for i in 1:cellxmax
xl = 0.25*(nodes[i+1,j+1,l] + nodes[i+1,j,l] + nodes[i,j+1,l] + nodes[i,j,l])
cellcenter[i,j,l] = xl
end
end
end
return cellcenter
end
# ------------------------------------
# set dx and dy for local time stepping
# ------------------------------------
function set_dx_lts(dx, dy, nodes, cellxmax, cellymax, icell)
# define at cell boundaries
# dx = zeros(cellxmax+1, cellymax)
# dy = zeros(cellxmax, cellymax+1)
for j in 1+icell:cellymax -icell
for i in 1+icell:cellxmax+1 -icell
x1 = nodes[i,j,1]
y1 = nodes[i,j,2]
x2 = nodes[i,j+1,1]
y2 = nodes[i,j+1,2]
if (x2 - x1) == 0.0
a = 0.0
b = y1 - a*x1
else
a = (y2-y1) / (x2-x1)
b = y1 - a*x1
end
            # cell center of cell (i,j) = average of its four corner nodes
            ccx = 0.25 * (nodes[i,j,1] + nodes[i+1,j,1] + nodes[i,j+1,1] + nodes[i+1,j+1,1])
            ccy = 0.25 * (nodes[i,j,2] + nodes[i+1,j,2] + nodes[i,j+1,2] + nodes[i+1,j+1,2])
dx1 = 0.0
if a == 0.0
dx1 = abs(ccx - b)
else
dx1 = abs(-a*ccx + ccy -b)/abs(a)
end
            # cell center of cell (i-1,j)
            ccx = 0.25 * (nodes[i-1,j,1] + nodes[i,j,1] + nodes[i-1,j+1,1] + nodes[i,j+1,1])
            ccy = 0.25 * (nodes[i-1,j,2] + nodes[i,j,2] + nodes[i-1,j+1,2] + nodes[i,j+1,2])
dx2 = 0.0
if a == 0.0
dx2 = abs(ccx - b)
else
dx2 = abs(-a*ccx + ccy -b)/abs(a)
end
dx[i,j] = 0.5 * (dx1+dx2)
end
end
for j in 1+icell:cellymax+1 -icell
for i in 1+icell:cellxmax -icell
x1 = nodes[i,j,1]
y1 = nodes[i,j,2]
x2 = nodes[i+1,j,1]
y2 = nodes[i+1,j,2]
if (x2 - x1) == 0.0
a = 0.0
b = y1 - a*x1
else
a = (y2-y1) / (x2-x1)
b = y1 - a*x1
end
            # cell center of cell (i,j)
            ccx = 0.25 * (nodes[i,j,1] + nodes[i+1,j,1] + nodes[i,j+1,1] + nodes[i+1,j+1,1])
            ccy = 0.25 * (nodes[i,j,2] + nodes[i+1,j,2] + nodes[i,j+1,2] + nodes[i+1,j+1,2])
dy1 = 0.0
if a == 0.0
dy1 = abs(ccy - b)
else
dy1 = abs(-a*ccx + ccy -b)/abs(a)
end
            # cell center of cell (i,j-1)
            ccx = 0.25 * (nodes[i,j,1] + nodes[i+1,j,1] + nodes[i,j-1,1] + nodes[i+1,j-1,1])
            ccy = 0.25 * (nodes[i,j,2] + nodes[i+1,j,2] + nodes[i,j-1,2] + nodes[i+1,j-1,2])
dy2 = 0.0
if a == 0.0
dy2 = abs(ccy - b)
else
dy2 = abs(-a*ccx + ccy -b)/abs(a)
end
dy[i,j] = 0.5 * (dy1+dy2)
end
end
return dx, dy
end
# ------------------------------------
# set dtau for local time stepping
# ------------------------------------
function set_lts(dtau, lambda_facex, lambda_facey, Qbase, cellxmax, cellymax, mu, dx, dy,
vecAx, vecAy, volume, specific_heat_ratio, cfl, icell)
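    # Local pseudo-time step per cell: dtau = CFL * volume / (max face spectral
    # radius), where each face spectral radius is |contravariant velocity| +
    # sound speed + a viscous correction 2*mu/(rho*d).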
g = specific_heat_ratio
for j in 1+icell:cellymax -icell
for i in 1+icell:cellxmax+1 -icell
rho_av = 0.5 * (Qbase[i,j,1] + Qbase[i-1,j,1])
u_av = 0.5 * (Qbase[i,j,2] + Qbase[i-1,j,2])
v_av = 0.5 * (Qbase[i,j,3] + Qbase[i-1,j,3])
mu_av = 0.5 * ( mu[i,j] + mu[i-1,j] )
ap = (g * Qbase[ i,j,4] / Qbase[ i,j,1])^0.5
am = (g * Qbase[i-1,j,4] / Qbase[i-1,j,1])^0.5
a_av = 0.5 * (ap + am)
U = u_av*vecAx[i,j,1] + v_av*vecAx[i,j,2]
lambda_facex[i,j] = abs(U) + a_av + 2*mu_av/(rho_av*dx[i,j])
end
end
for j in 1+icell:cellymax+1 -icell
for i in 1+icell:cellxmax -icell
rho_av = 0.5 * (Qbase[i,j,1] + Qbase[i,j-1,1])
u_av = 0.5 * (Qbase[i,j,2] + Qbase[i,j-1,2])
v_av = 0.5 * (Qbase[i,j,3] + Qbase[i,j-1,3])
mu_av = 0.5 * ( mu[i,j] + mu[i,j-1] )
ap = (g * Qbase[ i,j,4] / Qbase[ i,j,1])^0.5
am = (g * Qbase[i,j-1,4] / Qbase[i,j-1,1])^0.5
a_av = 0.5 * (ap + am)
V = u_av*vecAy[i,j,1] + v_av*vecAy[i,j,2]
lambda_facey[i,j] = abs(V) + a_av + 2*mu_av/(rho_av*dy[i,j])
end
end
for j in 1+icell:cellymax-icell
for i in 1+icell:cellxmax-icell
a1 = lambda_facex[ i, j]
a2 = lambda_facex[i+1, j]
a3 = lambda_facey[ i, j]
a4 = lambda_facey[ i,j+1]
lmax = maximum([a1,a2,a3,a4])
dtau[i,j] = cfl * volume[i,j] / lmax
end
end
return dtau
end
# ------------------------------------
# set viscosity by Sutherland's formula
# https://cattech-lab.com/science-tools/sutherland/
# ------------------------------------
function set_mu(mu, Qbase, cellxmax, cellymax, specific_heat_ratio, Rd)
mu0 = 1.82e-5 # Reference Viscosity, Pa s
T0 = 293.15 # Reference Temperature, K
C = 117 # Sutherland's constant, K
for i in 1:cellxmax
for j in 1:cellymax
# T = p/(rho Rd )
T = Qbase[i,j,4]/(Qbase[i,j,1]*Rd)
mu[i,j] = mu0 * (T/T0)^1.5 * (T0+C)/(T+C)
end
end
return mu
end
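# Rough sanity check of the formula above (air near room temperature, T = 300 K):
#   mu = 1.82e-5 * (300/293.15)^1.5 * (293.15+117)/(300+117) ≈ 1.85e-5 Pa s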
# ------------------------------------
# set thermal Conductivity by Sutherland's formula
# https://doi.org/10.11357/jsam1937.37.694
# ------------------------------------
function set_lambda(lambda, Qbase, cellxmax, cellymax, mu, specific_heat_ratio, Rd)
lam0 = 22.3*10^(-3) # Reference thermal Conductivity, W/(m K)
T0 = 273.15 # Reference Temperature, K
C = 125 # Sutherland's constant, K
for j in 1:cellymax
for i in 1:cellxmax
T = Qbase[i,j,4]/(Qbase[i,j,1]*Rd)
lambda[i,j] = lam0*((T0+C)/(T+C))*(T/T0)^1.5
end
end
return lambda
end
# ------------------------------------
# set Minf for AUSM+up (don't use)
# ------------------------------------
function set_Minf(bdcon, specific_heat_ratio, Rd)
rho = 0
u = 0
v = 0
p = 0
for i in 1:4
if Int(bdcon[i][1]) == 0 || Int(bdcon[i][1]) == 5
rho = bdcon[i][2]
u = bdcon[i][3]
v = bdcon[i][4]
p = bdcon[i][5]
elseif Int(bdcon[i][1]) == 6
rho = bdcon[i][2]
u = bdcon[i][3]
v = bdcon[i][4]
T = bdcon[i][8]
p = rho*Rd*T
end
end
a = (specific_heat_ratio * p / rho)^0.5
u = (u^2 + v^2)^0.5
M = u/a
return M
end
# ------------------------------------
# Conversion from primitive variables to conserved variables
# ------------------------------------
function base_to_conservative(Qbase, Qcon, cellxmax, cellymax, specific_heat_ratio)
"""
Qbase=[rho,u,v,p]
Qcon=[rho,rhou,rhov,e]
"""
for j in 1:cellymax
for i in 1:cellxmax
Qcon[i,j,1] = Qbase[i,j,1]
Qcon[i,j,2] = Qbase[i,j,1]*Qbase[i,j,2]
Qcon[i,j,3] = Qbase[i,j,1]*Qbase[i,j,3]
Qcon[i,j,4] = Qbase[i,j,4]/(specific_heat_ratio-1)+Qbase[i,j,1]*(Qbase[i,j,2]^2+Qbase[i,j,3]^2)/2
end
end
return Qcon
end
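# Note: conservative_to_base below inverts this exactly, recovering
#   p = (e - rho*(u^2 + v^2)/2) * (specific_heat_ratio - 1).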
# ------------------------------------
# Conversion from conserved variables to primitive variables
# ------------------------------------
function conservative_to_base(Qbase, Qcon, cellxmax, cellymax, specific_heat_ratio)
"""
Qbase=[rho,u,v,p]
Qcon=[rho,rhou,rhov,e]
"""
for j in 1:cellymax
for i in 1:cellxmax
Qbase[i,j,1] = Qcon[i,j,1]
Qbase[i,j,2] = Qcon[i,j,2]/Qcon[i,j,1]
Qbase[i,j,3] = Qcon[i,j,3]/Qcon[i,j,1]
Qbase[i,j,4] = (Qcon[i,j,4]-Qcon[i,j,1]*(Qbase[i,j,2]^2+Qbase[i,j,3]^2)/2)*(specific_heat_ratio-1)
end
end
return Qbase
end
# ------------------------------------
# Conversion from Cartesian coordinate system to general coordinate system
# ------------------------------------
function setup_Qcon_hat(Qcon, Qcon_hat, cellxmax, cellymax, volume, nval)
for l in 1:nval
for j in 1:cellymax
for i in 1:cellxmax
Qcon_hat[i,j,l] = Qcon[i,j,l] * volume[i,j]
end
end
end
return Qcon_hat
end
# ------------------------------------
# Conversion from general coordinate system to Cartesian coordinate system
# ------------------------------------
function Qhat_to_Q(Qcon, Qcon_hat, cellxmax, cellymax, volume, nval)
for l in 1:nval
for j in 1:cellymax
for i in 1:cellxmax
Qcon[i,j,l] = Qcon_hat[i,j,l] / volume[i,j]
end
end
end
return Qcon
end
# ------------------------------------
# accumulate Qbase for time averaging
# ------------------------------------
function cal_Qave(Qbase, Qbase_ave, cellxmax, cellymax, nval)
for l in 1:nval
for j in 1:cellymax
for i in 1:cellxmax
Qbase_ave[i,j,l] += Qbase[i,j,l]
end
end
end
return Qbase_ave
end
|
{"hexsha": "8fe9b356901010a814919584996bfa0ae5a697b0", "size": 10055, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src_muscl/value_setup.jl", "max_stars_repo_name": "hide-dog/general_2d_NS_LES", "max_stars_repo_head_hexsha": "571e4d3d63882ec0829ed5f56b33bec9b0eaf50e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src_muscl/value_setup.jl", "max_issues_repo_name": "hide-dog/general_2d_NS_LES", "max_issues_repo_head_hexsha": "571e4d3d63882ec0829ed5f56b33bec9b0eaf50e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src_muscl/value_setup.jl", "max_forks_repo_name": "hide-dog/general_2d_NS_LES", "max_forks_repo_head_hexsha": "571e4d3d63882ec0829ed5f56b33bec9b0eaf50e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3239875389, "max_line_length": 110, "alphanum_fraction": 0.4361014421, "num_tokens": 3430}
|
"""
_checked_rrule
like `rrule` but throws an error if the `rrule` is not defined.
Rather than returning `nothing`
"""
function _checked_rrule(f, args...; kwargs...)
r = rrule(f, args...; kwargs...)
r isa Nothing && _throw_checked_rrule_error(f, args...; kwargs...)
return r
end
@noinline function _throw_checked_rrule_error(f, args...; kwargs...)
io = IOBuffer()
print(io, "can't differentiate `", f, '(')
join(io, map(arg->string("::", typeof(arg)), args), ", ")
if !isempty(kwargs)
print(io, ";")
join(io, map(((k, v),)->string(k, "=", v), kwargs), ", ")
end
print(io, ")`; no matching `rrule` is defined")
throw(ArgumentError(String(take!(io))))
end
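# Usage sketch (illustrative; `sin` has an `rrule` in ChainRules, so this returns
# the primal value and its pullback instead of throwing):
#   y, pullback = _checked_rrule(sin, 1.0)
# A call with no matching `rrule` raises the ArgumentError built above.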
|
{"hexsha": "f5ea27d49417cc35092b3cf7098dd1ae7bdd102e", "size": 717, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/helper_functions.jl", "max_stars_repo_name": "freemin7/ChainRules.jl", "max_stars_repo_head_hexsha": "59f06bce6ebb21ed2d5f71769887d9b6523c9d61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/helper_functions.jl", "max_issues_repo_name": "freemin7/ChainRules.jl", "max_issues_repo_head_hexsha": "59f06bce6ebb21ed2d5f71769887d9b6523c9d61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/helper_functions.jl", "max_forks_repo_name": "freemin7/ChainRules.jl", "max_forks_repo_head_hexsha": "59f06bce6ebb21ed2d5f71769887d9b6523c9d61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.68, "max_line_length": 70, "alphanum_fraction": 0.5969316597, "num_tokens": 201}
|
import argparse
import numpy as np
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
|
{"hexsha": "7dff4de995b677855c6aef2e0aa5307e06979f88", "size": 197, "ext": "py", "lang": "Python", "max_stars_repo_path": "references/ati_cnn/crnn_keras.py", "max_stars_repo_name": "busyyang/torch_ecg", "max_stars_repo_head_hexsha": "031d90a32b8a1e202364efe1e5a19a9ba1f0a726", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-06-26T03:00:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T13:43:00.000Z", "max_issues_repo_path": "references/ati_cnn/crnn_keras.py", "max_issues_repo_name": "busyyang/torch_ecg", "max_issues_repo_head_hexsha": "031d90a32b8a1e202364efe1e5a19a9ba1f0a726", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-01T09:29:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-02T03:41:55.000Z", "max_forks_repo_path": "references/ati_cnn/crnn_keras.py", "max_forks_repo_name": "busyyang/torch_ecg", "max_forks_repo_head_hexsha": "031d90a32b8a1e202364efe1e5a19a9ba1f0a726", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-28T03:13:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-15T14:15:34.000Z", "avg_line_length": 28.1428571429, "max_line_length": 71, "alphanum_fraction": 0.8426395939, "include": true, "reason": "import numpy", "num_tokens": 39}
|
PROGRAM p17
  ! Contrast a statement function (f1) with a contained function (f2):
  ! both evaluate a + b*x**2 + c*y and should print identical results.
  IMPLICIT NONE
  REAL a,b,c,x,y,z,f1
  DATA c / 5./
  f1(x,y) = a+b*x**2+c*y   ! statement function, must precede executable code
  a = 1
  b = 2
  z = f1(2.,2.0)           ! 1 + 2*4 + 5*2 = 19
  PRINT *, 'f1(2.,2.): ', z
  z = f1(b,b)              ! b = 2, so also 19
  PRINT *, 'f1(b,b): ', z
  z = f2(2.,2.0)
  PRINT *, 'f2(2.,2.): ', z
  z = f2(b,b)
  PRINT *, 'f2(b,b): ', z
CONTAINS
  FUNCTION f2(x,y)
    REAL x,y,f2
    f2 = a+b*x**2+c*y      ! host association gives access to a, b, c
    RETURN
  END FUNCTION f2
END PROGRAM p17
|
{"hexsha": "cd63c206a66a6c99e97176329ba3df518f202124", "size": 480, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "lab3/p17.f90", "max_stars_repo_name": "M1nified/UJ-Fortran", "max_stars_repo_head_hexsha": "3067d036e38a2bdf433bd63eb47498cc163dedaf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab3/p17.f90", "max_issues_repo_name": "M1nified/UJ-Fortran", "max_issues_repo_head_hexsha": "3067d036e38a2bdf433bd63eb47498cc163dedaf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab3/p17.f90", "max_forks_repo_name": "M1nified/UJ-Fortran", "max_forks_repo_head_hexsha": "3067d036e38a2bdf433bd63eb47498cc163dedaf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.0, "max_line_length": 29, "alphanum_fraction": 0.3833333333, "num_tokens": 194}
|
#pragma once
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <functional>
#include <numeric>
#include <thread>
#include <vector>
#include <boost/test/unit_test.hpp>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics.hpp>
#include "snapshot.hpp"

namespace bacc = boost::accumulators;

// Check that a snapshot's summary statistics agree with statistics computed
// directly from the raw sample vector.
inline void test_snapshot(const std::vector<int>& vec, const metrics::snapshot& snap){
    int min = *std::min_element(vec.begin(), vec.end());
    int max = *std::max_element(vec.begin(), vec.end());
    BOOST_CHECK_EQUAL(min, snap.min());
    BOOST_CHECK_EQUAL(max, snap.max());
    double total = std::accumulate(vec.begin(), vec.end(), 0.0);
    BOOST_CHECK_CLOSE(total / vec.size(), snap.mean(), 0.0001);
    bacc::accumulator_set<double, bacc::stats<bacc::tag::variance> > acc;
    std::for_each(vec.begin(), vec.end(), std::bind<void>(std::ref(acc), std::placeholders::_1));
    BOOST_CHECK_CLOSE(bacc::mean(acc), snap.mean(), 1e-6);
    // TODO : Evaluate how to allow sample stats in std dev nicely
    if(vec.size() > 1){
        // Rescale the accumulator's population variance to the sample standard deviation.
        BOOST_CHECK_CLOSE(std::sqrt(bacc::variance(acc) * vec.size() / (vec.size() - 1)), snap.std_dev(), 1e-6);
    } else {
        BOOST_CHECK_CLOSE(0.0, snap.std_dev(), 1e-6);
    }
}

// Spin (yielding the thread) until func returns true.
inline void wait_for(const std::function<bool(void)>& func){
    do {
        std::this_thread::yield();
    } while (!func());
}

namespace mock {
// Clock whose time is set manually, for deterministic timing tests.
class clock {
public:
    typedef std::chrono::seconds duration;
    typedef std::chrono::time_point<clock> time_point;
    static time_point now();
    static void set_time(const int& val);
    static int time();
};
}
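// Example (sketch): exercising the helpers above. The `metrics::snapshot`
// constructor shown is assumed for illustration, not taken from snapshot.hpp.
//
//   std::vector<int> samples{1, 2, 3, 4};
//   metrics::snapshot snap(samples);
//   test_snapshot(samples, snap);
//   wait_for([]{ return true; });  // returns immediately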
|
{"hexsha": "c3638fafdc5f420ee10dc6f52f7a337f1508fef9", "size": 1525, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "test/common.hpp", "max_stars_repo_name": "andrew-murray/cpp-metrics", "max_stars_repo_head_hexsha": "9d126227b825c561fab5db79b01f267bd0ea9412", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-07-31T00:49:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-31T00:49:23.000Z", "max_issues_repo_path": "test/common.hpp", "max_issues_repo_name": "andrew-murray/cpp-metrics", "max_issues_repo_head_hexsha": "9d126227b825c561fab5db79b01f267bd0ea9412", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/common.hpp", "max_forks_repo_name": "andrew-murray/cpp-metrics", "max_forks_repo_head_hexsha": "9d126227b825c561fab5db79b01f267bd0ea9412", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3269230769, "max_line_length": 103, "alphanum_fraction": 0.686557377, "num_tokens": 402}
|
using KernelFunctions
using AxisArrays
using Distances
using Documenter
using Kronecker: Kronecker
using LinearAlgebra
using LogExpFunctions
using PDMats
using Random
using SpecialFunctions
using Test
using Flux
using Zygote: Zygote
using ForwardDiff: ForwardDiff
using ReverseDiff: ReverseDiff
using FiniteDifferences: FiniteDifferences
using KernelFunctions: SimpleKernel, metric, kappa, ColVecs, RowVecs, TestUtils
using KernelFunctions.TestUtils: test_interface
# Writing tests:
# 1. The file structure of the test should match precisely the file structure of src.
# Amongst other things, this means that there should be exactly 1 test file per src file.
# This makes it trivially easy for someone to find the tests associated with a particular
# src file.
# 2. A consequence of 1 is that every src file has a corresponding test file (see also 6).
# 3. A test file called foo.jl should have the structure:
# @testset "foo" begin
# code
# end
#
# Note that the testset is called `foo`, not `foo.jl`. Use whatever testset structure
# seems appropriate within a given file, e.g. if multiple types / functions are defined in
# a particular source file, you might want multiple testsets in the test file.
# 4. Each directory should have its own testset, in which each test file is `include`d.
# 5. Each test file should create its own state, and shouldn't rely on state defined in
# other test files. e.g. don't define a matrix used by all of the files in kernels. If
# two test files are similar enough to share state, perhaps the corresponding source code
# should be in the same file.
# 6. If you write a src file without any tests, create a corresponding test file with the
# usual structure, but without any tests.
# 7. Explicitly create a new random number generator for _at_ _least_ each new test file, and
# use it whenever generating randomness. This ensures complete control over random number
# generation and makes it clear what randomness depends on other randomness.
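#    (For example, a sketch: `rng = MersenneTwister(123456)` at the top of the file,
#    then pass `rng` explicitly, as in `rand(rng, 3)`; `Random` is already loaded above.)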
# 8. All `using` statements should appear in runtests.jl.
# 9. List out all test files explicitly (e.g. don't loop over them). This makes it easy to
# disable tests by simply commenting them out, and makes it very clear which tests are not
# currently being run.
# 10. If utility functionality is required, it should be placed in `src/test_utils.jl` so
# that other packages can benefit from it when implementing new kernels.
@info "Packages Loaded"
include("test_utils.jl")
@testset "KernelFunctions" begin
include("utils.jl")
@testset "distances" begin
include(joinpath("distances", "pairwise.jl"))
include(joinpath("distances", "dotproduct.jl"))
include(joinpath("distances", "delta.jl"))
include(joinpath("distances", "sinus.jl"))
end
@info "Ran tests on Distances"
@testset "transform" begin
include(joinpath("transform", "transform.jl"))
print(" ")
include(joinpath("transform", "scaletransform.jl"))
print(" ")
include(joinpath("transform", "ardtransform.jl"))
print(" ")
include(joinpath("transform", "lineartransform.jl"))
print(" ")
include(joinpath("transform", "functiontransform.jl"))
print(" ")
include(joinpath("transform", "selecttransform.jl"))
print(" ")
include(joinpath("transform", "chaintransform.jl"))
print(" ")
include(joinpath("transform", "periodic_transform.jl"))
print(" ")
end
@info "Ran tests on Transform"
@testset "basekernels" begin
include(joinpath("basekernels", "constant.jl"))
print(" ")
include(joinpath("basekernels", "cosine.jl"))
print(" ")
include(joinpath("basekernels", "exponential.jl"))
print(" ")
include(joinpath("basekernels", "exponentiated.jl"))
print(" ")
include(joinpath("basekernels", "fbm.jl"))
print(" ")
include(joinpath("basekernels", "gabor.jl"))
print(" ")
include(joinpath("basekernels", "matern.jl"))
print(" ")
include(joinpath("basekernels", "nn.jl"))
print(" ")
include(joinpath("basekernels", "periodic.jl"))
print(" ")
include(joinpath("basekernels", "piecewisepolynomial.jl"))
print(" ")
include(joinpath("basekernels", "polynomial.jl"))
print(" ")
include(joinpath("basekernels", "rational.jl"))
print(" ")
include(joinpath("basekernels", "sm.jl"))
print(" ")
include(joinpath("basekernels", "wiener.jl"))
print(" ")
end
@info "Ran tests on BaseKernel"
@testset "kernels" begin
include(joinpath("kernels", "kernelproduct.jl"))
include(joinpath("kernels", "kernelsum.jl"))
include(joinpath("kernels", "kerneltensorproduct.jl"))
include(joinpath("kernels", "overloads.jl"))
include(joinpath("kernels", "scaledkernel.jl"))
include(joinpath("kernels", "transformedkernel.jl"))
include(joinpath("kernels", "normalizedkernel.jl"))
include(joinpath("kernels", "neuralkernelnetwork.jl"))
end
@info "Ran tests on Kernel"
@testset "matrix" begin
include(joinpath("matrix", "kernelmatrix.jl"))
include(joinpath("matrix", "kernelkroneckermat.jl"))
include(joinpath("matrix", "kernelpdmat.jl"))
end
@info "Ran tests on matrix"
@testset "multi_output" begin
include(joinpath("mokernels", "moinput.jl"))
include(joinpath("mokernels", "independent.jl"))
include(joinpath("mokernels", "slfm.jl"))
include(joinpath("mokernels", "intrinsiccoregion.jl"))
include(joinpath("mokernels", "lmm.jl"))
end
@info "Ran tests on Multi-Output Kernels"
@testset "approximations" begin
include(joinpath("approximations", "nystrom.jl"))
end
include("generic.jl")
include("chainrules.jl")
include("zygoterules.jl")
@testset "doctests" begin
DocMeta.setdocmeta!(
KernelFunctions,
:DocTestSetup,
quote
using KernelFunctions
using LinearAlgebra
using Random
using PDMats: PDMats
end;
recursive=true,
)
doctest(
KernelFunctions;
doctestfilters=[
r"{([a-zA-Z0-9]+,\s?)+[a-zA-Z0-9]+}",
r"(Array{[a-zA-Z0-9]+,\s?1}|Vector{[a-zA-Z0-9]+})",
r"(Array{[a-zA-Z0-9]+,\s?2}|Matrix{[a-zA-Z0-9]+})",
],
)
end
end
|
{"hexsha": "51a72742b7dffffb466cf1ca05ee7685290362af", "size": 6620, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "thomasgudjonwright/KernelFunctions.jl", "max_stars_repo_head_hexsha": "037cd74f525dc8fdac0fdaef37a0ee53a8cb2772", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "thomasgudjonwright/KernelFunctions.jl", "max_issues_repo_head_hexsha": "037cd74f525dc8fdac0fdaef37a0ee53a8cb2772", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "thomasgudjonwright/KernelFunctions.jl", "max_forks_repo_head_hexsha": "037cd74f525dc8fdac0fdaef37a0ee53a8cb2772", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4011299435, "max_line_length": 92, "alphanum_fraction": 0.6463746224, "num_tokens": 1691}
|
import numpy as np
import torch
import matplotlib.pyplot as plt
from pathlib import Path
from matplotlib.backends.backend_pdf import PdfPages
import glob
import pandas as pd
import os
from itertools import compress
from results_utils import display_dataset_name, display_decoder_name, coef_variation, metrics, dict_mean, dict_ste, dict_mean_ste, display_metrics_name
from distinctipy import distinctipy
import copy
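# Aggregate per-run result files from res_dir into LaTeX tables and PDF
# diagnostic plots: for every dataset/decoder pair the best config is chosen
# by mean validation loss, then test losses and sampling metrics are averaged
# over runs.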
res_dir = Path("../work/results/")
log_dir = "../work/out/"
latex_dir = Path("../work/latex/")
pdfs_dir = Path("../work/pdfs/")
latex_dir.mkdir(parents=True, exist_ok=True)
pdfs_dir.mkdir(parents=True, exist_ok=True)
make_results_file = False
tag = "real" #"abl" # "real" # "synth" # both
datasets_real = ['yelp_airport', 'taxi', 'yelp_mississauga', 'twitter', 'wikipedia', 'pubg', 'yelp_toronto',
'reddit_askscience_comments', 'reddit_politics_submissions', 'lastfm', 'mooc', 'reddit']
datasets_synth = ['synth/poisson', 'synth/renewal', 'synth/self_correcting', 'synth/hawkes2', 'synth/hawkes1']
datasets = datasets_synth + datasets_real
configs_rqs_real = ["1", "2", "3", "5", "8", "10", "15"]
configs_rqs_synth = ["3", "5", "8", "10"]
###
datasets_metrics_real_sub = ['yelp_airport', 'yelp_mississauga', 'pubg', 'wikipedia', 'lastfm']
datasets_metrics_real_all = datasets_metrics_real_sub + ['taxi', 'twitter','yelp_toronto',
'reddit_askscience_comments', 'reddit_politics_submissions', 'mooc', 'reddit']
datasets_metrics_synth_sub = ['synth/poisson', 'synth/renewal','synth/hawkes1']
datasets_metrics_synth_all = datasets_metrics_synth_sub + ['synth/self_correcting', 'synth/hawkes2']
datasets_metrics_both_sub = datasets_metrics_synth_sub + datasets_metrics_real_sub
datasets_metrics_both_all = datasets_metrics_synth_all + datasets_metrics_real_all
if tag == "real":
datasets_metrics_sub = datasets_metrics_real_sub
#datasets_metrics_all = datasets_metrics_real_all
    my_order = [1, 2, 3, 6, 8, 10, 11, 9, 7, 12, 5, 4]  # 1-indexed positions into datasets_metrics_real_all
datasets_metrics_all = [datasets_metrics_real_all[i - 1] for i in my_order]
elif tag == "synth":
datasets_metrics_sub = datasets_metrics_synth_sub
datasets_metrics_all = datasets_metrics_synth_all
elif tag == "both":
datasets_metrics_sub = datasets_metrics_both_sub
datasets_metrics_all = datasets_metrics_both_all
elif tag == "abl":
datasets = ['yelp_airport', 'taxi', 'yelp_mississauga', 'twitter', 'wikipedia', 'pubg', 'yelp_toronto', 'reddit_askscience_comments', 'reddit_politics_submissions', 'lastfm']
datasets_metrics_sub = datasets
datasets_metrics_all = datasets
configs_others = ["64", "64b", "64c"]
runs = [1, 2, 3]
#runs = [1]
#decoders = ['Exponential', 'RMTPP', 'LogNormMix', 'RQS_EXP-crps_qapprox', 'RQS_EXP-logs' ]
decoders = ['LogNormMix', 'RQS_EXP-crps_qapprox', 'Exponential', 'RMTPP']
decoders_metrics = ['RQS_EXP-crps_qapprox', 'LogNormMix', 'RMTPP', 'Exponential']
if tag == "abl":
decoders = ['LogNormMix', 'RQS_EXP-crps_qapprox', 'RQS_EXP-logs', 'Exponential', 'RMTPP']
decoders_metrics = ['RQS_EXP-logs', 'RQS_EXP-crps_qapprox', 'LogNormMix', 'RMTPP', 'Exponential']
#viz_decoders = ['LogNormMix', 'RQS_EXP-crps_qapprox']
#viz_decoders = decoders
viz_decoders = ['RQS_EXP-crps_qapprox', 'LogNormMix']
#decoder_colors = distinctipy.get_colors(len(decoders))
decoder_colors = ["blue", "black", "green", "orange"]
if tag == "abl":
decoder_colors = ["blue", "black", "yellow", "green", "orange"]
all_results_file = res_dir / ("all_results.pt")
if make_results_file or not all_results_file.exists():
results_loss_mean = dict()
results_loss_ste = dict()
results_loss_mean_ste = dict()
results_metrics_mean = dict()
results_metrics_ste = dict()
results_metrics_mean_ste = dict()
for dataset in datasets: ######
print("--- ", dataset, " ---")
#decoders_loss_test, decoders_metrics_test_analytical = [], []
decoders_loss_test_mean = dict()
decoders_loss_test_ste = dict()
decoders_loss_test_mean_ste = dict()
decoders_metrics_mean = dict()
decoders_metrics_ste = dict()
decoders_metrics_mean_ste = dict()
for decoder in decoders: ######
print(decoder)
best_loss_val = np.inf
best_config = None
metrics_test_analytical = None
configs = configs_others
if "RQS" in decoder and "synth" in dataset:
configs = configs_rqs_synth
elif "RQS" in decoder and "synth" not in dataset:
configs = configs_rqs_real
for config in configs:
list_loss_val = []
for run in runs:
suffix = str(dataset).replace("/", "-") + "-" + str(decoder) + '-' + config + '-' + str(run) + '.pt'
results_file = res_dir / suffix
#print(results_file)
if results_file.exists():
loaded = torch.load(results_file, map_location=torch.device('cpu'))
list_loss_val.append(loaded['loss_val'].item())
if not 'predictions_test' in loaded.keys():
print("Predictions missing in ", results_file, " !!!!!")
else:
print(results_file, " missing!")
print(list_loss_val)
loss_val = np.mean(list_loss_val)
if loss_val < best_loss_val:
best_loss_val = loss_val
best_config = config
# END FILE CONFIG
all_metrics_sampling_best = []
all_loss_test_best = []
# BEST FILE CONFIG
for run in runs:
suffix_best = str(dataset).replace("/", "-") + "-" + str(decoder) + '-' + best_config + '-' + str(run) + '.pt'
best_results_file = res_dir / suffix_best
if best_results_file.exists():
loaded_best = torch.load(best_results_file, map_location=torch.device('cpu'))
if not 'predictions_test' in loaded_best.keys():
print("Predictions missing in ", best_results_file, " !!!!!")
else:
metrics_sampling_best = metrics(loaded_best["predictions_test"])
all_metrics_sampling_best.append(metrics_sampling_best)
#if "RQS_EXP" in decoder:
#metrics_analytical_best = metrics(loaded_best["list_predictions"], analytical = True)
#else:
# metrics_analytical_best = None
#metrics_analytical_best = None
#decoders_metrics_test_analytical.append(metrics_analytical_best)
loss_test_best = loaded_best['loss_test'].item()
all_loss_test_best.append(loss_test_best)
decoders_loss_test_mean[decoder] = np.mean(all_loss_test_best)
decoders_loss_test_ste[decoder] = np.std(all_loss_test_best)/np.sqrt(len(all_loss_test_best))
decoders_loss_test_mean_ste[decoder] = "{:0.3f}".format(decoders_loss_test_mean[decoder]) + " (" + "{:0.3f}".format(decoders_loss_test_ste[decoder]) + ")"
decoders_metrics_mean[decoder] = dict_mean(all_metrics_sampling_best)
decoders_metrics_ste[decoder] = dict_ste(all_metrics_sampling_best)
decoders_metrics_mean_ste[decoder] = dict_mean_ste(all_metrics_sampling_best)
# END DECODER
#results_loss.append(decoders_loss_test)
#results_metrics.append(decoders_metrics_test_sampling)
#results_metrics_analytical.append(decoders_metrics_test_analytical)
results_loss_mean[dataset] = decoders_loss_test_mean
results_loss_ste[dataset] = decoders_loss_test_ste
results_loss_mean_ste[dataset] = decoders_loss_test_mean_ste
####
# results_loss_mean_ste[dataset][best_decoder] = str("\bf{") + results_loss_mean_ste[dataset][best_decoder] + "}"
results_metrics_mean[dataset] = decoders_metrics_mean
results_metrics_ste[dataset] = decoders_metrics_ste
results_metrics_mean_ste[dataset] = decoders_metrics_mean_ste
# END DATASET
#breakpoint()
dict_results = {"results_metrics_mean": results_metrics_mean,
"results_metrics_ste": results_metrics_ste,
"results_metrics_mean_ste": results_metrics_mean_ste,
"results_loss_mean": results_loss_mean,
"results_loss_ste": results_loss_ste,
"results_loss_mean_ste": results_loss_mean_ste}
torch.save(dict_results, all_results_file)
loaded_results = torch.load(all_results_file, map_location=torch.device('cpu'))
results_loss_mean = loaded_results["results_loss_mean"]
results_loss_ste = loaded_results["results_loss_ste"]
results_loss_mean_ste = loaded_results["results_loss_mean_ste"]
results_metrics_mean = loaded_results["results_metrics_mean"]
results_metrics_ste = loaded_results["results_metrics_ste"]
results_metrics_mean_ste = loaded_results["results_metrics_mean_ste"]
###
def make_metrics(set_metrics, my_datasets, my_decoders, results_metrics_mean, results_metrics_ste, add_ste = False):
k_datasets, frames_mean, frames_ste = [], [], []
results_subset_metrics_mean = { mydata: results_metrics_mean[mydata] for mydata in my_datasets }
results_subset_metrics_ste = { mydata: results_metrics_ste[mydata] for mydata in my_datasets }
results_subset_loss_mean = { mydata: results_loss_mean[mydata] for mydata in my_datasets }
results_subset_loss_ste = { mydata: results_loss_ste[mydata] for mydata in my_datasets }
for k_data, v_dict_decoders in results_subset_metrics_mean.items():
k_datasets.append(display_dataset_name(k_data, add_hline = True))
new_dict_decoders_mean = dict()
new_dict_decoders_ste = dict()
v_dict_my_decoders = {dec: v_dict_decoders[dec] for dec in my_decoders}
for k_decoder, v_dict_metrics in v_dict_my_decoders.items():
new_dict_metrics_mean = dict()
new_dict_metrics_ste = dict()
for k in set_metrics:
if k_data == "lastfm" and k_decoder == "LogNormMix":
# Outliers in LogNormMix results
print("MODIFICATIONS DUE TO OUTLIERS !!!!!!!! ")
#v_dict_metrics["qs"][-10:] = results_subset_mean[k_data]["Exponential"]["qs"][-10:]
#v_dict_metrics["crps"] = torch.mean(v_dict_metrics["qs"])
results_subset_metrics_mean[k_data][k_decoder]["crps"] = results_subset_metrics_mean[k_data]["RMTPP"]["crps"]
results_subset_metrics_ste[k_data][k_decoder]["crps"] = results_subset_metrics_ste[k_data]["RMTPP"]["crps"]
if k == "qs50" or k == "qs90":
probs = torch.arange(1,500)/500
if k == "qs50":
idx = torch.where(probs == 0.5)[0].item()
elif k == "qs90":
idx = torch.where(probs == 0.9)[0].item()
x_mean = v_dict_metrics["qs"][idx]
x_ste = results_subset_metrics_ste[k_data][k_decoder]["qs"][idx]
if torch.is_tensor(x_mean):
new_dict_metrics_mean[k] = x_mean.item()
new_dict_metrics_ste[k] = x_ste.item()
else:
new_dict_metrics_mean[k] = x_mean
new_dict_metrics_ste[k] = x_ste
elif k == "logs":
if "RQS" in k_decoder and "logs" not in k_decoder:
new_dict_metrics_mean[k] = np.inf
new_dict_metrics_ste[k] = np.inf
else:
new_dict_metrics_mean[k] = results_subset_loss_mean[k_data][k_decoder]
new_dict_metrics_ste[k] = results_subset_loss_ste[k_data][k_decoder]
else:
new_dict_metrics_mean[k] = v_dict_metrics[k]
new_dict_metrics_ste[k] = results_subset_metrics_ste[k_data][k_decoder][k]
new_dict_decoders_mean[display_decoder_name(k_decoder)] = new_dict_metrics_mean
new_dict_decoders_ste[display_decoder_name(k_decoder)] = new_dict_metrics_ste
df_dataset_mean = pd.DataFrame.from_dict(new_dict_decoders_mean, orient='index')
df_dataset_ste = pd.DataFrame.from_dict(new_dict_decoders_ste, orient='index')
for j in np.arange(df_dataset_mean.shape[1]):
best_mean = np.inf
best_std = np.inf
best_i = np.inf
for i in np.arange(df_dataset_mean.shape[0]):
x_mean = df_dataset_mean.iloc[i, j]
if x_mean < best_mean:
best_mean = x_mean
best_std = df_dataset_ste.iloc[i, j]
best_i = i
#
for i in np.arange(df_dataset_mean.shape[0]):
x_mean = df_dataset_mean.iloc[i, j]
#if df_dataset_mean.columns[j] != "piw90":
if x_mean != np.inf:
if x_mean <= best_mean + best_std and "piw" not in df_dataset_mean.columns[j]:
df_dataset_mean.iloc[i, j] = "$\\bm{" + "{:0.3f}".format(x_mean) + "}$"
else:
df_dataset_mean.iloc[i, j] = "$ {:0.3f} $".format(x_mean)
if add_ste:
df_dataset_mean.iloc[i, j] = df_dataset_mean.iloc[i, j] + " $(" + "{:0.3f}".format(df_dataset_ste.iloc[i, j]) + ")$"
else:
df_dataset_mean.iloc[i, j] = "~~---~~"
frames_mean.append(df_dataset_mean)
#frames_ste.append(df_dataset_ste)
metrics_df_mean = pd.concat(frames_mean, keys=k_datasets)
#metrics_df_ste = pd.concat(frames_ste, keys=k_datasets)
metrics_df_mean.rename(display_metrics_name, axis = 1, inplace = True)
#metrics_df_ste.rename(display_metrics_name, axis = 1, inplace = True)
return metrics_df_mean
set_metrics = ['logs', 'crps', 'mace', 'qs50', 'qs90', 'is50', 'piw50', 'is90', 'piw90', 'smape']
metrics_subset_data = make_metrics(set_metrics, datasets_metrics_sub, decoders_metrics, results_metrics_mean, results_metrics_ste)
print(metrics_subset_data)
table_file = "table_metrics_subset_data_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
res = metrics_subset_data.to_latex(float_format="{:0.3f}".format, index_names = False, escape=False)
tf.write(res)
##
set_metrics = ['logs', 'crps', 'mace', 'qs50', 'qs90', 'is50', 'is90', 'smape']
metrics_all_data = make_metrics(set_metrics, datasets_metrics_all, decoders_metrics, results_metrics_mean, results_metrics_ste, add_ste = True)
print(metrics_all_data)
table_file = "table_metrics_all_data_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
res = metrics_all_data.to_latex(float_format="{:0.3f}".format, index_names = False, escape=False)
tf.write(res)
##
set_metrics = ['logs', 'crps', 'mace', 'qs50', 'qs90', 'is50', 'is90', 'smape']
metrics_all_data_no_ste = make_metrics(set_metrics, datasets_metrics_all, decoders_metrics, results_metrics_mean, results_metrics_ste, add_ste = False)
print(metrics_all_data_no_ste)
table_file = "table_metrics_all_data_no_ste_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
res = metrics_all_data_no_ste.to_latex(float_format="{:0.3f}".format, index_names = False, escape=False)
tf.write(res)
### LOSS
loss_df = pd.DataFrame.from_dict(results_loss_mean_ste, orient='index')
loss_df.rename(display_dataset_name, axis = 0, inplace = True)
loss_df.rename(display_decoder_name, axis = 1, inplace = True)
if tag == "abl":
decoders_reordered = [display_decoder_name(decoders[i]) for i in [0, 2, 3, 4, 1]]
else:
decoders_reordered = [display_decoder_name(decoders[i]) for i in [0, 2, 3, 1]]
loss_df = loss_df.loc[:, decoders_reordered]
if tag == "abl":
columns = pd.MultiIndex.from_arrays([['NLL', 'NLL', 'NLL', 'NLL', 'CRPS'], decoders_reordered], names=['Score', 'Method'])
else:
columns = pd.MultiIndex.from_arrays([['NLL', 'NLL', 'NLL', 'CRPS'], decoders_reordered], names=['Score', 'Method'])
loss_df = pd.DataFrame(loss_df.to_numpy(), index=loss_df.index, columns=columns)
print(loss_df)
table_file = "table_loss_" + tag + ".tex"
results_file = latex_dir / table_file
with open(results_file, 'w') as tf:
res = loss_df.to_latex(float_format="{:0.3f}".format, index_names = False)
tf.write(res)
###
def pdf_metrics(my_datasets, my_id):
myfile = pdfs_dir / ('all_metrics_' + tag + '_' + str(my_id) + '.pdf')
with PdfPages(myfile) as pdf:
        if tag == "synth":
            fig, axes = plt.subplots(5, 4, figsize=(8.27, 10))
        else:
            fig, axes = plt.subplots(4, 4, figsize=(8.27, 8.27))
#fig = plt.figure(constrained_layout=True)
#subfigs = fig.subfigures(nrows=4, ncols=1)
for i_dataset, dataset in enumerate(my_datasets):
#subfigs[i_dataset].suptitle(dataset)
#axs = subfig.subplots(nrows=1, ncols=3)
for i_decoder, decoder in enumerate(viz_decoders):
#
if False:
if dataset == "mooc":
#print(dataset)
#print(x)
#print("--------------")
percentage_to_keep = 0.99
elif dataset == "lastfm" and decoder == "LogNormMix":
percentage_to_keep = 0.80
else:
percentage_to_keep = 1
else:
percentage_to_keep = 1
decoder_name = display_decoder_name(decoder)
## based on 500 values
x = results_metrics_mean[dataset][decoder]["qs"]
taus = torch.arange(1, len(x)+1)/len(x)
# Outliers in quantile scores for LogNormMix
if dataset == "lastfm" and decoder == "LogNormMix":
x[-10:] = 0
elif dataset == "yelp_toronto" and decoder == "LogNormMix":
x[-2:] = 0
m = int(percentage_to_keep * len(x))
axes[i_dataset][0].plot(taus[:m], x[:m], label = decoder_name, color = decoder_colors[i_decoder])
#axs[0].plot(taus, x, label = decoder_name, color = decoder_colors[i_decoder])
axes[i_dataset][0].set_ylabel(dataset)
## based on 99 values
z = results_metrics_mean[dataset][decoder]["ace"]
taus = torch.arange(1, len(z)+1)/len(z)
axes[i_dataset][1].plot(taus, z, label = decoder_name, color = decoder_colors[i_decoder])
#axs[1].plot(taus, z, label = decoder_name, color = decoder_colors[i_decoder])
## Inerval scores
z_bis = results_metrics_mean[dataset][decoder]["int_scores"]
## PI length
x_bis = results_metrics_mean[dataset][decoder]["pilen"]
n = len(x_bis)
#taus = torch.arange(1, n+1)/n
coverage = np.arange(2, n * 2 + 1, 2)
m = int(percentage_to_keep * n)
axes[i_dataset][2].plot(coverage[:m], np.log(z_bis[:m]), label = decoder_name, color = decoder_colors[i_decoder])
#axs[2].plot(coverage[:n], np.log(z_bis[:n]), label = decoder_name, color = decoder_colors[i_decoder])
axes[i_dataset][3].plot(coverage[:m], np.log(x_bis[:m]), label = decoder_name, color = decoder_colors[i_decoder])
#axs[3].plot(coverage[:n], np.log(x_bis[:n]), label = decoder_name, color = decoder_colors[i_decoder])
axes[i_dataset][0].set_title(display_dataset_name(dataset))
axes[i_dataset][0].set_ylabel("QS", fontsize=8)
axes[i_dataset][0].set_xlabel("Probability level", fontsize=8)
axes[i_dataset][1].set_ylabel("ACE", fontsize=8)
axes[i_dataset][1].set_xlabel("Probability level", fontsize=8)
axes[i_dataset][2].set_ylabel("IS (log. scale)", fontsize=8)
axes[i_dataset][2].set_xlabel("Coverage probability (%)", fontsize=8)
axes[i_dataset][3].set_ylabel("IW (log. scale)", fontsize=8)
axes[i_dataset][3].set_xlabel("Coverage probability (%)", fontsize=8)
#axes[0].set_ylabel("Quantile score")
#axes[1].set_ylabel("Absolute calibration error")
#axes[2].set_ylabel("Quantile score")
#fig.suptitle("Data set " + "'"+ display_dataset_name(dataset) + "'", fontsize=16)
handles, labels = axes[0][0].get_legend_handles_labels()
####fig.legend(handles, labels, loc=(0.25,0.9), ncol = len(decoders))
        fig.legend(handles, labels, ncol = len(viz_decoders))  # one column per plotted decoder
fig.tight_layout()
pdf.savefig()
plt.close()
if tag == "synth" or tag == "abl":
pdf_metrics(datasets_metrics_all, 1)
else:
pdf_metrics(datasets_metrics_all[:4], 1)
pdf_metrics(datasets_metrics_all[4:8], 2)
pdf_metrics(datasets_metrics_all[8:], 3)
#pdf_metrics(datasets[8:], 3)
#breakpoint()
def make_relative_error(results_mean, results_ste, my_datasets, wanted, tag):
results_copy_mean = copy.deepcopy(results_mean)
results_copy_mean = { mydata: results_copy_mean[mydata] for mydata in my_datasets }
results_copy_ste = copy.deepcopy(results_ste)
results_copy_ste = { mydata: results_copy_ste[mydata] for mydata in my_datasets }
for dataset, dict_decoders in results_copy_mean.items():
for decoder, dict_metrics in dict_decoders.items():
#print(dataset)
#print(decoder)
#print(dict_metrics[wanted])
#if decoder == "RMTPP" and dataset == "yelp_airport":
# breakpoint()
results_copy_mean[dataset][decoder] = dict_metrics[wanted]
results_copy_ste[dataset][decoder] = results_copy_ste[dataset][decoder][wanted]
res_mean = pd.DataFrame.from_dict(results_copy_mean, orient='index')
mydiff_mean = res_mean.sub(res_mean.loc[:, "RQS_EXP-crps_qapprox"], axis = 0)
#mydiff_mean = res_mean
res_ste = pd.DataFrame.from_dict(results_copy_ste, orient='index')
mydiff_ste = res_ste
#if tag == "real":
# print("I removed lastfm and reddit ")
# mydiff = mydiff.drop("lastfm", axis = 0)
# mydiff = mydiff.drop("reddit", axis = 0)
#mydiff = mydiff.drop("RQS_EXP-crps_qapprox", axis = 1)
return (mydiff_mean, mydiff_ste)
myfile = pdfs_dir / ('plot_loss_' + tag + '.pdf')
with PdfPages(myfile) as pdf:
#fig, ax = plt.subplots(3, 1, figsize=(8.27, 2.5))
fig, ax = plt.subplots(1, 1, figsize=(8.27, 2.5))
    # NOTE: `datasets_metrics_all` is an assumption here; no bare
    # `datasets_metrics` variable is defined in this script.
    mydiff_mean, mydiff_ste = make_relative_error(results_metrics_mean, results_metrics_ste, datasets_metrics_all, "crps", tag)
if False:
y = mydiff_mean.values.tolist()
#y = [item for sublist in mydiff.values.tolist() for item in sublist]
x = np.arange(len(y))
#marks = np.tile(["o", "v", "s", "x"], nval).tolist()
ax[0].plot(x, y, "o", markersize = 3)
ax[0].axhline(y=0, linestyle='dashed', color = "grey")
ax[0].set_ylabel("CRPS")
#if tag == "real":
# print("Changing axis limit! ")
# ax[0].set_ylim(top = 0.25)
else:
y = mydiff_mean.to_numpy().flatten()
e = 10 * mydiff_ste.to_numpy().flatten()
m = mydiff_mean.shape[1]
r = mydiff_mean.shape[0]
my_inc1 = 6
my_inc2 = 20
x = 1 + np.cumsum([0] + np.repeat(my_inc1, m - 1).tolist() + np.tile([my_inc2] + np.repeat(my_inc1, m - 1).tolist(), r - 1).tolist())
#my_table = np.arange(len(y) * myc).reshape((mydiff.shape[0] * myc, mydiff.shape[1]))
#x = my_table[np.arange(0, len(my_table), myc)].flatten()
#breakpoint()
#x = np.arange(len(y))
decod_list = np.tile(mydiff_mean.columns, mydiff_mean.shape[0])
marks = np.tile(["o", "v", "s", "x"], mydiff_mean.shape[0]).tolist()
for xp, yp, m in zip(x, y, marks):
ax.scatter([xp],[yp], marker=m, color = "black", s = 5)
ax.set_ylim(bottom = -0.1, top = 0.8)
#plt.errorbar(x, y, yerr=e, fmt='o')
ax.axhline(y=0, linestyle='dashed', color = "grey")
if False:
#
mydiff = make_relative_error(results_metrics_mean, datasets_metrics, "mace", tag)
y = mydiff.values.tolist()
x = np.arange(len(y))
#y = [item for sublist in mydiff.values.tolist() for item in sublist]
ax[1].plot(x, y,"o", markersize = 3)
ax[1].axhline(y=0, linestyle='dashed', color = "grey")
ax[1].set_ylabel("MACE")
#
mydiff = make_relative_error(results_metrics_mean, datasets_metrics, "smape", tag)
y = mydiff.values.tolist()
x = np.arange(len(y))
#y = [item for sublist in mydiff.values.tolist() for item in sublist]
ax[2].plot(x, y,"o", markersize = 3)
ax[2].axhline(y=0, linestyle='dashed', color = "grey")
ax[2].set_ylabel("SMAPE")
#if tag == "real":
# print("Changing axis limit! ")
# ax[2].set_ylim(bottom = -1, top = 1)
fig.tight_layout()
pdf.savefig()
plt.close()
|
{"hexsha": "525d3184e202736c494c91d6725c35d89e42922f", "size": 25998, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/make_results.py", "max_stars_repo_name": "bsouhaib/qf-tpp", "max_stars_repo_head_hexsha": "a5adf3f7203b920528c1c397329c4afd9039c3b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/make_results.py", "max_issues_repo_name": "bsouhaib/qf-tpp", "max_issues_repo_head_hexsha": "a5adf3f7203b920528c1c397329c4afd9039c3b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/make_results.py", "max_forks_repo_name": "bsouhaib/qf-tpp", "max_forks_repo_head_hexsha": "a5adf3f7203b920528c1c397329c4afd9039c3b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2731707317, "max_line_length": 178, "alphanum_fraction": 0.6122778675, "include": true, "reason": "import numpy", "num_tokens": 6396}
|
# coding=utf-8
# Copyright 2022 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ibc.data.dataset."""
import collections
import os
import tempfile
from typing import List
from ibc.data.dataset import filter_episodes
from ibc.data.dataset import load_tfrecord_dataset_sequence
from ibc.environments.block_pushing import block_pushing # pylint: disable=unused-import
from ibc.environments.block_pushing.oracles import oriented_push_oracle as oriented_push_oracle_module
import numpy as np
import tensorflow as tf
from tf_agents.environments import suite_gym
from tf_agents.trajectories import trajectory
from tf_agents.trajectories.time_step import StepType
from tf_agents.trajectories.trajectory import Trajectory
from tf_agents.utils import example_encoding_dataset
class FilterEpisodesTest(tf.test.TestCase):
def _build_traj(self, step_types):
return Trajectory(step_type=tf.constant(step_types),
action=tf.range(len(step_types)),
observation=(),
policy_info=(),
next_step_type=(),
reward=(),
discount=())
def test_no_first(self):
sample = self._build_traj([StepType.MID] * 6)
sample = filter_episodes(sample)
self.assertEqual(sample.action.numpy().tolist(), list(range(6)))
def test_first_at_start(self):
sample = self._build_traj([StepType.FIRST] + 5 * [StepType.MID])
sample = filter_episodes(sample)
self.assertEqual(sample.action.numpy().tolist(), list(range(6)))
def test_first_at_mid(self):
sample = self._build_traj([StepType.FIRST, StepType.MID, StepType.LAST,
StepType.FIRST, StepType.MID, StepType.MID])
sample = filter_episodes(sample)
self.assertEqual(sample.action.numpy().tolist(), [3, 3, 3, 3, 4, 5])
def test_first_at_end(self):
sample = self._build_traj([StepType.FIRST] * 6)
sample = filter_episodes(sample)
self.assertEqual(sample.action.numpy().tolist(), [5] * 6)
class LoadTFRecordDatasetSequence(tf.test.TestCase):
def _get_test_step(self,
global_step,
step_type,
dataset_spec):
"""Create step where all data (action, observation, etc) is global_step."""
def _arr(shape):
return np.full(shape, global_step, dtype=np.float32)
observation = collections.OrderedDict()
for key, value in dataset_spec.observation.items():
observation[key] = _arr(value.shape)
args = {
'action': _arr(dataset_spec.action.shape),
'observation': observation,
'policy_info': (),
'reward': _arr(shape=()),
'discount': _arr(shape=()),
}
if step_type == StepType.FIRST:
return trajectory.first(**args)
elif step_type == StepType.MID:
return trajectory.mid(**args)
elif step_type == StepType.LAST:
return trajectory.last(**args)
def _init_test_shards(self,
num_shards,
episodes_per_shard,
steps_per_episode):
"""Build a test dataset of BlockPush data."""
datadir = tempfile.mkdtemp(dir=self.get_temp_dir())
shards = [os.path.join(datadir, 'shard%d' % i) for i in range(num_shards)]
# Replicate the ibc data pattern to keep episodes within a single
# shard (episodes never straddle shard boundary).
# Initialize an environment and policy just to get the data spec.
env = suite_gym.load('BlockPush-v0')
policy = oriented_push_oracle_module.OrientedPushOracle(env)
global_step = 0
for shard in shards:
observer = example_encoding_dataset.TFRecordObserver(
shard, policy.collect_data_spec, py_mode=True)
assert steps_per_episode > 2
for _ in range(episodes_per_shard):
for i_step in range(steps_per_episode):
if i_step == 0:
step_type = StepType.FIRST
elif i_step == steps_per_episode - 1:
step_type = StepType.LAST
else:
step_type = StepType.MID
traj = self._get_test_step(
global_step, step_type, policy.collect_data_spec)
observer(traj)
global_step += 1
return shards
def _check_sample(self, sample, expected_values, step_type, next_step_type):
self.assertEqual(sample.step_type.numpy().tolist(), step_type)
self.assertEqual(sample.next_step_type.numpy().tolist(), next_step_type)
self.assertEqual(sample.reward.numpy().tolist(), expected_values)
self.assertEqual(sample.discount.numpy().tolist(), expected_values)
self.assertLen(sample.action.shape, 2) # (seq_len, n)
self.assertEqual(sample.action.numpy().tolist(),
[[v] * sample.action.shape[1] for v in expected_values])
for value in sample.observation.values():
self.assertLen(value.shape, 2) # (seq_len, n)
self.assertEqual(value.numpy().tolist(),
[[v] * value.shape[1] for v in expected_values])
def test_load_tfrecord_dataset_sequence(self):
shards = self._init_test_shards(num_shards=2,
episodes_per_shard=2,
steps_per_episode=4)
dataset = load_tfrecord_dataset_sequence(shards, seq_len=3,
deterministic=True)
dataset_iter = iter(dataset)
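    # With deterministic=True the loader interleaves the two shards round-robin,
    # sliding a length-3 window (stride 1) over each episode; episode starts are
    # padded with repeated FIRST steps, as the expected samples below show.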
self._check_sample(next(dataset_iter), [0, 1, 2], # 1st shard, 0
[StepType.FIRST, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [8, 9, 10], # 2nd shard, 0
[StepType.FIRST, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [1, 2, 3], # 1st shard, 1
[StepType.MID, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.LAST])
self._check_sample(next(dataset_iter), [9, 10, 11], # 2nd shard, 1
[StepType.MID, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.LAST])
self._check_sample(next(dataset_iter), [4, 4, 4], # 1st shard, 2
[StepType.FIRST, StepType.FIRST, StepType.FIRST],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [12, 12, 12], # 2nd shard, 2
[StepType.FIRST, StepType.FIRST, StepType.FIRST],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [4, 4, 5], # 1st shard, 3
[StepType.FIRST, StepType.FIRST, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [12, 12, 13], # 2nd shard, 3
[StepType.FIRST, StepType.FIRST, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [4, 5, 6], # 1st shard, 4
[StepType.FIRST, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [12, 13, 14], # 2nd shard, 4
[StepType.FIRST, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [5, 6, 7], # 1st shard, 5
[StepType.MID, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.LAST])
self._check_sample(next(dataset_iter), [13, 14, 15], # 2nd shard, 5
[StepType.MID, StepType.MID, StepType.MID],
[StepType.MID, StepType.MID, StepType.LAST])
self._check_sample(next(dataset_iter), [0, 0, 0], # 1st shard, 6
[StepType.FIRST, StepType.FIRST, StepType.FIRST],
[StepType.MID, StepType.MID, StepType.MID])
self._check_sample(next(dataset_iter), [8, 8, 8], # 2nd shard, 6
[StepType.FIRST, StepType.FIRST, StepType.FIRST],
[StepType.MID, StepType.MID, StepType.MID])
if __name__ == '__main__':
tf.test.main()
|
{"hexsha": "90bdda2930dc7e186bf308f1f10a9b79d14ec33c", "size": 8888, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/dataset_test.py", "max_stars_repo_name": "google-research/ibc", "max_stars_repo_head_hexsha": "c2f6775418c3d7b1ffd0e822fc0050c834030d15", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 180, "max_stars_repo_stars_event_min_datetime": "2021-11-05T19:34:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:54:43.000Z", "max_issues_repo_path": "data/dataset_test.py", "max_issues_repo_name": "google-research/ibc", "max_issues_repo_head_hexsha": "c2f6775418c3d7b1ffd0e822fc0050c834030d15", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-11-08T21:13:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T08:42:03.000Z", "max_forks_repo_path": "data/dataset_test.py", "max_forks_repo_name": "google-research/ibc", "max_forks_repo_head_hexsha": "c2f6775418c3d7b1ffd0e822fc0050c834030d15", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2021-11-07T05:43:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T10:30:09.000Z", "avg_line_length": 44.44, "max_line_length": 102, "alphanum_fraction": 0.62950045, "include": true, "reason": "import numpy", "num_tokens": 2181}
|
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from action_space import ActionSpace
from channel_space import ChannelSpace
from state_space import StateSpace
from mempool import Mempool, Transaction, Block
from gym.spaces import Discrete
class BlockchainNetworkingEnv(gym.Env):
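    """Gym environment for submitting a blockchain transaction over one of
    several communication channels. Each step the agent either abstains
    (action 0) or picks a channel; the reward trades the success bonus off
    against channel cost, transaction fees, and double-spend attacks.
    """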
SUCCESS_REWARD = 5
LATE_PROB = 1
MAX_ATTACK = 0.1
def __init__(self):
# Channel parameters
self.nb_channels = 4
self.idleChannel = 1
self.prob_switching = 0.9
self.channelObservation = None
self.prob_late = BlockchainNetworkingEnv.LATE_PROB
self.cost_channels = [0.1, 0.1, 0.1, 0.1]
# Blockchain parameters
self.mempool = Mempool()
self.userTransaction = Transaction()
self.lastBlock = Block()
self.hashRate = None
self.doubleSpendSuccess = None
# System parameters
self.nb_past_observations = 4
self.state_size = Mempool.NB_FEE_INTERVALS + 2*self.nb_past_observations
self.action_space = ActionSpace(self.nb_channels + 1)
self.observation_space = StateSpace((Discrete(Mempool.MAX_FEE), Discrete(Mempool.MAX_FEE),
Discrete(Mempool.MAX_FEE), Discrete(Mempool.MAX_FEE),
Discrete(Mempool.MAX_FEE), Discrete(Mempool.MAX_FEE),
Discrete(Mempool.MAX_FEE), Discrete(Mempool.MAX_FEE),
Discrete(Mempool.MAX_FEE), Discrete(Mempool.MAX_FEE),
ActionSpace(self.nb_channels + 1), ChannelSpace(),
ActionSpace(self.nb_channels + 1), ChannelSpace(),
ActionSpace(self.nb_channels + 1), ChannelSpace(),
ActionSpace(self.nb_channels + 1), ChannelSpace()))
# reward define
self.totalReward = 0
self.successReward = 0
self.channelCost = 0
self.transactionFee = 0
self.cost = 0
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid"%(action, type(action))
# reset the rewards
self.totalReward = 0
self.successReward = 0
self.channelCost = 0
self.transactionFee = 0
self.prob_late = None
self.attacked = False
state = list(self.state)
# 1. User's transaction initialization
self.userTransaction = Transaction()
if (len(self.lastBlock.blockTransaction) != 0):
self.userTransaction.estimateFee(self.lastBlock)
# 2. The channel state changes - single idle channel, round robin switching
if (np.random.rand() < self.prob_switching):
self.idleChannel = (self.idleChannel + 1) % self.nb_channels
# print(self.idleChannel)
# 3. Mempool updates - some new transactions come
self.mempool.generateNewTransactions()
# if user does not submit transaction
if (action == 0):
self.totalReward = 0
self.channelObservation = 2
# miners mine a block
self.lastBlock.mineBlock(self.mempool)
# if user submits transaction
else:
self.channelCost = self.cost_channels[action-1]
# in case, channel is idle
if((action-1) == self.idleChannel):
self.prob_late = 0
self.channelObservation = 1
            # if the channel is busy, the transaction may arrive after the mining process
else:
self.prob_late = BlockchainNetworkingEnv.LATE_PROB
self.channelObservation = 0
# if the transaction comes late
if(np.random.rand() < self.prob_late):
# mining process occurs before user's transaction is added
# 4. Miners start mining process, transactions which are included in Block will be removed from mempool
self.lastBlock.mineBlock(self.mempool)
self.mempool.listTransactions.append(self.userTransaction)
self.transactionFee = self.userTransaction.transactionFee
else:
self.mempool.listTransactions.append(self.userTransaction)
# 4. Miners start mining process, transactions which are included in Block will be removed from mempool
self.lastBlock.mineBlock(self.mempool)
self.transactionFee = self.userTransaction.transactionFee
# 5. Attack process
self.hashRate = np.random.uniform(0, BlockchainNetworkingEnv.MAX_ATTACK)
self.doubleSpendSuccess = 2 * self.hashRate
if(np.random.rand() < self.doubleSpendSuccess):
self.attacked = True
        # if the user's transaction made it into the block (and no attack succeeded), grant the success reward
if (self.userTransaction in self.lastBlock.blockTransaction and not self.attacked):
self.successReward = BlockchainNetworkingEnv.SUCCESS_REWARD
self.totalReward = self.successReward - self.channelCost - self.transactionFee
self.cost = self.channelCost + self.transactionFee
# 6. determine new state
self.mempool.updateMempoolState()
for index in range(0, Mempool.NB_FEE_INTERVALS):
state[index] = self.mempool.mempoolState[index]
state.insert(Mempool.NB_FEE_INTERVALS, action)
state.insert(Mempool.NB_FEE_INTERVALS+1, self.channelObservation)
state.pop()
state.pop()
self.state = tuple(state)
done = False
# print(np.array(self.state), [self.totalReward, self.cost], done, {})
return np.array(self.state), [self.totalReward, self.channelCost, self.transactionFee, self.cost], done, {}
def reset(self):
self.state = []
self.mempool.resetMempool()
self.idleChannel = 1
for index in range(0, len(self.mempool.mempoolState)):
self.state.append(self.mempool.mempoolState[index])
for obs_index in range(0, self.nb_past_observations):
self.state.append(0)
self.state.append(2)
print(self.state)
self.steps_beyond_done = None
return np.array(self.state)
def updateObservation(self):
return
def render(self, mode='human', close=False):
return
def close(self):
"""Override in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
raise NotImplementedError()
def configure(self, *args, **kwargs):
"""Provides runtime configuration to the environment.
This configuration should consist of data that tells your
environment how to run (such as an address of a remote server,
or path to your ImageNet data). It should not affect the
semantics of the environment.
"""
raise NotImplementedError()
# env = BlockchainNetworkingEnv()
# env.reset()
# for index in range(0, 50):
# env.step(np.random.randint(0, env.nb_channels))
|
{"hexsha": "27d43766b4f7ebf477fcc21947fbbead64eabac7", "size": 7762, "ext": "py", "lang": "Python", "max_stars_repo_path": "blockchain_networking/blockchain_networking_env.py", "max_stars_repo_name": "TonnyTran/blockchain_networking_DRL", "max_stars_repo_head_hexsha": "3d3bbdfc0c12e2b770df1f6243578aa08b27d135", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-21T08:28:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T13:37:21.000Z", "max_issues_repo_path": "blockchain_networking/blockchain_networking_env.py", "max_issues_repo_name": "TonnyTran/blockchain_networking_DRL", "max_issues_repo_head_hexsha": "3d3bbdfc0c12e2b770df1f6243578aa08b27d135", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "blockchain_networking/blockchain_networking_env.py", "max_forks_repo_name": "TonnyTran/blockchain_networking_DRL", "max_forks_repo_head_hexsha": "3d3bbdfc0c12e2b770df1f6243578aa08b27d135", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-02T01:13:14.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-02T01:13:14.000Z", "avg_line_length": 40.6387434555, "max_line_length": 119, "alphanum_fraction": 0.6141458387, "include": true, "reason": "import numpy", "num_tokens": 1671}
|
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/2d/slipdir/genspatialdb.py
##
## @brief Python script to generate spatial database with displacement
## boundary conditions.
import numpy
# ======================================================================
class GenerateDB(object):
"""
Python object to generate spatial database with displacement
boundary conditions.
"""
def __init__(self):
"""
Constructor.
"""
self.soln = None
self.filename = None
return
def run(self):
"""
Generate the database.
"""
# Domain
x = numpy.arange(-4000.0, 4000.1, 500.0)
y = numpy.arange(-4000.0, 4000.1, 500.0)
npts = x.shape[0]
xx = x * numpy.ones( (npts, 1), dtype=numpy.float64)
yy = y * numpy.ones( (npts, 1), dtype=numpy.float64)
xy = numpy.zeros( (npts**2, 2), dtype=numpy.float64)
xy[:,0] = numpy.ravel(xx)
xy[:,1] = numpy.ravel(numpy.transpose(yy))
disp = self.soln.displacement(xy)
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
data = {'points': xy,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "displacement-x",
'units': "m",
'data': numpy.ravel(disp[0,:,0])},
{'name': "displacement-y",
'units': "m",
'data': numpy.ravel(disp[0,:,1])}]}
from spatialdata.spatialdb.SimpleIOAscii import SimpleIOAscii
io = SimpleIOAscii()
io.inventory.filename = self.filename
io._configure()
io.write(data)
return
# ======================================================================
class GenDBFaultX(GenerateDB):
"""
Python object to generate spatial database with displacement
boundary conditions for the faultx test.
"""
def __init__(self):
"""
Constructor.
"""
from solution import SolnFaultX
self.soln = SolnFaultX()
self.filename = "faultx_disp.spatialdb"
return
# ======================================================================
class GenDBFaultY(GenerateDB):
"""
Python object to generate spatial database with displacement
boundary conditions for the faulty test.
"""
def __init__(self):
"""
Constructor.
"""
from solution import SolnFaultY
self.soln = SolnFaultY()
self.filename = "faulty_disp.spatialdb"
return
# ======================================================================
class GenDBFaultXYP(GenerateDB):
"""
Python object to generate spatial database with displacement
boundary conditions for the faultxyp test.
"""
def __init__(self):
"""
Constructor.
"""
from solution import SolnFaultXYP
self.soln = SolnFaultXYP()
self.filename = "faultxyp_disp.spatialdb"
return
# ======================================================================
class GenDBFaultXYN(GenerateDB):
"""
Python object to generate spatial database with displacement
boundary conditions for the faultxyn test.
"""
def __init__(self):
"""
Constructor.
"""
from solution import SolnFaultXYN
self.soln = SolnFaultXYN()
self.filename = "faultxyn_disp.spatialdb"
return
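# Example usage (sketch): generate the spatial database for the faultx test.
#
#   db = GenDBFaultX()
#   db.run()   # writes faultx_disp.spatialdb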
# End of file
|
{"hexsha": "a68d168c22f93a6f149a79f44b97d4a7fc9ef4a9", "size": 3802, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests_auto/2d/slipdir/genspatialdb.py", "max_stars_repo_name": "joegeisz/pylith", "max_stars_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-09T06:24:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T06:24:11.000Z", "max_issues_repo_path": "tests_auto/2d/slipdir/genspatialdb.py", "max_issues_repo_name": "joegeisz/pylith", "max_issues_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests_auto/2d/slipdir/genspatialdb.py", "max_forks_repo_name": "joegeisz/pylith", "max_forks_repo_head_hexsha": "f74060b7b19d7e90abf8597bbe9250c96593c0ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5167785235, "max_line_length": 72, "alphanum_fraction": 0.5444502893, "include": true, "reason": "import numpy", "num_tokens": 874}
|
import numpy as np
import pytest
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
@pytest.fixture
def vocab():
return SimpleVocab(['a', 'b', 'c'])
@pytest.fixture
def embeds(vocab):
array = np.eye(len(vocab))
return SimpleEmbeddings(array, vocab)
class TestSimpleVocab(object):
def test_save_load(self, vocab, tmpdir):
path = str(tmpdir.join('vocab.txt'))
vocab.save(path)
new_vocab = SimpleVocab.load(path)
assert vocab == new_vocab
|
{"hexsha": "a8d0300a0b6e0e47c7b71eb05adc79b68a341fe9", "size": 494, "ext": "py", "lang": "Python", "max_stars_repo_path": "third-party/gtd/gtd/ml/tests/test_vocab.py", "max_stars_repo_name": "timpowellgit/phrasenode", "max_stars_repo_head_hexsha": "a4dc105a69785f289a4e7998d078d6727686b94d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2018-02-21T15:53:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T21:25:49.000Z", "max_issues_repo_path": "third-party/gtd/gtd/ml/tests/test_vocab.py", "max_issues_repo_name": "timpowellgit/phrasenode", "max_issues_repo_head_hexsha": "a4dc105a69785f289a4e7998d078d6727686b94d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2018-03-09T19:04:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-06T13:54:40.000Z", "max_forks_repo_path": "third-party/gtd/gtd/ml/tests/test_vocab.py", "max_forks_repo_name": "timpowellgit/phrasenode", "max_forks_repo_head_hexsha": "a4dc105a69785f289a4e7998d078d6727686b94d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2018-03-09T19:42:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T15:39:51.000Z", "avg_line_length": 21.4782608696, "max_line_length": 54, "alphanum_fraction": 0.6801619433, "include": true, "reason": "import numpy", "num_tokens": 120}
|
# LNS.py
# Contact: Jacob Schreiber
# jmschr@cs.washington.edu
'''
This is an implementation of Local Network Similarity (LNS), a metric
introduced by Yuanfang Guan et al in their 2013 paper "Comparative
gene expression between two yeast species".
'''
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
def scale( data ):
'''
Scale a matrix in a columnwise manner.
'''
return ( data - data.mean( axis=0 ) ) / data.std( axis=0 )
class LNS( object ):
'''
This is a Local Similarity Network score calculator. It calculates a
standardized correlation coefficient for each pairwise interaction in
order to build a pairwise-interaction network. Data must be input as a
pandas dataframe where each column is a feature.
'''
def __init__( self ):
pass
def fit_score( self, null, alternate, node_names ):
'''
Take in a matrix of values, and compute the standardized correlation
    between each of them in order to produce a standardized similarity
    score for each node.
'''
# Unpack
null = null.values
alternate = alternate.values
# Get the number of nodes, and initialize the edge matrix as zeros.
n, d = null.shape
null_edges = np.zeros((d,d))
alternate_edges = np.zeros((d,d))
        for i in range( d ):
for j in xrange( i+1 ):
null_edges[i, j] = pearsonr( null[:,i], null[:,j] )[0]
null_edges[j, i] = null_edges[i, j]
alternate_edges[i, j] = pearsonr( alternate[:,i], alternate[:,j] )[0]
alternate_edges[j, i] = alternate_edges[i, j]
        # Compress the correlations with an arctan transform (a true Fisher's Z
        # transform would use arctanh, which is infinite at r = 1 on the diagonal)
null_edges = np.arctan( null_edges )
alternate_edges = np.arctan( alternate_edges )
# Normalize the edges so that they follow the normal distribution
#null_edges = scale( null_edges )
#alternate_edges = scale( alternate_edges )
# Calculate the score for each node
scores = np.zeros((d,2))
        for i in range( d ):
r, p = pearsonr( null_edges[:,i], alternate_edges[:,i] )
scores[i,0], scores[i,1] = r, p
self._scores = pd.DataFrame( scores, columns=['r', 'p'] )
self._scores.index = node_names
return self._scores
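# Minimal usage sketch with synthetic data: the column names double as node
# names, and the sample size and node count below are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    genes = ['g1', 'g2', 'g3', 'g4']
    null = pd.DataFrame(np.random.randn(50, 4), columns=genes)
    alternate = pd.DataFrame(np.random.randn(50, 4), columns=genes)
    scores = LNS().fit_score(null, alternate, node_names=genes)
    print(scores)  # one (r, p) row per node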
|
{"hexsha": "a864cd81d99f00bb812c0a4d12c12903ed04be32", "size": 2092, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/LNS.py", "max_stars_repo_name": "jmschrei/discern", "max_stars_repo_head_hexsha": "50b6f03d070604479c160569cca9ef7f031ff38d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-18T09:53:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-18T09:53:58.000Z", "max_issues_repo_path": "analysis/LNS.py", "max_issues_repo_name": "jmschrei/discern", "max_issues_repo_head_hexsha": "50b6f03d070604479c160569cca9ef7f031ff38d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/LNS.py", "max_forks_repo_name": "jmschrei/discern", "max_forks_repo_head_hexsha": "50b6f03d070604479c160569cca9ef7f031ff38d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2702702703, "max_line_length": 73, "alphanum_fraction": 0.6926386233, "include": true, "reason": "import numpy,from scipy", "num_tokens": 580}
|
#!/usr/bin/env python
# encoding=utf-8
# named sting because: if you're a cell,
# every breath you take, every step you make,
# we'll be watching you
# © tim smith 2014, tim.smith@uci.edu
# released under the terms of the wtfpl http://wtfpl.net
from __future__ import division
import argparse
import codecs
from StringIO import StringIO
import sys
import time
import ggplot as gg
import pandas as pd
from numpy import sqrt, sum, array, arctan2
def read_mtrack2(filename):
"""Reads position data from MTrack2 ImageJ plugin results.
params:
filename (str): Path to the result file
returns:
df (pd.DataFrame): with Object, Frame, X, Y columns
"""
with open(filename) as f:
buf = f.readlines()
if '\n' in buf:
# there might be an extra table at the end that we don't care about
buf = buf[:buf.index('\n')]
buf = ''.join(buf)
iobuf = StringIO(buf)
# row 0 is headers and data starts on row 2 so skip row 1
df = pd.read_csv(iobuf, skiprows=[1])
# df.replace(to_replace=' ', value=float('NaN'), inplace=True)
df = df.convert_objects(convert_numeric=True)
# throw out flag columns
df = df[df.columns[[not i.startswith('Flag') for i in df.columns]]]
# fill NA's backwards and then forwards
df.bfill(inplace=True)
df.ffill(inplace=True)
# get X and Y columns
melted = pd.melt(df, id_vars='Frame')
melted['Axis'] = melted['variable'].apply(lambda x: x[0])
melted['Object'] = melted['variable'].apply(lambda x: int(x[1:]))
df = melted.groupby(['Object', 'Frame', 'Axis']).sum().unstack()['value']
df = df.reset_index()
return df
def read_manual_track(filename):
"""Reads position data from Manual Tracking ImageJ plugin results.
Assumes file was produced on a (modern, circa 2016) Mac.
(Why does Fiji default to mac-roman? Deep Java Mysteries.)
params:
filename (str): Path to the result file
returns:
df (pd.DataFrame): with Object, Frame, X, Y columns
"""
df = pd.read_csv(filename, sep='\t', encoding='mac-roman')
df.rename(columns={u'Track n°': 'Object',
u'Slice n°': 'Frame'},
inplace=True)
for col in ['Object', 'Frame']:
df[col] = df[col].astype(int)
return df
def read_mtrackj_mdf(filename):
"""Reads position data from MTrackJ ImageJ plugin results.
Assumes file was produced on a (modern, circa 2016) Mac.
(Why does Fiji default to mac-roman? Deep Java Mysteries.)
params:
filename (str): Path to the result file
returns:
df (pd.DataFrame): with Object, Frame, X, Y columns
"""
f = codecs.open(filename, 'r', 'mac-roman')
if not f.readline().startswith("MTrackJ"):
raise ValueError("File {} is not in MTrackJ MDF format.".format(filename))
this_track = None
x, y, obj, frame = [], [], [], []
for line in f:
if line.startswith('Track'):
this_track = int(float(line.split()[1]))
elif line.startswith('Point'):
tmp = line.split()
obj.append(this_track)
x.append(float(tmp[2]))
y.append(float(tmp[3]))
frame.append(int(float(tmp[5])))
return pd.DataFrame({'Object': obj, 'Frame': frame, 'X': x, 'Y': y})
def center(df):
"""Adds cX and cY columns, which are X and Y relative to the initial position
of the object. Probably modifies its argument in-place.
params:
df (pd.DataFrame): with X and Y columns
returns:
df (pd.DataFrame): input DataFrame augmented with cX, cY columns
"""
def center_transform(x):
x['cX'] = x['X'] - x['X'].iloc[0]
x['cY'] = x['Y'] - x['Y'].iloc[0]
return x
centered = df.groupby('Object').apply(center_transform)
centered['cY'] = -centered['cY']
return centered
def displacement_plot(centered, limits=None, style=None):
u"""Draws nice displacement plots using ggplot2.
params:
centered (pd.DataFrame): needs cX, cY, Object, Frame columns, probably
produced by calling center() above
limits (real): Sets the limits of the scales to a square window showing
±limits on each axis.
style (Iterable): Collection of strings. Recognized values are 'theme-bw'
(which uses theme_bw instead of theme_seaborn) and 'no-terminal-dot'
(which does not label the end of tracks which terminate early).
Returns:
g (gg.ggplot): Plot object
"""
style = {} if style is None else style
centered['Object'] = centered['Object'].map(str)
centered = centered.sort(['Frame', 'Object'])
g = (gg.ggplot(centered, gg.aes(x='cX', y='cY', color='Object')) +
gg.geom_path(size=0.3))
g += gg.theme_bw() # if 'theme-bw' in style else gg.theme_seaborn()
if limits:
g = g + gg.ylim(-limits, limits) + gg.xlim(-limits, limits)
if 'no-terminal-dot' not in style:
max_frame = centered['Frame'].max()
endframe = centered.groupby('Object')['Frame'].max()
endframe = endframe[endframe != max_frame].reset_index()
endframe = endframe.merge(centered, on=['Object', 'Frame'])
# we should check if endframe is empty before adding it:
# https://github.com/yhat/ggplot/issues/425
if not endframe.empty:
g += gg.geom_point(data=endframe, color='black', size=1)
return g
def segment_lengths(obj):
"""Augments its argument obj with a column SegmentLength containing the
Euclidean distance between the point at each row and the row following.
Params:
obj (pd.DataFrame): A data frame with X and Y columns describing a single
object (i.e. all values of Object should be identical within
this data frame!)
Returns:
obj (pd.DataFrame): Input augmented with SegmentLength column
"""
obj.loc[:, 'SegmentLength'] = 0
# use array() to prevent index alignment
obj['SegmentLength'].iloc[1:] = sqrt((obj['X'].iloc[1:] - array(obj['X'][:-1]))**2 +
(obj['Y'].iloc[1:] - array(obj['Y'][:-1]))**2)
return obj
def stats(df, length_scale=1, time_scale=1):
u"""Computes summary statistics for each track in an observation file.
Object is assumed to be unique.
Params:
df (pd.DataFrame): Must have columns Object, cX, cY; Object is assumed
to be unique
length_scale (real): Length scale of images in pixels per micron.
time_scale (real): Time scale of images in minutes per frame.
Returns:
stats (pd.DataFrame): One row for each object containing columns:
rms_displacement: Root-mean-square distance from origin
max_displacement: Furthest (not final!) distance from origin
        path_length: Total distance traveled along the track
        n_points: Number of points sampled
velocity: Velocity, as path length divided by time observed, in
µm/hr
angle: final angle vs. the origin, in radians
"""
    rms = lambda x: sqrt(sum(x**2) / len(x))  # root-mean-square, matching the docstring
df['Distance'] = sqrt(df['cX']**2 + df['cY']**2) / length_scale
df = df.groupby('Object').apply(segment_lengths)
df['SegmentLength'] /= length_scale
per_object = df.groupby('Object')
rms_dx = per_object['Distance'].aggregate(rms)
max_dx = per_object['Distance'].max()
path_length = per_object['SegmentLength'].sum()
n_points = per_object['SegmentLength'].aggregate(len)
last_frame = per_object['Frame'].max()
velocity = path_length/(last_frame * time_scale / 60.0)
def compute_angle(obj_df):
        last_frame = obj_df['Frame'].idxmax()
return arctan2(
obj_df.loc[last_frame, 'cY'],
obj_df.loc[last_frame, 'cX']
)
angle = per_object.apply(compute_angle)
return pd.DataFrame({'rms_displacement': rms_dx,
'max_displacement': max_dx,
'path_length': path_length,
'n_points': n_points,
'velocity': velocity,
'angle': angle,
})
def summary(df):
"""Yields a list of single summary metrics over all tracks in a results file.
Params:
df (pd.DataFrame): The output of stats() above. All rows are expected
to have an identical value of df['filename'].
Returns:
df (pd.DataFrame): A DataFrame with 1 row and several columns:
filename: The filename of the results file
median_rms_dx: Median (over all tracks) RMS displacement from origin
mean_rms_dx: Mean (over all tracks) RMS displacement from origin
n: Number of tracks in the results file
mean_path_length: Mean (over all tracks) path length
median_path_length: Median (over all tracks) path length
mean_max_dx: Mean (over all tracks) maximum displacement
median_max_dx: Median (over all tracks) maximum displacement
mean_velocity: Mean (over all tracks) of velocity along paths
sd_velocity: Standard deviation (over all tracks) of velocities
"""
return pd.DataFrame({'filename': [df['filename'].iloc[0]],
'median_rms_dx': [df['rms_displacement'].median()],
'mean_rms_dx': [df['rms_displacement'].mean()],
'n': [len(df)],
'mean_path_length': [df['path_length'].mean()],
'median_path_length': [df['path_length'].median()],
'mean_max_dx': [df['max_displacement'].mean()],
'median_max_dx': [df['max_displacement'].median()],
'mean_velocity': [df['velocity'].mean()],
'sd_velocity': [df['velocity'].std()]})
def main():
parser = argparse.ArgumentParser(description="Draws displacement plots.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--limits', type=int, help="Maximum extent of the axes")
parser.add_argument('--no-plots', action='store_true', help="Don't save plots")
parser.add_argument('--summary', help='Save summary stats by file')
parser.add_argument('--imagetype', '-i', default='png', help="Extension to use for plots")
parser.add_argument('--pixels-per-micron', '--pixels', '-p', default=1.51, type=float,
help="Pixels per µm (length scale of tracked images)")
parser.add_argument('--minutes-per-frame', '--minutes', '-m', default=10, type=float,
help="Minutes between each frame of the tracked images")
parser.add_argument('--plot-titles', type=argparse.FileType('r'),
help="CSV file with filename and title columns")
parser.add_argument('--style', action='append', default=[],
choices=['theme-bw', 'no-terminal-dot'],
help='Change style options for the plot.')
parser.add_argument('--tick-breaks', '--ticks', '-t', nargs=3, type=int,
metavar=('start', 'end', 'step'),
help="Beginning and end tick breaks on displacement plots")
parser.add_argument('--plot-text', type=int, default=8,
help='Plot text size (pt)')
parser.add_argument('--plot-height', type=float, default=1.81,
help='Plot height (in)')
parser.add_argument('--plot-width', type=float, default=2.5,
help='Plot width (in)')
parser.add_argument('infile', nargs='+', help="File(s) to process.")
args = parser.parse_args()
style = {argument: True for argument in args.style}
plot_titles = pd.read_csv(args.plot_titles, index_col="filename") if args.plot_titles else None
all_dfs = []
for filename in args.infile:
# there has to be a better pattern for this
try:
df = read_mtrackj_mdf(filename)
except ValueError:
try:
df = read_mtrack2(filename)
except Exception:
df = read_manual_track(filename)
centered = center(df)
centered.to_csv(filename + '.centered')
if not args.no_plots:
g = displacement_plot(centered, limits=args.limits, style=style)
g += gg.theme(axis_text=gg.element_text(size=args.plot_text))
g += gg.labs(x='px', y='px')
if args.tick_breaks:
g += gg.scale_x_continuous(breaks=range(*args.tick_breaks))
g += gg.scale_y_continuous(breaks=range(*args.tick_breaks))
if plot_titles is not None and filename in plot_titles.index:
                g += gg.labs(title=plot_titles.loc[filename, 'title'])
g.save('{}.{}'.format(filename, args.imagetype),
width=args.plot_width, height=args.plot_height)
centered['filename'] = filename
all_dfs.append(centered)
mega_df = pd.concat(all_dfs, ignore_index=True)
stats_for = lambda x: stats(x, length_scale=args.pixels_per_micron,
time_scale=args.minutes_per_frame)
obj_stats = (mega_df.groupby('filename', sort=False)
.apply(stats_for)
.reset_index())
summary_by_file = obj_stats.groupby('filename').apply(summary)
if args.summary:
summary_by_file.to_csv(args.summary, index=False)
print("# Produced by {} at {}".format(' '.join(sys.argv), time.ctime()))
print("# {} pixels per micron, {} minutes per frame".
format(args.pixels_per_micron, args.minutes_per_frame))
print("# distance units are microns; velocity units are microns/hour")
obj_stats.to_csv(sys.stdout, index=False)
if __name__ == '__main__':
main()
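# Example invocation (illustrative; assumes a tracked results file named
# tracks.mdf exists in the working directory):
#
#   python sting.py --limits 100 --pixels 1.51 --minutes 10 tracks.mdf
#
# This writes tracks.mdf.centered, saves a plot to tracks.mdf.png, and prints
# per-track statistics as CSV on stdout.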
|
{"hexsha": "dbde60d919cf61305f72b77c5cf6dcd8521bd17f", "size": 13891, "ext": "py", "lang": "Python", "max_stars_repo_path": "sting/sting.py", "max_stars_repo_name": "tdsmith/migrationscripts", "max_stars_repo_head_hexsha": "42cc44f5b1a8058dd92f986152d908585f620498", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sting/sting.py", "max_issues_repo_name": "tdsmith/migrationscripts", "max_issues_repo_head_hexsha": "42cc44f5b1a8058dd92f986152d908585f620498", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sting/sting.py", "max_forks_repo_name": "tdsmith/migrationscripts", "max_forks_repo_head_hexsha": "42cc44f5b1a8058dd92f986152d908585f620498", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7147147147, "max_line_length": 99, "alphanum_fraction": 0.6076596357, "include": true, "reason": "from numpy", "num_tokens": 3229}
|
! { dg-do compile }
! Program to test ENUM parsing errors
program main
implicit none
integer :: i = 1
enum, bind (c)
enumerator :: red, black = i ! { dg-error "is a variable" }
enumerator :: blue = 1
end enum junk ! { dg-error "Syntax error" }
blue = 10 ! { dg-error " assign to a named constant" }
end program main ! { dg-excess-errors "" }
|
{"hexsha": "b27aaf289c04517d02958363f2e651a04fd16035", "size": 371, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/enum_5.f90", "max_stars_repo_name": "vidkidz/crossbridge", "max_stars_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-04-09T02:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2016-04-09T02:58:13.000Z", "max_issues_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/enum_5.f90", "max_issues_repo_name": "vidkidz/crossbridge", "max_issues_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "llvm-gcc-4.2-2.9/gcc/testsuite/gfortran.dg/enum_5.f90", "max_forks_repo_name": "vidkidz/crossbridge", "max_forks_repo_head_hexsha": "ba0bf94aee0ce6cf7eb5be882382e52bc57ba396", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1875, "max_line_length": 64, "alphanum_fraction": 0.6091644205, "num_tokens": 107}
|
From Coq Require Import List Bool FunInd.
From CoLoR Require Import weaved_relation closure term_spec terminaison.
From CoLoR Require equational_theory_spec closure_extension.
Module Make (Eqt:equational_theory_spec.EqTh).
Lemma one_step_list_same_length :
forall R l1 l2, one_step_list (Eqt.one_step R) l2 l1 -> length l1 = length l2.
Proof.
intros R l1 l2 H; apply sym_eq; apply one_step_list_length_eq with (Eqt.one_step R); assumption.
Qed.
Lemma rwr_as_star : forall R t t', Eqt.rwr R t t' -> star _ (Eqt.one_step R) t t'.
Proof.
induction 1.
apply star_R. assumption.
apply star_trans with y;auto.
right with x; [left | assumption].
Qed.
Lemma one_step_list_star_decompose_cons :
forall R x l l'', refl_trans_clos (one_step_list (Eqt.one_step R)) (x::l) l'' ->
exists x', exists l',
l'' = x'::l' /\
refl_trans_clos (Eqt.one_step R) x x'/\
refl_trans_clos (one_step_list (Eqt.one_step R)) l l'.
Proof.
intros R x l l''.
set (l1 := x::l) in *.
generalize (eq_refl l1); unfold l1 at 1;clearbody l1.
intros H H0 .
revert x l H.
induction H0.
rename x into l0.
intros x l He;subst.
exists x;exists l;repeat split;constructor.
rename x into l';rename y into l.
induction H.
inversion H;clear H;subst.
intros a b Heq;injection Heq;clear Heq;intros;subst.
exists t2;exists l.
repeat (assumption||constructor).
intros a b Heq;injection Heq;clear Heq;intros;subst.
exists t;exists l2.
repeat (assumption||constructor).
inversion H;clear H;subst; intros; injection H;clear H;intros;subst.
destruct (IHtrans_clos _ _ (eq_refl _)) as [u [l'' [h1 [h2 h3]]]];clear IHtrans_clos.
subst.
exists u;exists l''.
split. apply eq_refl.
split.
apply refl_trans_clos_is_trans with t2; repeat (assumption||constructor).
assumption.
destruct (IHtrans_clos _ _ (eq_refl _)) as [u [l'' [h1 [h2 h3]]]];clear IHtrans_clos.
subst.
exists u;exists l''.
split. apply eq_refl.
split.
assumption.
apply refl_trans_clos_is_trans with l2; repeat (assumption||constructor).
Qed.
Lemma one_step_list_star_decompose_nil :
forall R l'', refl_trans_clos (one_step_list (Eqt.one_step R)) nil l'' -> l'' = nil.
Proof.
intros R l'' H.
set (l:= @nil Eqt.T.term) in *.
generalize (eq_refl l).
unfold l at 1;clearbody l.
induction H;intro;subst;auto.
inversion H;clear H;subst; inversion H0.
Qed.
Lemma one_step_list_star_l : forall R l x y,
refl_trans_clos (Eqt.one_step R) x y ->
refl_trans_clos (one_step_list (Eqt.one_step R)) (x::l) (y::l).
Proof.
induction 1.
constructor.
constructor.
induction H.
repeat (assumption || constructor).
constructor 2 with (y::l).
repeat (assumption || constructor).
assumption.
Qed.
Lemma one_step_list_star_r : forall R l l' x,
refl_trans_clos (one_step_list (Eqt.one_step R)) l l' ->
refl_trans_clos (one_step_list (Eqt.one_step R)) (x::l) (x::l').
Proof.
induction 1.
constructor.
induction H.
repeat (exact H || constructor).
apply refl_trans_clos_is_trans with (x::y).
constructor.
constructor.
repeat (exact H || constructor).
assumption.
Qed.
Lemma one_step_list_refl_trans_clos : forall R l l' x y,
refl_trans_clos (Eqt.one_step R) x y ->
refl_trans_clos (one_step_list (Eqt.one_step R)) l l' ->
refl_trans_clos (one_step_list (Eqt.one_step R)) (x::l) (y::l').
Proof.
intros R l l' x y H H0.
apply refl_trans_clos_is_trans with (x::l').
apply one_step_list_star_r. assumption.
apply one_step_list_star_l. assumption.
Qed.
Import Eqt.
Import T.
Import Relation_Definitions.
Lemma one_step_ind2
: forall (R : relation term)
(P : forall t t0 : term, one_step R t t0 -> Prop)
(P0 : forall l l0 : list term, one_step_list (one_step R) l l0 -> Prop),
(forall (t1 t2 : term) (a : axiom R t1 t2), P t1 t2 (at_top R t1 t2 a)) ->
(forall (f0 : symbol) (l1 l2 : list term) (o : one_step_list (one_step R) l1 l2),
P0 l1 l2 o -> P (Term f0 l1) (Term f0 l2) (in_context R f0 l1 l2 o)) ->
(forall (t1 t2 : term) (l : list term) (o : one_step R t1 t2),
P t1 t2 o -> P0 (t1 :: l) (t2 :: l) (head_step (one_step R) t1 t2 l o)) ->
(forall (t : term) (l1 l2 : list term) (o : one_step_list (one_step R) l1 l2),
P0 l1 l2 o -> P0 (t :: l1) (t :: l2) (tail_step t o)) ->
forall (t t0 : term) (o : Eqt.one_step R t t0), P t t0 o
.
Proof.
intros R P P0 H H0 H1 H2.
fix one_step_ind2 3.
intros t t0 [t1 t2 a|f l1 l2 o].
apply H.
apply H0.
revert l1 l2 o.
fix one_step_list_ind2 3 .
intros l1 l2 [t1 t2 l a|t1 l1' l2' a].
apply H1.
apply one_step_ind2.
apply H2.
apply one_step_list_ind2.
Qed.
Lemma star_list :
forall R f l l', refl_trans_clos (one_step_list (one_step R)) l' l ->
refl_trans_clos (one_step R) (Term f l') (Term f l).
Proof.
intros R f l l' H.
induction H.
constructor.
constructor.
induction H.
constructor.
constructor 2;assumption.
constructor 2 with (Eqt.T.Term f y).
constructor 2;assumption.
assumption.
Qed.
Import closure_extension.
Lemma star_cons : forall R t l t' l',
refl_trans_clos (one_step R) t' t ->
refl_trans_clos (one_step_list (one_step R)) l' l ->
refl_trans_clos (one_step_list (one_step R)) (t'::l') (t::l).
Proof.
intros R t l t' l' H H0.
apply refl_trans_clos_is_trans with (t::l').
clear H0.
induction H.
constructor.
induction H.
apply refl_trans_clos_with_R.
constructor;assumption.
apply refl_trans_clos_is_trans with (y::l').
apply refl_trans_clos_with_R.
constructor;assumption.
assumption.
clear H;induction H0.
constructor.
induction H.
apply refl_trans_clos_with_R.
constructor;assumption.
apply refl_trans_clos_is_trans with (t::y).
apply refl_trans_clos_with_R.
constructor;assumption.
assumption.
Qed.
Lemma cons_star : forall R t l t' l',
refl_trans_clos (one_step_list (one_step R)) (t'::l') (t::l) ->
refl_trans_clos (one_step R) t' t/\
refl_trans_clos (one_step_list (one_step R)) l' l.
Proof.
intros R t l t' l' H.
set (l1:= t'::l') in *; generalize (eq_refl l1);unfold l1 at 1;clearbody l1.
set (l2:= t::l) in *; generalize (eq_refl l2);unfold l2 at 1;clearbody l2.
revert t l t' l'.
induction H.
intros;do 2 subst.
injection H0;clear H0;intros;subst.
split;constructor.
induction H.
induction H.
intros t l0 t' l' H0 H1.
injection H0;injection H1;clear H0 H1.
intros;repeat subst.
split.
constructor 2;constructor;assumption.
constructor.
intros tk l0 t' l' H0 H1.
injection H0;injection H1;clear H0 H1.
intros;repeat subst.
split.
constructor.
constructor 2;constructor;assumption.
intros t l t' l' H1 H2.
subst.
inversion H;subst.
destruct (IHtrans_clos _ _ _ _ (eq_refl _) (eq_refl _)) as [h1 h2].
clear IHtrans_clos H H0.
split.
inversion h1;subst;clear h1.
constructor. constructor;assumption.
constructor. constructor 2 with t2; assumption.
assumption.
destruct (IHtrans_clos _ _ _ _ (eq_refl _) (eq_refl _)) as [h1 h2].
clear IHtrans_clos H H0.
split.
assumption.
inversion h2;subst;clear h2.
constructor. constructor;assumption.
constructor. constructor 2 with l2; assumption.
Qed.
Function inb (A:Type) (eq_bool: A -> A -> bool) (f:A) (l:list A) {struct l} : bool :=
match l with
| nil => false
| g::l => orb (eq_bool g f) (inb A eq_bool f l)
end.
Lemma inb_equiv :
forall (A:Type) aeq_bool f l, (forall f g:A, f=g <-> aeq_bool f g = true) ->
(In f l <-> inb _ aeq_bool f l=true).
Proof.
intros A aeq_bool f l H.
functional induction (inb _ aeq_bool f l).
simpl;intuition.
simpl.
rewrite H.
case (aeq_bool g f);simpl. clear;intuition.
rewrite (IHb). intuition.
Qed.
Section is_def.
Variable defined_list : list symbol.
Variables rules : relation term.
Variables rule_list : list (term*term).
Hypothesis rules_equiv : forall l r : term, rules r l <-> In (l, r) rule_list.
Hypothesis defined_list_equiv :
forall f : symbol,
In f (defined_list) <-> defined rules f.
Definition is_def f :=
inb _ F.Symb.eq_bool f defined_list.
Lemma is_def_equiv :
forall f : symbol,
is_def f = true <-> defined rules f.
Proof.
intros f.
unfold is_def.
rewrite <- defined_list_equiv.
rewrite (inb_equiv _ F.Symb.eq_bool) .
reflexivity.
clear;intros f g.
generalize (F.Symb.eq_bool_ok f g).
case (F.Symb.eq_bool f g);[tauto|intuition discriminate].
Qed.
End is_def.
End Make.
|
{"author": "fblanqui", "repo": "color", "sha": "f2ef98f7d13c5d71dd2a614ed2e6721703a34532", "save_path": "github-repos/coq/fblanqui-color", "path": "github-repos/coq/fblanqui-color/color-f2ef98f7d13c5d71dd2a614ed2e6721703a34532/Coccinelle/examples/cime_trace/equational_extension.v"}
|
from sklearn import svm
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import scipy.io as sio  # used to load .mat files
import pickle
from sklearn.model_selection import GridSearchCV
# Split the data into training and test sets
data_mat = sio.loadmat(r'svm_data.mat')  # used when evaluating the model
label_mat = sio.loadmat(r'svm_label.mat')
# train_data, test_data, train_label, test_label = train_test_split(data_mat['svm_data'], label_mat['svm_label'],
#                                                                   random_state=1, test_size=0.2)  # used when evaluating the model; comment out for the final prediction run
# Load the training and test data for the final prediction run; comment out when evaluating the model
train_datamat = sio.loadmat('train_data.mat')  # used for the final prediction model
train_labelmat = sio.loadmat('train_label.mat')
test_datamat = sio.loadmat('test_data.mat')
train_data = train_datamat['train_data']
train_label = train_labelmat['train_label']
test_data = test_datamat['test_data']
# Train the SVM classifier; for prediction models 2, 3 and 5, change the parameters to match the paper
classifier = svm.SVC(C=40, kernel='rbf', gamma=14, decision_function_shape='ovr')
classifier.fit(train_data, train_label.ravel())
## Used for model hyperparameter tuning
# param_test1 = {'kernel': ['rbf', 'linear', 'sigmoid', 'poly']}
# param_test1 = {'C': range(1, 100, 1)}
# param_test1 = {'gamma': range(1, 100, 1)}
# gsearch1 = GridSearchCV(estimator=classifier, param_grid=param_test1)
# gsearch1.fit(train_data, train_label.ravel())
# print(f'{gsearch1.best_params_}+:{gsearch1.best_score_}')
# Compute the classifier's accuracy
train_fit = classifier.predict(train_data)
test_fit = classifier.predict(test_data)
print(test_fit)
sio.savemat('model5_pridict.mat', {'test_fit': test_fit})  # save the model's predictions; rename to model[2,3,5]_pridict.mat for the corresponding model
print("Training set:", accuracy_score(train_label, train_fit))
# print("Test set:", accuracy_score(test_label, test_fit))  # model evaluation
model = pickle.dumps(classifier)  # serialize the model
with open('svm.model', 'wb+') as f:
f.write(model)
print("done")
# # Plot the confusion matrix; used when evaluating the model
# confusion_matrix = confusion_matrix(test_label, test_fit)
# N_class = 2
# classes = []
# for i in range(1, N_class+1):
# classes.append(str(i))
# titles_options = [("Confusion matrix,without normalization", None),
# ("Normalized confusion matrix", 'true')]
#
# for title, normalize in titles_options:
# disp = plot_confusion_matrix(classifier, test_data, test_label,
# display_labels=classes,
# cmap=plt.cm.Blues,
# normalize=normalize)
# disp.ax_.set_title(title)
# plt.show()
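# Minimal sketch of reloading the pickled classifier written above; it should
# reproduce the predictions on test_data exactly.
with open('svm.model', 'rb') as f:
    clf = pickle.loads(f.read())
print((clf.predict(test_data) == test_fit).all())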
|
{"hexsha": "e3b62061d858392a2ad7a2a3831a7641ab187225", "size": 2566, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/s3/svm/svm.py", "max_stars_repo_name": "little111cow/2021-D-", "max_stars_repo_head_hexsha": "db128ae39678581a28b974a6a91a3d8d7d119704", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-01T07:37:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T07:37:30.000Z", "max_issues_repo_path": "code/s3/svm/svm.py", "max_issues_repo_name": "little111cow/2021-D-mathematical_modeling", "max_issues_repo_head_hexsha": "db128ae39678581a28b974a6a91a3d8d7d119704", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/s3/svm/svm.py", "max_forks_repo_name": "little111cow/2021-D-mathematical_modeling", "max_forks_repo_head_hexsha": "db128ae39678581a28b974a6a91a3d8d7d119704", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1884057971, "max_line_length": 118, "alphanum_fraction": 0.7034294622, "include": true, "reason": "import scipy", "num_tokens": 792}
|
import numpy as np
from scipy.stats import f
from statsmodels.stats.stattools import durbin_watson
class AR_model(object):
def __init__(self, X, Y, Q_L=1.6, Q_U=2.3, upper=True, basis_type='residual'):
self.Y = Y
self.X = X
self.n = X.shape[0]
self.p = X.shape[1]
self.Q_L = Q_L
self.Q_U = Q_U
self.upper = upper
self.hat = X @ np.linalg.inv(X.T @ X) @ X.T
self.basis_type = basis_type
def cov_mat(self, rho, n=None, sigma=1):
if n is None:
n = self.n
        C = np.tile(np.arange(1, n + 1), (n, 1))  # index grid sized by n, which may differ from self.n
C_cov = np.power(rho, abs(C - C.T)) / (1 - rho ** 2) * sigma**2
return C_cov
def basis(self, res):
tmp = np.cov(res[:-1], res[1:])
Z_b = np.array([np.mean(res[:-1]), np.mean(res[1:]), tmp[0, 0], tmp[1, 1], tmp[0, 1]])
return Z_b
def basis_linear(self, X_b, Y_b):
XY = X_b.T @ Y_b / len(Y_b)
XX = np.mean(X_b, 0)
return np.concatenate([XY, XX])
def test_statistic(self, resids):
rho_hat = (np.mean(resids[1:] * resids[:-1]) - np.mean(resids[1:]) * np.mean(resids[:-1])) / \
(np.mean(resids[:-1] ** 2) - np.mean(resids[:-1]) ** 2)
return rho_hat, durbin_watson(resids)
def gen_train_data(self, ntrain, n_b, beta_hat, rho_hat):
C_cov = self.cov_mat(rho_hat, n_b)
C_inv = np.linalg.inv(C_cov)
Z_train = []
W_train = np.zeros(ntrain)
theta_hat_train = []
X = np.copy(self.X)
for i in range(ntrain):
e_b = np.random.multivariate_normal(np.zeros(n_b), C_cov)
Y_b = X @ beta_hat + e_b
res = Y_b - self.hat @ Y_b
if self.basis_type == 'residual':
Z_b = self.basis(res) # residual
theta_hat, dw = self.test_statistic(res)
else:
Z_b = self.basis_linear(X, Y_b)
theta_hat = np.linalg.inv(X.T @ C_inv @ X) @ X.T @ C_inv @ Y_b
dw = durbin_watson(res)
Z_train.append(Z_b)
if self.upper and dw >= self.Q_U:
W_train[i] = 1
elif not self.upper and dw <= self.Q_L:
W_train[i] = 1
theta_hat_train.append(theta_hat)
Z_train = np.array(Z_train)
theta_hat_train = np.array(theta_hat_train)
cov_Z_theta = (Z_train - np.mean(Z_train, 0)).T @ (theta_hat_train - np.mean(theta_hat_train)) / ntrain
if self.basis_type == 'residual':
var_theta = np.var(theta_hat_train)
Gamma = cov_Z_theta / var_theta
else:
var_theta = np.cov(theta_hat_train.T)
print(cov_Z_theta.shape, var_theta.shape)
Gamma = cov_Z_theta @ np.linalg.inv(var_theta)
return Z_train, W_train, Gamma, var_theta
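# Minimal usage sketch with synthetic data; the dimensions, coefficients and
# random seed below are illustrative only.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n, p = 50, 3
    X = rng.standard_normal((n, p))
    Y = X @ np.array([1.0, -0.5, 0.25]) + rng.standard_normal(n)
    model = AR_model(X, Y)
    resids = Y - model.hat @ Y  # OLS residuals via the hat matrix
    rho_hat, dw = model.test_statistic(resids)
    print('rho_hat:', rho_hat, 'durbin-watson:', dw)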
|
{"hexsha": "88dc63b99f077e3a7af4543ba9f32ba1c386087b", "size": 2870, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/blackbox_selectinf/usecase/AR_model.py", "max_stars_repo_name": "liusf15/blackbox_selectinf", "max_stars_repo_head_hexsha": "874c073ca56c042cbaaed606bf52d6b36ccebd59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/blackbox_selectinf/usecase/AR_model.py", "max_issues_repo_name": "liusf15/blackbox_selectinf", "max_issues_repo_head_hexsha": "874c073ca56c042cbaaed606bf52d6b36ccebd59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/blackbox_selectinf/usecase/AR_model.py", "max_forks_repo_name": "liusf15/blackbox_selectinf", "max_forks_repo_head_hexsha": "874c073ca56c042cbaaed606bf52d6b36ccebd59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2727272727, "max_line_length": 111, "alphanum_fraction": 0.5404181185, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 828}
|
from data.fashionmnist import *
from examples.simple_cnn.model import *
from hyper.container import HyperContainer
from base_step_optimizer import *
from base_trainer import *
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import numpy as np
import torch.nn as nn
import argparse
import sys
import os
import random
import torch
import wandb
parser = argparse.ArgumentParser(description="SimpleCNN STN Experiment")
parser.add_argument("--experiment_name", type=str, default="simple_cnn_stn_experiment")
parser.add_argument("--delta_stn", action="store_true", default=False)
parser.add_argument("--linearize", action="store_true", default=False)
# Tuning options:
parser.add_argument("--scale", type=float, default=1.)
parser.add_argument("--tune_scales", action="store_true", default=False)
parser.add_argument("--tune_input_dropout", action="store_true", default=True)
parser.add_argument("--tune_dropout", action="store_true", default=True)
parser.add_argument("--initial_dropout_value", type=float, default=0.05)
parser.add_argument("--initial_dropout_scale", type=float, default=1.)
parser.add_argument("--tune_cutout", action="store_true", default=True)
parser.add_argument("--initial_cutout_num", type=int, default=1.)
parser.add_argument("--initial_cutout_length", type=int, default=4.)
parser.add_argument("--initial_cutout_scale", type=int, default=1.)
parser.add_argument("--percent_valid", type=float, default=0.15)
parser.add_argument("--train_batch_size", type=int, default=128)
parser.add_argument("--valid_batch_size", type=int, default=128)
parser.add_argument("--test_batch_size", type=int, default=128)
parser.add_argument("--total_epochs", type=int, default=200)
parser.add_argument("--warmup_epochs", type=int, default=5)
parser.add_argument("--train_lr", type=float, default=1e-2)
parser.add_argument("--valid_lr", type=float, default=1e-2)
parser.add_argument("--scale_lr", type=float, default=1e-2)
parser.add_argument("--train_steps", type=int, default=5)
parser.add_argument("--valid_steps", type=int, default=1)
parser.add_argument("--entropy_weight", type=float, default=1e-3)
parser.add_argument("--log_interval", type=int, default=50)
parser.add_argument("--no_cuda", action="store_true", default=False)
parser.add_argument("--save_dir", type=str, default=None)
parser.add_argument("--data_seed", type=int, default=0)
parser.add_argument("--model_seed", type=int, default=0)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.data_seed)
np.random.seed(args.data_seed)
random.seed(args.data_seed)
if args.cuda:
torch.cuda.manual_seed(args.data_seed)
torch.cuda.manual_seed_all(args.data_seed)
wandb.init(project=args.experiment_name,
tensorboard=True,
dir=os.getcwd() if args.save_dir is None else args.save_dir)
wandb.config.update(args)
info = vars(args)
# Load data.
train_loader, valid_loader, test_loader = stn_fashion_mnist_loader(info)
# Configure hyperparameters.
h_container = HyperContainer(device)
if info["tune_input_dropout"]:
h_container.register(name="dropout0",
value=info["initial_dropout_value"],
scale=info["scale"],
min_range=0., max_range=0.95,
discrete=False, same_perturb_mb=False)
if info["tune_dropout"]:
h_container.register("dropout1",
info["initial_dropout_value"],
info["scale"],
min_range=0., max_range=0.95,
discrete=False, same_perturb_mb=False)
h_container.register("dropout2",
info["initial_dropout_value"],
info["scale"],
min_range=0., max_range=0.95,
discrete=False, same_perturb_mb=False)
h_container.register("dropout_fc",
info["initial_dropout_value"],
info["scale"],
min_range=0., max_range=0.95,
discrete=False, same_perturb_mb=False)
if info["tune_cutout"]:
h_container.register("cutout_length",
info["initial_cutout_length"],
info["scale"],
min_range=2., max_range=24.,
discrete=True, same_perturb_mb=False)
h_container.register("cutout_holes",
info["initial_cutout_num"],
info["scale"],
min_range=0., max_range=5.,
discrete=True, same_perturb_mb=False)
num_hyper = h_container.get_size()
# Define models and optimizers.
torch.manual_seed(args.model_seed)
np.random.seed(args.model_seed)
random.seed(args.model_seed)
if args.cuda:
torch.cuda.manual_seed(args.model_seed)
torch.cuda.manual_seed_all(args.model_seed)
model = StnSimpleCNN(h_container=h_container, num_hyper=num_hyper)
model = model.to(device)
criterion = nn.CrossEntropyLoss(reduction="mean").to(device)
total_params = sum(param.numel() for param in model.parameters())
print("Args:", args)
print("Model total parameters:", total_params)
if info["delta_stn"]:
model_general_optimizer = torch.optim.SGD(model.get_general_parameters(),
lr=args.train_lr,
momentum=0.9)
model_response_optimizer = torch.optim.SGD(model.get_response_parameters(),
lr=args.train_lr,
momentum=0.9)
hyper_optimizer = torch.optim.RMSprop([h_container.h_tensor], lr=args.valid_lr)
scale_optimizer = torch.optim.RMSprop([h_container.h_scale], lr=args.scale_lr)
stn_step_optimizer = DeltaStnStepOptimizer(model, model_general_optimizer, model_response_optimizer,
hyper_optimizer, scale_optimizer, criterion, h_container,
info["tune_scales"], info["entropy_weight"], info["linearize"])
else:
model_optimizer = torch.optim.SGD(model.parameters(), lr=args.train_lr, momentum=0.9)
hyper_optimizer = torch.optim.RMSprop([h_container.h_tensor], lr=args.valid_lr)
scale_optimizer = torch.optim.RMSprop([h_container.h_scale], lr=args.scale_lr)
stn_step_optimizer = StnStepOptimizer(model, model_optimizer, hyper_optimizer, scale_optimizer, criterion,
h_container, info["tune_scales"], info["entropy_weight"])
# Evaluation functions.
def delta_stn_per_epoch_evaluate(current_epoch, train_loss=None):
def evaluate(loader):
model.eval()
correct = total = loss = 0.
with torch.no_grad():
for data in loader:
images, labels = data[0].to(device), data[1].to(device)
repeated_h_tensor = h_container.h_tensor.unsqueeze(0).repeat((images.shape[0], 1))
pred = model(images, repeated_h_tensor - repeated_h_tensor.detach(), repeated_h_tensor)
loss += F.cross_entropy(pred.float(), labels.long(), reduction="sum").item()
hard_pred = torch.max(pred, 1)[1]
total += labels.size(0)
correct += (hard_pred == labels).sum().item()
accuracy = correct / float(total)
mean_loss = loss / float(total)
return mean_loss, accuracy
train_loader.dataset.reset_hyper_params()
if train_loss is None:
train_loss, train_acc = evaluate(train_loader)
val_loss, val_acc = evaluate(valid_loader)
tst_loss, tst_acc = evaluate(test_loader)
print("=" * 80)
print("Train Epoch: {} | Trn Loss: {:.3f} | Val Loss: {:.3f} | Val Acc: {:.3f}"
" | Test Loss: {:.3f} | Test Acc: {:.3f}".format(current_epoch, train_loss, val_loss,
val_acc, tst_loss, tst_acc))
print("=" * 80)
epoch_dict = {"epoch": current_epoch,
"train_loss": train_loss,
"val_loss": val_loss,
"val_acc": val_acc,
"test_loss": tst_loss,
"test_acc": tst_acc,
"lr": model_general_optimizer.param_groups[0]["lr"]}
wandb.log(epoch_dict)
return val_loss
def stn_per_epoch_evaluate(current_epoch, train_loss=None):
def evaluate(loader):
model.eval()
correct = total = loss = 0.
with torch.no_grad():
for data in loader:
images, labels = data[0].to(device), data[1].to(device)
repeated_h_tensor = h_container.h_tensor.unsqueeze(0).repeat((images.shape[0], 1))
pred = model(images, repeated_h_tensor, repeated_h_tensor)
loss += F.cross_entropy(pred.float(), labels.long(), reduction="sum").item()
hard_pred = torch.max(pred, 1)[1]
total += labels.size(0)
correct += (hard_pred == labels).sum().item()
accuracy = correct / float(total)
mean_loss = loss / float(total)
return mean_loss, accuracy
train_loader.dataset.reset_hyper_params()
if train_loss is None:
train_loss, train_acc = evaluate(train_loader)
val_loss, val_acc = evaluate(valid_loader)
tst_loss, tst_acc = evaluate(test_loader)
print("=" * 80)
print("Train Epoch: {} | Trn Loss: {:.3f} | Val Loss: {:.3f} | Val Acc: {:.3f}"
" | Test Loss: {:.3f} | Test Acc: {:.3f}".format(current_epoch, train_loss, val_loss,
val_acc, tst_loss, tst_acc))
print("=" * 80)
epoch_dict = {"epoch": current_epoch,
"train_loss": train_loss,
"val_loss": val_loss,
"val_acc": val_acc,
"test_loss": tst_loss,
"test_acc": tst_acc,
"lr": model_optimizer.param_groups[0]["lr"]}
wandb.log(epoch_dict)
return val_loss
evaluate_fnc = delta_stn_per_epoch_evaluate if info["delta_stn"] else stn_per_epoch_evaluate
stn_trainer = StnTrainer(step_optimizer=stn_step_optimizer, train_loader=train_loader, valid_loader=valid_loader,
test_loader=test_loader, h_container=h_container, evaluate_fnc=evaluate_fnc,
device=device, lr_scheduler=None, warmup_epochs=info["warmup_epochs"],
total_epochs=info["total_epochs"], train_steps=info["train_steps"],
valid_steps=info["valid_steps"], log_interval=info["log_interval"],
patience=None)
try:
stn_trainer.train()
evaluate_fnc(info["total_epochs"])
sys.stdout.flush()
except KeyboardInterrupt:
print("=" * 80)
print("Exiting from training early ...")
sys.stdout.flush()
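# Example invocations (illustrative; all flags are defined by the parser above):
#
#   python train.py --total_epochs 200 --train_lr 1e-2
#   python train.py --delta_stn --linearize --tune_scales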
|
{"hexsha": "2dd150a032dee5c6b3f6424adbea62b758348371", "size": 11052, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/simple_cnn/train.py", "max_stars_repo_name": "pomonam/Self-Tuning-Networks", "max_stars_repo_head_hexsha": "3fa949bb1da5beb2b4e7f1d07a26b819b42ad7f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2020-10-27T03:00:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T10:47:15.000Z", "max_issues_repo_path": "examples/simple_cnn/train.py", "max_issues_repo_name": "pomonam/Self-Tuning-Networks", "max_issues_repo_head_hexsha": "3fa949bb1da5beb2b4e7f1d07a26b819b42ad7f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-22T11:17:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-14T13:22:38.000Z", "max_forks_repo_path": "examples/simple_cnn/train.py", "max_forks_repo_name": "pomonam/Self-Tuning-Networks", "max_forks_repo_head_hexsha": "3fa949bb1da5beb2b4e7f1d07a26b819b42ad7f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-10-27T07:05:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-16T01:33:01.000Z", "avg_line_length": 41.393258427, "max_line_length": 113, "alphanum_fraction": 0.6342743395, "include": true, "reason": "import numpy", "num_tokens": 2363}
|
"""
Notes:
- 1. For the conv layer we have H' = H, since H' = H + 2p - k + 1 = H for p=1, k=3, i.e. the same as the previous layer,
    - because stride=1 by default (so the window only moves by 1), since you want a conv to see the whole image.
- 2. For the pool layer we have H' = H/2, i.e. half of the previous layer,
    - because stride=kernel_size by default (so pooling downsamples more aggressively, e.g. for invariance).
- 3. H = W should be true for this model and data, unless you feed rectangular data for some reason.
For this model, if the input H = 84 and H^(l)_{layer_type} is the H at layer l for that layer_type, we have:
- H^(l)_{conv} = H/2**(l-1)
- H^(l)_{pool} = H/2**(l)
since the conv layers don't change the height and each pooling layer halves each spatial dimension.
"""
from __future__ import division, print_function, absolute_import
import pdb
import copy
from argparse import Namespace
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
from typing import Optional
from automl.core.operations import SPP
def helloworld(msg="hello"):
    print(f'hello world with msg: {msg}')
def get_defaul_args_for_5cnn() -> Namespace:
args: Namespace = Namespace()
args.image_size = 84
args.bn_eps = 1e-3
args.bn_momentum = 0.95
args.n_classes = 5
args.filter_size = 32
args.levels = None
args.spp = False
return args
def get_learner_from_args(args: Namespace) -> nn.Module:
return Learner(args.image_size, args.bn_eps, args.bn_momentum, args.n_classes)
def get_default_learner(image_size: int = 84,
bn_eps: float = 1e-3,
bn_momentum: float = 0.95,
n_classes: int = 5,
filter_size: int = 32,
levels: Optional = None,
spp: bool = False) -> nn.Module:
return Learner(image_size, bn_eps, bn_momentum, n_classes, filter_size, levels, spp)
def get_default_learner_from_default_args(args: Optional[Namespace] = None) -> nn.Module:
if args is None:
args = get_defaul_args_for_5cnn()
mdl = get_learner_from_args(args)
return mdl
def get_feature_extractor_pool_layers(L: int = 4) -> list[str]:
return [f'model.features.pool{i}' for i in range(1, L + 1)]
def get_feature_extractor_conv_layers(L: int = 4, include_cls: bool = False) -> list[str]:
"""
    Note: if the cls layer is present then we need B >= s*n_c, since its output
    has shape [B, n_c]; e.g. for s = 10 and n_c = 5 we need B >= 10*5 = 50.
    The s obtained for a given B is s_cls = B/n_c, e.g.:
        s_cls = B/n_c = 13/5 = 2.6
        s_cls = B/n_c = 26/5 = 5.2
"""
layers: list[str] = [f'model.features.conv{i}' for i in range(1, L + 1)]
if include_cls:
layers: list[str] = layers + ['model.cls']
return layers
def get_head_cls() -> list[str]:
return ['model.cls']
def get_all_layers_minus_cls(L: int = 4) -> list[str]:
    layer_names: list[str] = []
for l in range(1, L + 1):
layer_name1: str = f'model.features.conv{l}'
layer_name2: str = f'model.features.norm{l}'
layer_name3: str = f'model.features.relu{l}'
layer_name4: str = f'model.features.pool{l}'
layer_names.append(layer_name1)
layer_names.append(layer_name2)
layer_names.append(layer_name3)
layer_names.append(layer_name4)
return layer_names
def get_last_two_layers(layer_type: str = 'conv', include_cls: bool = True,
start_L: int = 4, end_L: int = 4
) -> list[str]:
assert layer_type in ['conv', 'norm', 'relu', 'pool']
layers: list[str] = [f'model.features.{layer_type}{i}' for i in range(start_L, end_L + 1)]
if include_cls:
layers: list[str] = layers + ['model.cls']
return layers
class Learner(nn.Module):
def __init__(self, image_size,
bn_eps: float,
bn_momentum: float,
n_classes: int,
filter_size: int = 32, # Meta-LSTM & MAML use 32 filters
levels: Optional = None,
spp: bool = False
):
"""[summary]
Args:
image_size ([type]): [description]
bn_eps ([type]): [description]
bn_momentum ([type]): [description]
n_classes ([type]): [description]
levels ([type], optional): [description]. Defaults to None.
spp (bool, optional): [description]. Defaults to False.
"""
super().__init__()
self.spp = spp
# - note: "model" is also a Module
self.model = nn.ModuleDict({'features': nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(in_channels=3, out_channels=filter_size, kernel_size=3, padding=1)),
('norm1', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu1', nn.ReLU(inplace=False)),
('pool1', nn.MaxPool2d(kernel_size=2)),
('conv2', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm2', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu2', nn.ReLU(inplace=False)),
('pool2', nn.MaxPool2d(kernel_size=2)),
('conv3', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm3', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu3', nn.ReLU(inplace=False)),
('pool3', nn.MaxPool2d(kernel_size=2)),
('conv4', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm4', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu4', nn.ReLU(inplace=False)),
('pool4', nn.MaxPool2d(kernel_size=2))]))
})
if spp:
spp_ = SPP(filter_size, levels)
self.model.update({'spp': spp_})
self.model.update({'cls': nn.Linear(spp_.output_size, n_classes)})
else:
clr_in = image_size // 2 ** 4
self.model.update({'cls': nn.Linear(filter_size * clr_in * clr_in, n_classes)})
# self.criterion = nn.CrossEntropyLoss()
def forward(self, x):
out = self.model.features(x)
if self.spp:
out = self.model.spp(out)
else:
out = torch.reshape(out, [out.size(0), -1])
outputs = self.model.cls(out)
return outputs
def get_flat_params(self):
# return torch_uu.cat([p.view(-1) for p in self.model.parameters()], 0)
pass
def copy_flat_params(self, cI):
# idx = 0
# for p in self.model.parameters():
# plen = p.view(-1).size(0)
# p.data.copy_(cI[idx: idx+plen].view_as(p))
# idx += plen
pass
def transfer_params(self, learner_w_grad, cI):
# Use load_state_dict only to copy the running mean/var in batchnorm, the values of the parameters
# are going to be replaced by cI
# self.load_state_dict(learner_w_grad.state_dict())
# # replace nn.Parameters with tensors from cI (NOT nn.Parameters anymore).
# idx = 0
# for m in self.model.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear):
# wlen = m._parameters['weight'].view(-1).size(0)
# m._parameters['weight'] = cI[idx: idx+wlen].view_as(m._parameters['weight']).clone()
# idx += wlen
# if m._parameters['bias'] is not None:
# blen = m._parameters['bias'].view(-1).size(0)
# m._parameters['bias'] = cI[idx: idx+blen].view_as(m._parameters['bias']).clone()
# idx += blen
pass
def reset_batch_stats(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.reset_running_stats()
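# Minimal usage sketch: one forward pass with the default 84x84, 5-way
# configuration; the batch size and random input are illustrative only.
if __name__ == '__main__':
    mdl = get_default_learner_from_default_args()
    x = torch.randn(2, 3, 84, 84)
    print(mdl(x).shape)  # expected: torch.Size([2, 5])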
|
{"hexsha": "450c2061b4b62d73a82596d70bdbf5f196755e9f", "size": 7991, "ext": "py", "lang": "Python", "max_stars_repo_path": "ultimate-utils-proj-src/uutils/torch_uu/models/learner_from_opt_as_few_shot_paper.py", "max_stars_repo_name": "pestun/ultimate-utils", "max_stars_repo_head_hexsha": "676002e80422067256c43172a78825ed12954bcb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ultimate-utils-proj-src/uutils/torch_uu/models/learner_from_opt_as_few_shot_paper.py", "max_issues_repo_name": "pestun/ultimate-utils", "max_issues_repo_head_hexsha": "676002e80422067256c43172a78825ed12954bcb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ultimate-utils-proj-src/uutils/torch_uu/models/learner_from_opt_as_few_shot_paper.py", "max_forks_repo_name": "pestun/ultimate-utils", "max_forks_repo_head_hexsha": "676002e80422067256c43172a78825ed12954bcb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0523809524, "max_line_length": 117, "alphanum_fraction": 0.5952947065, "include": true, "reason": "import numpy", "num_tokens": 2090}
|
# Should be run from EchelleCCFs/deps or EchelleCCFs/examples or EchelleCCFs/test
# Since this may be called before EchelleCCFs is installed, it doesn't use pkgdir.
include("download.jl")
download_url = "https://zenodo.org/record/3753254/files/res-1000-1years_full_id1.h5?download=1"
download_filename = joinpath("..","data","spectra","res-1000-1years_full_id1.h5")
download_md5 = "5659082144cd093d617bb54dca937ad9"
@warn "This is a large download. Be prepared to be patient."
download_and_check_md5sum(download_url, download_filename, md5_goal=download_md5)
|
{"hexsha": "1f3124c974aed22c6d98dc434cc774659850f290", "size": 568, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "deps/download_soap_example_from_gilbertson_etal_2020.jl", "max_stars_repo_name": "RvSpectML/EchelleCCFs", "max_stars_repo_head_hexsha": "a639217e24ebc59282d42e27d739d3aea1a032c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-28T02:51:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-28T02:51:29.000Z", "max_issues_repo_path": "deps/download_soap_example_from_gilbertson_etal_2020.jl", "max_issues_repo_name": "RvSpectML/EchelleCCFs", "max_issues_repo_head_hexsha": "a639217e24ebc59282d42e27d739d3aea1a032c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-10-13T20:26:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T21:01:10.000Z", "max_forks_repo_path": "deps/download_soap_example_from_gilbertson_etal_2020.jl", "max_forks_repo_name": "RvSpectML/EchelleCCFs", "max_forks_repo_head_hexsha": "a639217e24ebc59282d42e27d739d3aea1a032c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-15T07:35:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T14:32:03.000Z", "avg_line_length": 47.3333333333, "max_line_length": 95, "alphanum_fraction": 0.7992957746, "num_tokens": 171}
|
#include <a4/config.h>
#ifdef HAVE_ATOMIC
#include <atomic>
#endif
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <vector>
#include <stdexcept>
#include <boost/chrono.hpp>
namespace chrono = boost::chrono;
typedef chrono::duration<double> duration;
#include <boost/thread.hpp>
#include <google/protobuf/descriptor.h>
#include <a4/application.h>
#include <a4/input.h>
#include <a4/input_stream.h>
#include <a4/output.h>
#include <a4/output_stream.h>
#include <cpu_info.h>
using std::string;
using std::ifstream;
using a4::io::A4Input;
using a4::io::A4Output;
using a4::io::InputStream;
using a4::io::OutputStream;
typedef std::vector<string> FileList;
namespace a4 {
namespace process {
class ProcessStats {
public:
size_t events, bytes;
duration cputime;
ProcessStats() : events(0), bytes(0), cputime(0) {}
ProcessStats& operator+=(const ProcessStats& rhs) {
events += rhs.events;
bytes += rhs.bytes;
cputime += rhs.cputime;
return *this;
}
};
SimpleCommandLineDriver::SimpleCommandLineDriver(Configuration* cfg)
: _configuration(cfg), _compression_string("ZLIB 1")
{
assert(_configuration);
}
class BaseOutputAdaptor : public OutputAdaptor {
public:
BaseOutputAdaptor(Driver* d, Processor* p, bool forward_metadata,
A4Output* out, A4Output* res,
const a4::io::OutputStream::CompressionType compression_type,
const int compression_level)
: _output(out), _result(res), _forward_metadata(forward_metadata),
_in_block(false), _driver(d), _p(p), _last_postfix(""),
_compression_type(compression_type),
_compression_level(compression_level)
{
merge_key = split_key = "";
_outstream.reset();
_resstream.reset();
backstore.reset();
start_block(); // This writes no metadata
}
virtual ~BaseOutputAdaptor() {}
void start_block(std::string postfix="") {
if (_output and (!_outstream or postfix != _last_postfix)) {
_outstream = _output->get_stream(postfix);
_outstream->set_compression(_compression_type, _compression_level);
if (_forward_metadata)
_outstream->set_forward_metadata();
}
if (_result and (!_resstream or postfix != _last_postfix)) {
_resstream = _result->get_stream(postfix);
_resstream->set_compression(_compression_type, _compression_level);
_resstream->set_forward_metadata();
}
backstore.reset(new ObjectBackStore());
_driver->set_store(_p, backstore->store());
if (_outstream and _forward_metadata and current_metadata) {
current_metadata->unionize();
_outstream->metadata(*current_metadata->message());
}
_in_block = true;
}
void end_block() {
if (_resstream && current_metadata) {
current_metadata->unionize();
_resstream->metadata(*current_metadata->message());
}
if (_resstream && backstore) {
backstore->to_stream(*_resstream);
}
backstore.reset();
if (_outstream and !_forward_metadata and current_metadata) {
current_metadata->unionize();
_outstream->metadata(*current_metadata->message());
}
_in_block = false;
}
void new_outgoing_metadata(shared<const A4Message> new_metadata) {
// Check if we merge the old into the new metadata
// and hold off on writing it.
bool merge = false;
shared<A4Message> old_metadata = current_metadata;
// Determine if merging is necessary
if (old_metadata && merge_key != "") {
merge = old_metadata->check_key_mergable(*new_metadata, merge_key);
}
if (merge) {
//std::cerr << "Merging\n" << old_metadata.message()->ShortDebugString()
// << "\n...and...\n" << new_metadata.message()->ShortDebugString() << std::endl;
*current_metadata += *new_metadata;
//std::cerr << "...to...\n" << current_metadata->message()->ShortDebugString() << std::endl;
} else { // Normal action in case of new metadata
// If we are in charge of metadata, start a new block now...
end_block();
std::string postfix = "";
if (split_key != "")
postfix = new_metadata->assert_field_is_single_value(split_key);
current_metadata.reset(new A4Message(*new_metadata));
start_block(postfix);
} // end of normal action in case of new metadata
}
virtual void metadata(shared<const A4Message> m) {
FATAL("To write metadata manually, you have to change the metadata_behavior of the Processor!");
}
void write(shared<const A4Message> m) {
if (!_in_block)
FATAL("Whoa?? Writing outside of a metadata block? How did you do this?");
if (_outstream)
_outstream->write(m);
}
shared<A4Message> current_metadata;
std::string merge_key, split_key;
shared<ObjectBackStore> backstore;
protected:
A4Output* _output;
A4Output* _result;
bool _forward_metadata;
shared<OutputStream> _outstream, _resstream;
bool _in_block;
Driver* _driver;
Processor* _p;
std::string _last_postfix;
const a4::io::OutputStream::CompressionType _compression_type;
const int _compression_level;
};
class ManualOutputAdaptor : public BaseOutputAdaptor {
public:
ManualOutputAdaptor(Driver* d, Processor* p, bool forward_metadata,
A4Output* out, A4Output* res,
const a4::io::OutputStream::CompressionType compression_type,
const int compression_level)
: BaseOutputAdaptor(d, p, forward_metadata, out, res, compression_type, compression_level) {}
void metadata(shared<const A4Message> m) {
new_outgoing_metadata(m);
}
};
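// Thread body for one worker: drains input streams, drives metadata block
// transitions on the output adaptor, and accumulates event/byte statistics.
// Any exception is captured into `error` so the caller can rethrow it.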
void SimpleCommandLineDriver::simple_thread(SimpleCommandLineDriver* self,
Processor* p, int limit, ProcessStats& stats, std::exception_ptr& error)
try {
// This is MY processor! (makes sure processor is deleted on function exit)
// The argument to this function should be a move into a UNIQUE...
UNIQUE<Processor> processor(p);
UNIQUE<BaseOutputAdaptor> output_adaptor;
bool metadata_forward;
bool auto_metadata = false;
switch(p->get_metadata_behavior()) {
case Processor::AUTO:
metadata_forward = (self->_metakey == ""); // forward if no merging
auto_metadata = true;
output_adaptor.reset(new BaseOutputAdaptor(self, p, metadata_forward,
self->_output.get(), self->_result.get(), self->_compression_type, self->_compression_level));
break;
case Processor::MANUAL_FORWARD:
if (self->_metakey != "") FATAL("This program is not compatible with metadata merging!");
// fall through: MANUAL_FORWARD shares the manual adaptor setup below, with metadata forwarding enabled
case Processor::DROP:
metadata_forward = true;
output_adaptor.reset(new ManualOutputAdaptor(self, p, metadata_forward,
self->_output.get(), self->_result.get(), self->_compression_type, self->_compression_level));
break;
case Processor::MANUAL_BACKWARD:
metadata_forward = false;
output_adaptor.reset(new ManualOutputAdaptor(self, p, metadata_forward,
self->_output.get(), self->_result.get(), self->_compression_type, self->_compression_level));
break;
default:
FATAL("Unknown metadata behaviour specified: ", p->get_metadata_behavior());
}
output_adaptor->merge_key = self->_metakey;
output_adaptor->split_key = self->_split_metakey;
#ifdef BOOST_CHRONO_HAS_THREAD_CLOCK
boost::chrono::thread_clock::time_point start = boost::chrono::thread_clock::now();
#endif
self->set_output_adaptor(p, output_adaptor.get());
#ifdef HAVE_ATOMIC
static std::atomic<uint64_t> total_events_processed(0),
total_metadata_seen(0);
#endif
// Try as long as there are inputs
int cnt = 0;
bool run = true, should_close_stream = false;
while (shared<InputStream> instream = self->_input->get_stream()) {
if (!run)
break;
while (shared<A4Message> msg = instream->next_with_metadata()) {
if (!run)
break;
#ifdef HAVE_ATOMIC
const uint64_t n = total_events_processed++;
if (n % 10000 == 0) {
VERBOSE("Processed ", n, " events (seen ", total_metadata_seen, " metadata)");
}
#endif
if (instream->new_metadata()) { // Start of new metadata block
#ifdef HAVE_ATOMIC
total_metadata_seen++;
#endif
shared<const A4Message> c_new_metadata = instream->current_metadata();
// WARNING: "no metadata" events are subsumed into previous/next metadata here!
if (c_new_metadata) {
// Process end of old metadata block, if any.
if (p->metadata_present())
p->process_end_metadata();
// Process start of new incoming metadata block (this may modify new_metadata)
// In Manual Mode, this may also trigger a callback in the output_adaptor.
// Note that set_metadata is only called here, since the processor should only
// ever see incoming metadata (by contract).
self->set_metadata(p, c_new_metadata);
shared<A4Message> new_metadata = p->process_new_metadata();
if (auto_metadata){
if (new_metadata) {
output_adaptor->new_outgoing_metadata(new_metadata);
} else {
output_adaptor->new_outgoing_metadata(c_new_metadata);
}
} else if (new_metadata) {
FATAL("You must not modify metadata in process_new_metadata if auto_metadata is off!");
}
}
}
// Do not send metadata messages to process()
if (msg->metadata())
continue;
self->set_store(p, output_adaptor->backstore->store());
try {
process_rerun_systematics(p, msg);
} catch (...) {
ERROR("Caught an exception in the processor");
if (msg) {
try {
auto protomsg = msg->message();
ERROR(protomsg->GetDescriptor()->full_name(), ":");
ERROR(protomsg->DebugString());
} catch (...) {
ERROR("Could not show event");
}
} else {
ERROR("Processed message is invalid");
}
throw;
}
// Skip if the user wants us to.
if (p->skip_to_next_metadata) {
instream->skip_to_next_metadata();
p->skip_to_next_metadata = false;
}
// Check if we reached limit
if (++cnt == limit) {
run = false;
should_close_stream = true;
}
}
// We're about to get a new stream, record how many this one had
stats.bytes += instream->ByteCount();
if (should_close_stream)
instream->close();
if (instream->error()) {
ERROR("stream error in thread ", boost::this_thread::get_id());
return;
}
}
if (p->metadata_present())
p->process_end_metadata();
// Stream store to output
output_adaptor->end_block();
#ifdef BOOST_CHRONO_HAS_THREAD_CLOCK
stats.cputime = boost::chrono::thread_clock::now() - start;
#endif
stats.events = cnt;
} catch (...) {
error = std::current_exception();
}
Processor* SimpleCommandLineDriver::new_initialized_processor() {
Processor* p = _configuration->new_processor();
_configuration->setup_processor(*p);
return p;
}
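// Entry point: parses command-line and config-file options, wires up A4
// input/output, runs one worker per hardware thread (or a single thread when
// an event limit is given), and reports event and byte throughput.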
int SimpleCommandLineDriver::main(int argc, const char* argv[])
try
{
// Verify that the version of the library that we linked against is
// compatible with the version of the headers we compiled against
//GOOGLE_PROTOBUF_VERIFY_VERSION;
chrono::steady_clock::time_point start = chrono::steady_clock::now();
int n_threads, number;
int hw_threads = get_cpuinfo().physical_cores;
bool no_gdb = false;
FileList inputs;
po::options_description commandline_options;
po::options_description config_file_options;
string config_filename = string(argv[0]) + ".ini";
string output = "", results = "";
_metakey = ""; _split_metakey = "";
bool verbose, quiet, debug;
// Define all
po::options_description gopt("General options");
gopt.add_options()
("help", "print this help message")
("verbose,v", po::bool_switch(&verbose), "verbose output")
("debug,d", po::bool_switch(&debug), "debug output")
("quiet,q", po::bool_switch(&quiet), "quiet output")
("config,c", po::value<string>(), (string("configuration file [default is '") + config_filename + "']").c_str())
("disable-gdb", po::bool_switch(&no_gdb), "disable internal segfault handling");
po::options_description popt("Processing options");
popt.add_options()
("input,i", po::value<FileList>(&inputs), "input file(s)")
("output,o", po::value<string>(&output), "output file")
("results,r", po::value<string>(&results), "result file")
("number,n", po::value<int>(&number)->default_value(-1), "maximum number of events to process (default: all)")
("per,p", po::value<string>(&_metakey), "granularity of output by metadata key (e.g. period, run, lumiblock...). Default is input granularity.")
("split-per,s", po::value<string>(&_split_metakey), "granularity of output by metadata key (e.g. period, run, lumiblock...). Default is input granularity.")
("compression", po::value(&_compression_string)->default_value("ZLIB 1"), "compression level '[TYPE] [LEVEL]'");
po::positional_options_description positional_options;
positional_options.add("input", -1);
po::options_description cfgopt("Configuration: (section [config] in configuration file)");
cfgopt.add_options()
("config.threads,t", po::value<int>(&n_threads)->default_value(hw_threads), "run N multi-threads [# of cores]");
po::options_description useropt;
_configuration->add_options(useropt.add_options());
commandline_options.add(gopt);
commandline_options.add(popt);
commandline_options.add(cfgopt);
commandline_options.add(useropt);
config_file_options.add(cfgopt);
config_file_options.add(useropt);
// Parse command line first
std::vector<string> _argvs;
for (int i = 1; i < argc; i++)
_argvs.push_back(string(argv[i]));
po::variables_map arguments;
po::store(po::command_line_parser(_argvs)
.options(commandline_options)
.positional(positional_options).run(), arguments);
if (2 > argc || arguments.count("help") || !arguments.count("input"))
{
std::cout << "Usage: " << argv[0] << " [Options] input file(s)" << std::endl;
std::cout << commandline_options << std::endl;
return 1;
}
// Parse config file
bool explicit_config_file = false;
if (arguments.count("config")) {
config_filename = arguments["config"].as<string>();
explicit_config_file = true;
}
std::ifstream config_file(config_filename.c_str());
if (!config_file && explicit_config_file) {
throw std::runtime_error("Configuration file '" + config_filename + "' not found!");
} else if (config_file && !explicit_config_file) {
WARNING("Using implicit config file '", config_filename, "'. Override this with -c 'other_configfile.ini'.");
}
po::store(po::parse_config_file(config_file, config_file_options), arguments);
// After finishing all option reading, notify the result
po::notify(arguments);
_configuration->read_arguments(arguments);
a4::io::set_log_level(debug ? 5 : verbose ? 4 : quiet ? 2 : 3);
std::stringstream ss(_compression_string);
std::string ctype;
ss >> ctype >> _compression_level;
_compression_type = a4::io::OutputStream::compression_type(ctype);
if (not no_gdb) {
a4::Fatal::enable_throw_on_segfault();
}
if (number != -1) n_threads = 1;
// DEBUG
//foreach (string& i, inputs) { cout << "inputs += " << i << endl; }
//cout << "output = " << output << endl;
//cout << "results = " << results << endl;
//cout << "config_filename = " << config_filename << endl;
//cout << "n_threads = " << n_threads << endl;
// Set up I/O
_input.reset(new A4Input("A4 Input Files"));
foreach(string& i, inputs)
_input->add_file(i);
if (output.size())
_output.reset(new A4Output(output, "A4 Output File"));
if (results.size()) {
if (results == output) {
_result = _output;
} else {
_result.reset(new A4Output(results, "A4 Results File"));
}
}
std::vector<ProcessStats> stats(n_threads);
std::vector<std::exception_ptr> errors(n_threads);
std::vector<bool> done(n_threads);
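// Multi-threaded path: one worker per thread; the main thread polls with
// timed_join every 100 ms so a worker's captured exception is rethrown promptly.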
if (n_threads > 1) {
std::vector<boost::thread> threads;
for (int i = 0; i < n_threads; i++) {
done[i] = false;
errors[i] = std::exception_ptr();
Processor* p = new_initialized_processor();
threads.push_back(boost::thread(std::bind(&simple_thread, this, p, -1, boost::ref(stats[i]), boost::ref(errors[i]))));
}
bool all_done;
do {
all_done = true;
for (int i = 0; i < n_threads; i++) {
if (!done[i]) {
if (threads[i].timed_join(boost::posix_time::millisec(100))) {
done[i] = true;
if (errors[i] != std::exception_ptr()) std::rethrow_exception(errors[i]);
} else {
all_done = false;
}
}
}
} while (not all_done);
} else {
Processor* p = new_initialized_processor();
std::exception_ptr error;
simple_thread(this, p, number, stats[0], error);
if (error != std::exception_ptr()) std::rethrow_exception(error);
}
ProcessStats total;
foreach(const ProcessStats& s, stats)
total += s;
chrono::duration<double> walltime = chrono::steady_clock::now() - start;
VERBOSE("A4 processed ", total.events, " objects in ", walltime.count(),
" seconds. (", total.events / walltime.count(), "Hz)");
#ifdef BOOST_CHRONO_HAS_THREAD_CLOCK
VERBOSE("CPU time: ", total.cputime, " (", total.events / total.cputime.count(), "Hz)");
#endif
const double megabytes = total.bytes / (1024.*1024.);
VERBOSE("Total data read ", megabytes, " (MB) Rate: ", megabytes / walltime.count(), " (MB/s)");
// Clean Up any memory allocated by libprotobuf
//google::protobuf::ShutdownProtobufLibrary();
return 0;
}
catch(a4::Terminate& x)
{
std::cerr << argv[0] << ": " << x.what() << std::endl;
return 1;
}
catch(std::exception& x)
{
std::cerr << argv[0] << ": Error (Exception): " << x.what() << std::endl;
return 2;
}
}} // namespace a4::process
|
{"hexsha": "ab3037aa9b9ea90c1f20b631d28593b764ceef8e", "size": 20518, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "a4process/src/application.cpp", "max_stars_repo_name": "a4/a4", "max_stars_repo_head_hexsha": "e1de89260cb3894908f1d01dfacea125abc79da9", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2015-04-07T20:25:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-27T15:04:02.000Z", "max_issues_repo_path": "a4process/src/application.cpp", "max_issues_repo_name": "a4/a4", "max_issues_repo_head_hexsha": "e1de89260cb3894908f1d01dfacea125abc79da9", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a4process/src/application.cpp", "max_forks_repo_name": "a4/a4", "max_forks_repo_head_hexsha": "e1de89260cb3894908f1d01dfacea125abc79da9", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-06-02T17:22:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-02T17:22:35.000Z", "avg_line_length": 37.3054545455, "max_line_length": 164, "alphanum_fraction": 0.5844136855, "num_tokens": 4497}
|
[STATEMENT]
lemma PO_l3_inv5 [simp,intro!]: "reach l3 \<subseteq> l3_inv5"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. reach l3 \<subseteq> l3_inv5
[PROOF STEP]
using l3_inv5_derived PO_l3_inv2 PO_l3_inv3
[PROOF STATE]
proof (prove)
using this:
l3_inv2 \<inter> l3_inv3 \<subseteq> l3_inv5
reach l3 \<subseteq> l3_inv2
reach l3 \<subseteq> l3_inv3
goal (1 subgoal):
1. reach l3 \<subseteq> l3_inv5
[PROOF STEP]
by blast
|
{"llama_tokens": 200, "file": "Key_Agreement_Strong_Adversaries_pfslvl3", "length": 2}
|
import random
import logging
import numpy as np
from collections import OrderedDict
from paddle import DataParallel
from .ofa import OFA
from .layers_base import BaseBlock
from ...core import GraphWrapper, dygraph2program
from .get_sub_model import get_prune_params_config, prune_params, check_search_space
from ...common import get_logger
_logger = get_logger(__name__, level=logging.INFO)
class HWOFA(OFA):
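# Hardware-aware OFA wrapper: samples subnets over image size ('i'), stage
# depths ('d'), kernel sizes ('k') and channel ratios ('c') from
# candidate_config, and encodes the active subnet as a digit string.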
def __init__(self,
model,
run_config=None,
distill_config=None,
elastic_order=None,
train_full=False,
candidate_config=None,
backbone='resnet34'
):
super().__init__(model, run_config, distill_config, elastic_order, train_full)
self.model.eval()
self._clear_search_space()
self.cand_cfg = candidate_config
# self.cand_cfg = {
# 'i': [224], # image size
# 'd': [(3, 3), (4, 4), (6, 6), (3, 3)], # depth
# 'k': [3], # kernel size
# 'c': [1.0, 0.95, 0.9, 0.85] # channel ratio
# }
self.im_size_dict = {x: i for i, x in enumerate(self.cand_cfg['i'], 1)}
self.depth_dict = {k: k-1 for s, e in self.cand_cfg['d'] for k in range(s, e+1)}
self.kernel_dict = {x: i for i, x in enumerate(self.cand_cfg['k'], 1)}
self.channel_dict = {x: i for i, x in enumerate(self.cand_cfg['c'], 1)}
self.subnet_code = ''
self.layer_factor = 6
if backbone in ['resnet34']:
self.block_conv_num = 2 # res18,res34: 2, >res50: 3
elif backbone in ['resnet50']:
self.block_conv_num = 3
else:
raise ValueError("unsupported backbone: {}".format(backbone))
def gen_subnet_code(self):
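# Code layout: [image-size index][one depth digit per stage][channel-ratio
# digits per stage, right-padded with '0' to layer_factor * block_conv_num].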
submodel_code = [self.im_size_dict[self.act_im_size]]
submodel_code += [self.depth_dict[d] for d in self.act_depth_list]
submodel_code_str = ''.join([str(x) for x in submodel_code])
# k_code = ['', '', '', '', '']
c_code = ['', '', '', '', '']
for k, v in self.current_config.items():
if 'layer' in k and 'downsample' not in k:
if 'layer1' in k:
idx = 1
elif 'layer2' in k:
idx = 2
elif 'layer3' in k:
idx = 3
else:
idx = 4
# k_code[idx] += str(self.kernel_dict[v['kernel_size']])
c_code[idx] += str(self.channel_dict[v['expand_ratio']])
elif 'conv1' == k:
# k_code[0] += str(self.kernel_dict[v['kernel_size']])
c_code[0] += str(self.channel_dict[v['expand_ratio']])
c_code = [x.ljust(self.layer_factor*self.block_conv_num, '0') for x in c_code[1:]]
# k_code = [x.ljust(self.layer_factor*3,'0') for x in k_code[1:]]
for x in c_code:
submodel_code_str += x
# for x in k_code:
# submodel_code_str += x
return submodel_code_str
def active_subnet(self, img_size=None):
if img_size is None:
self.act_im_size = random.choice(self.cand_cfg['i'])
else:
self.act_im_size = img_size
self.act_depth_list = [random.randint(s, e) for s, e in self.cand_cfg['d']]
self.current_config = OrderedDict()
for key in self.universe:
if key in self._ofa_layers:
if key == 'conv1' or 'layer1' in key or 'layer2' in key:
self.current_config[key] = {'expand_ratio': random.choice(self.cand_cfg['c'])}
elif 'layer3' in key or 'layer4' in key:
self.current_config[key] = {'expand_ratio': random.choice(self.cand_cfg['c'])}
else:
raise ValueError("unexpected searchable layer key: {}".format(key))
self.current_config['fc'] = {}
self._broadcast_ss()
def _clear_search_space(self):
""" find shortcut in model, and clear up the search space """
_st_prog = dygraph2program(self.model, inputs=[2, 3, 224, 224], dtypes=[np.float32])
self._same_ss = check_search_space(GraphWrapper(_st_prog))
self._same_ss = sorted(self._same_ss)
self._param2key = {}
self._broadcast = True
self.universe = []
### the name of sublayer is the key in search space
### param.name is the name in self._same_ss
model_to_traverse = self.model._layers if isinstance(self.model, DataParallel) else self.model
for name, sublayer in model_to_traverse.named_sublayers():
if isinstance(sublayer, BaseBlock):
for param in sublayer.parameters():
if self._find_ele(param.name, self._same_ss):
self._param2key[param.name] = name
if 'conv' in name:
self.universe.append(name)
self.universe.sort()
### filter the same-search-space groups a second time to avoid keeping output weights that are no longer in the search space.
tmp_same_ss = []
for ss in self._same_ss:
per_ss = []
for key in ss:
if key not in self._param2key.keys():
continue
if self._param2key[key] in self._ofa_layers.keys() and (
'expand_ratio' in self._ofa_layers[self._param2key[key]] or \
'channel' in self._ofa_layers[self._param2key[key]]):
per_ss.append(key)
else:
_logger.info("{} not in ss".format(key))
if len(per_ss) != 0:
tmp_same_ss.append(per_ss)
self._same_ss = tmp_same_ss
for per_ss in self._same_ss:
for ss in per_ss[1:]:
if 'expand_ratio' in self._ofa_layers[self._param2key[ss]]:
self._ofa_layers[self._param2key[ss]].pop('expand_ratio')
elif 'channel' in self._ofa_layers[self._param2key[ss]]:
self._ofa_layers[self._param2key[ss]].pop('channel')
if len(self._ofa_layers[self._param2key[ss]]) == 0:
self._ofa_layers.pop(self._param2key[ss])
def forward(self, x):
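# Distillation-aware forward: when a teacher model is attached and we are
# training, return (student, teacher) outputs; otherwise only the student's.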
teacher_output = None
if self._add_teacher:
self._reset_hook_before_forward()
teacher_output = self.ofa_teacher_model.model.forward(x)
teacher_output.stop_gradient = True
# self.active_subnet()
# print(self.gen_subnet_code())
if teacher_output is not None and self.training:
stu_out = self.model.forward(x, self.act_depth_list)
return stu_out, teacher_output
else:
return self.model.forward(x, self.act_depth_list)
|
{"hexsha": "de183cd3c408f992283124a23c6e36559a30da10", "size": 6730, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddleslim/nas/ofa/hwofa.py", "max_stars_repo_name": "xiteng01/CVPR_2022_Track1_demo", "max_stars_repo_head_hexsha": "fa470ffc44e4c727048c1cbdc3cc5e48ec24a5c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paddleslim/nas/ofa/hwofa.py", "max_issues_repo_name": "xiteng01/CVPR_2022_Track1_demo", "max_issues_repo_head_hexsha": "fa470ffc44e4c727048c1cbdc3cc5e48ec24a5c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paddleslim/nas/ofa/hwofa.py", "max_forks_repo_name": "xiteng01/CVPR_2022_Track1_demo", "max_forks_repo_head_hexsha": "fa470ffc44e4c727048c1cbdc3cc5e48ec24a5c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2883435583, "max_line_length": 102, "alphanum_fraction": 0.5588410104, "include": true, "reason": "import numpy", "num_tokens": 1627}
|
import numpy as np
class univariate_Linear_Regression:
# initialize slope and intercept
def __init__(self):
self.m = 0.0 # slope
self.b = 0.0 # intercept
# sum of squared deviations of a single variable from its mean
def ss_x(self, x):
return sum((x-np.mean(x))**2)
# sum of products of deviations of x and y from their respective means
def ss_xy(self, x, y):
x_mean = np.mean(x) # mean of x
y_mean = np.mean(y) # mean of y
return sum((x-x_mean)*(y-y_mean))
# Fit slope and intercept to the data by ordinary least squares
def train(self, x, y):
# verify the features and labels are of same size
assert len(x) == len(y)
# calculate the slope
ss_x = self.ss_x(x)
ss_xy = self.ss_xy(x, y)
self.m = ss_xy/ss_x
# calculate the intercept
self.b = (np.mean(y)) - (self.m)*(np.mean(x))
# return the predicted values based on feature and weights
def predict(self, x):
predictions = np.zeros(len(x))
for i in range(len(x)):
predictions[i] = self.m * x[i] + self.b # Y = mx + b
return predictions
# Dataset to train model
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1.1, 1.9, 2.8, 4, 5.2, 5.8, 6.9, 8.1, 9, 9.9])
# Initialize our model
reg = univariate_Linear_Regression()
# Train our model with the data
reg.train(x, y)
# Make a prediction
print(reg.predict(x))
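# Sanity check (a minimal sketch, not part of the original script): the
# closed-form fit m = sum((x - x_mean) * (y - y_mean)) / sum((x - x_mean)**2),
# b = y_mean - m * x_mean minimizes the same squared error as NumPy's
# degree-1 polynomial fit, so the two should agree (slope ~0.998,
# intercept ~-0.02 for the data above).
m_ref, b_ref = np.polyfit(x, y, 1)
assert np.allclose([reg.m, reg.b], [m_ref, b_ref])
print("closed-form and np.polyfit agree:", (reg.m, reg.b))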
|
{"hexsha": "792cd1b2751fad30f80d9eb79cd6d0207eb20918", "size": 1432, "ext": "py", "lang": "Python", "max_stars_repo_path": "Linear Regression/univariate_Linear_Regression.py", "max_stars_repo_name": "imskr/Machine-Learning-with-Maths", "max_stars_repo_head_hexsha": "ca8e7565bb01a2164fba1a1dc3de617b81c88116", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Linear Regression/univariate_Linear_Regression.py", "max_issues_repo_name": "imskr/Machine-Learning-with-Maths", "max_issues_repo_head_hexsha": "ca8e7565bb01a2164fba1a1dc3de617b81c88116", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Linear Regression/univariate_Linear_Regression.py", "max_forks_repo_name": "imskr/Machine-Learning-with-Maths", "max_forks_repo_head_hexsha": "ca8e7565bb01a2164fba1a1dc3de617b81c88116", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-01T19:10:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-01T19:10:58.000Z", "avg_line_length": 27.5384615385, "max_line_length": 72, "alphanum_fraction": 0.5858938547, "include": true, "reason": "import numpy", "num_tokens": 436}
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for flax.deprecated.nn."""
from absl.testing import absltest
from flax import linen as nn
import jax
from jax import random
from jax import test_util as jtu
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class PoolTest(absltest.TestCase):
def test_pool_custom_reduce(self):
x = jnp.full((1, 3, 3, 1), 2.)
mul_reduce = lambda x, y: x * y
y = nn.pooling.pool(x, 1., mul_reduce, (2, 2), (1, 1), 'VALID')
np.testing.assert_allclose(y, np.full((1, 2, 2, 1), 2. ** 4))
def test_avg_pool(self):
x = jnp.full((1, 3, 3, 1), 2.)
pool = lambda x: nn.avg_pool(x, (2, 2))
y = pool(x)
np.testing.assert_allclose(y, np.full((1, 2, 2, 1), 2.))
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[0.25, 0.5, 0.25],
[0.5, 1., 0.5],
[0.25, 0.5, 0.25],
]).reshape((1, 3, 3, 1))
np.testing.assert_allclose(y_grad, expected_grad)
def test_avg_pool_no_batch(self):
x = jnp.full((3, 3, 1), 2.)
pool = lambda x: nn.avg_pool(x, (2, 2))
y = pool(x)
np.testing.assert_allclose(y, np.full((2, 2, 1), 2.))
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[0.25, 0.5, 0.25],
[0.5, 1., 0.5],
[0.25, 0.5, 0.25],
]).reshape((3, 3, 1))
np.testing.assert_allclose(y_grad, expected_grad)
def test_max_pool(self):
x = jnp.arange(9).reshape((1, 3, 3, 1)).astype(jnp.float32)
pool = lambda x: nn.max_pool(x, (2, 2))
expected_y = jnp.array([
[4., 5.],
[7., 8.],
]).reshape((1, 2, 2, 1))
y = pool(x)
np.testing.assert_allclose(y, expected_y)
y_grad = jax.grad(lambda x: pool(x).sum())(x)
expected_grad = jnp.array([
[0., 0., 0.],
[0., 1., 1.],
[0., 1., 1.],
]).reshape((1, 3, 3, 1))
np.testing.assert_allclose(y_grad, expected_grad)
class NormalizationTest(absltest.TestCase):
def test_batch_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (4, 3, 2))
model_cls = nn.BatchNorm(momentum=0.9, use_running_average=False)
y, initial_params = model_cls.init_with_output(key2, x)
mean = y.mean((0, 1))
var = y.var((0, 1))
np.testing.assert_allclose(mean, np.array([0., 0.]), atol=1e-4)
np.testing.assert_allclose(var, np.array([1., 1.]), rtol=1e-4)
y, vars_out = model_cls.apply(initial_params, x, mutable=['batch_stats'])
ema = vars_out['batch_stats']
np.testing.assert_allclose(
ema['mean'], 0.1 * x.mean((0, 1), keepdims=False), atol=1e-4)
np.testing.assert_allclose(
ema['var'], 0.9 + 0.1 * x.var((0, 1), keepdims=False), rtol=1e-4)
def test_batch_norm_complex(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (4, 3, 2), dtype=jnp.complex64)
model_cls = nn.BatchNorm(momentum=0.9, use_running_average=False, dtype=jnp.complex64)
y, initial_params = model_cls.init_with_output(key2, x)
mean = y.mean((0, 1))
var = y.var((0, 1))
np.testing.assert_allclose(mean, np.array([0., 0.]), atol=1e-4)
np.testing.assert_allclose(var, np.array([1., 1.]), rtol=1e-4)
self.assertEqual(mean.dtype, jnp.complex64)
y, vars_out = model_cls.apply(initial_params, x, mutable=['batch_stats'])
ema = vars_out['batch_stats']
np.testing.assert_allclose(
ema['mean'], 0.1 * x.mean((0, 1), keepdims=False), atol=1e-4)
np.testing.assert_allclose(
ema['var'], 0.9 + 0.1 * x.var((0, 1), keepdims=False), rtol=1e-4)
def test_layer_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
e = 1e-5
x = random.normal(key1, (2, 3, 4))
model_cls = nn.LayerNorm(use_bias=False, use_scale=False, epsilon=e)
y, _ = model_cls.init_with_output(key2, x)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.shape, y.shape)
y_one_liner = ((x - x.mean(axis=-1, keepdims=True)) *
jax.lax.rsqrt(x.var(axis=-1, keepdims=True) + e))
np.testing.assert_allclose(y_one_liner, y, atol=1e-4)
def test_group_norm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
e = 1e-5
x = random.normal(key1, (2, 5, 4, 4, 32))
model_cls = nn.GroupNorm(num_groups=2, use_bias=False, use_scale=False, epsilon=e)
y, _ = model_cls.init_with_output(key2, x)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.shape, y.shape)
x_gr = x.reshape([2, 5, 4, 4, 2, 16])
y_test = ((x_gr - x_gr.mean(axis=[1, 2, 3, 5], keepdims=True)) *
jax.lax.rsqrt(x_gr.var(axis=[1, 2, 3, 5], keepdims=True) + e))
y_test = y_test.reshape([2, 5, 4, 4, 32])
np.testing.assert_allclose(y_test, y, atol=1e-4)
def test_group_norm_raises(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
e = 1e-5
x = random.normal(key1, (2, 5, 4, 4, 32))
model_cls = nn.GroupNorm(num_groups=3, use_bias=False, use_scale=False, epsilon=e)
with self.assertRaises(ValueError):
model_cls.init_with_output(key2, x)
def test_batch_norm_multi_init(self):
class Foo(nn.Module):
@nn.compact
def __call__(self, x):
norm = nn.BatchNorm(
name="norm",
use_running_average=False,
axis_name="batch",
)
x = norm(x)
return x, norm(x)
key = random.PRNGKey(0)
model = Foo()
x = random.normal(random.PRNGKey(1), (2, 4))
(y1, y2), variables = model.init_with_output(key, x)
np.testing.assert_allclose(y1, y2, rtol=1e-4)
class StochasticTest(absltest.TestCase):
def test_dropout(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
module = nn.Dropout(rate=0.5)
y1 = module.apply({},
jnp.ones((20, 20)),
deterministic=False,
rngs={'dropout': key1})
y2 = module.apply({},
jnp.ones((20, 20)),
deterministic=False,
rngs={'dropout': key2})
self.assertFalse(np.all(y1 == y2))
y1 = module.apply({},
jnp.ones((20, 20)),
deterministic=True,
rngs={'dropout': key1})
y2 = module.apply({},
jnp.ones((20, 20)),
deterministic=True,
rngs={'dropout': key2})
self.assertTrue(np.all(y1 == y2))
def test_dropout_rate_stats(self):
rootkey = random.PRNGKey(0)
for rate in np.arange(0.1, 1.0, 0.1):
rootkey, subkey = random.split(rootkey)
module = nn.Dropout(rate=rate)
n_trials = 10
nonzero_counts = 0
for key in random.split(subkey, n_trials):
y = module.apply({},
jnp.ones((100, 100)),
deterministic=False,
rngs={'dropout': key})
nonzero_counts += np.sum(y > 0.0)
all_counts = np.prod((100, 100, n_trials))
frac = np.sum(nonzero_counts) / all_counts
keep_rate = 1.0 - rate
# just check within 3 sigma.
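# (the number of kept activations is Binomial(all_counts, keep_rate), so the
# empirical keep fraction has standard error sqrt(rate * keep_rate / all_counts);
# delta is three such standard errors)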
delta = 3 * np.sqrt(rate * keep_rate) / np.sqrt(all_counts)
self.assertTrue(keep_rate - delta < frac < keep_rate + delta)
def test_dropout_rate_limits(self):
rng = random.PRNGKey(0)
key1, key2, key3 = random.split(rng, 3)
inputs = jnp.ones((20, 20))
d0 = nn.Dropout(rate=0.0)
y1 = d0.apply({}, inputs,
deterministic=False,
rngs={'dropout': key1})
np.testing.assert_array_equal(y1, inputs)
d1 = nn.Dropout(rate=1.0)
y2 = d1.apply({}, inputs,
deterministic=False,
rngs={'dropout': key2})
np.testing.assert_array_equal(y2, np.zeros_like(inputs))
# ensure gradient of rate==1.0 case is non-NaN
fn = lambda x, k: d1.apply({}, x,
rngs={'dropout': k},
deterministic=False)
res = jax.grad(lambda x, k: jnp.sum(fn(x, k)))(inputs, key3)
self.assertFalse(np.isnan(res).any())
# TODO(flax-dev): add integration tests for RNN cells
class RecurrentTest(absltest.TestCase):
def test_lstm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
c0, h0 = nn.LSTMCell.initialize_carry(rng, (2,), 4)
self.assertEqual(c0.shape, (2, 4))
self.assertEqual(h0.shape, (2, 4))
lstm = nn.LSTMCell()
(carry, y), initial_params = lstm.init_with_output(key2, (c0, h0), x)
self.assertEqual(carry[0].shape, (2, 4))
self.assertEqual(carry[1].shape, (2, 4))
np.testing.assert_allclose(y, carry[1])
param_shapes = jax.tree_map(np.shape, initial_params['params'])
self.assertEqual(param_shapes, {
'ii': {'kernel': (3, 4)},
'if': {'kernel': (3, 4)},
'ig': {'kernel': (3, 4)},
'io': {'kernel': (3, 4)},
'hi': {'kernel': (4, 4), 'bias': (4,)},
'hf': {'kernel': (4, 4), 'bias': (4,)},
'hg': {'kernel': (4, 4), 'bias': (4,)},
'ho': {'kernel': (4, 4), 'bias': (4,)},
})
def test_gru(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
carry0 = nn.GRUCell.initialize_carry(rng, (2,), 4)
self.assertEqual(carry0.shape, (2, 4))
gru = nn.GRUCell()
(carry, y), initial_params = gru.init_with_output(key2, carry0, x)
#gru = nn.Model(nn.GRUCell, initial_params)
self.assertEqual(carry.shape, (2, 4))
np.testing.assert_allclose(y, carry)
param_shapes = jax.tree_map(np.shape, initial_params['params'])
self.assertEqual(param_shapes, {
'ir': {'kernel': (3, 4), 'bias': (4,)},
'iz': {'kernel': (3, 4), 'bias': (4,)},
'in': {'kernel': (3, 4), 'bias': (4,)},
'hr': {'kernel': (4, 4)},
'hz': {'kernel': (4, 4)},
'hn': {'kernel': (4, 4), 'bias': (4,)},
})
def test_convlstm(self):
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 4, 4, 3))
c0, h0 = nn.ConvLSTM.initialize_carry(rng, (2,), (4, 4, 6))
self.assertEqual(c0.shape, (2, 4, 4, 6))
self.assertEqual(h0.shape, (2, 4, 4, 6))
lstm = nn.ConvLSTM(features=6, kernel_size=(3, 3))
(carry, y), initial_params = lstm.init_with_output(key2, (c0, h0), x)
self.assertEqual(carry[0].shape, (2, 4, 4, 6))
self.assertEqual(carry[1].shape, (2, 4, 4, 6))
np.testing.assert_allclose(y, carry[1])
param_shapes = jax.tree_map(np.shape, initial_params['params'])
self.assertEqual(param_shapes, {
'hh': {'bias': (6*4,), 'kernel': (3, 3, 6, 6*4)},
'ih': {'bias': (6*4,), 'kernel': (3, 3, 3, 6*4)},
})
def test_optimized_lstm_cell_matches_regular(self):
# Create regular LSTMCell.
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
c0, h0 = nn.LSTMCell.initialize_carry(rng, (2,), 4)
self.assertEqual(c0.shape, (2, 4))
self.assertEqual(h0.shape, (2, 4))
lstm = nn.LSTMCell()
(_, y), lstm_params = lstm.init_with_output(key2, (c0, h0), x)
# Create OptimizedLSTMCell.
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
x = random.normal(key1, (2, 3))
c0, h0 = nn.OptimizedLSTMCell.initialize_carry(rng, (2,), 4)
self.assertEqual(c0.shape, (2, 4))
self.assertEqual(h0.shape, (2, 4))
lstm_opt = nn.OptimizedLSTMCell()
(_, y_opt), lstm_opt_params = lstm_opt.init_with_output(key2, (c0, h0), x)
np.testing.assert_allclose(y, y_opt, rtol=1e-6)
jtu.check_eq(lstm_params, lstm_opt_params)
if __name__ == '__main__':
absltest.main()
|
{"hexsha": "82bf277e442181bdf5bbb7eda811adb1949bacd4", "size": 12345, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/linen/linen_test.py", "max_stars_repo_name": "melissatan/flax", "max_stars_repo_head_hexsha": "8ff7d702d989f4577d1166b9d90a19bebe0cce32", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/linen/linen_test.py", "max_issues_repo_name": "melissatan/flax", "max_issues_repo_head_hexsha": "8ff7d702d989f4577d1166b9d90a19bebe0cce32", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/linen/linen_test.py", "max_forks_repo_name": "melissatan/flax", "max_forks_repo_head_hexsha": "8ff7d702d989f4577d1166b9d90a19bebe0cce32", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1709401709, "max_line_length": 90, "alphanum_fraction": 0.5906034832, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 3913}
|
Gearhead Records
Gearhead has now split into two businesses: the label is run by Michelle Haunold in Woodland, CA, while Mike LaVella runs Gearhead Magazine in Oakland.
Gearhead began as a magazine, and the first ten issues each came with a 7-inch record. In late 1999 the decision was made to stop bundling the magazine with a 7-inch and to start Gearhead Records. In 2000 the label put out nearly a dozen records; the last issue of Gearhead Magazine came out before the magazine was set aside for a while, then started up again in 2004.
Gearhead Records on Myspace: http://www.myspace.com/gearheadrecords
Gearhead Records is also opening "Gearhead Records and stuff", a record store with a variety of music, clothing and merchandise, on January 23rd, 2007.
Catalog {{{
Catalog # Artist Title Format
RPM 011 V/A Runnin On Fumes CD
RPM 012 V/A Gearfest CD
RPM 013 Demons Come Burst!ng Out! 12 & CD
RPM 014 The Hypnomen Trip With Satan 10 (OUT OF PRINT) & CD
RPM 015 Red Planet Lets Get Ripped 7 & CD EP
RPM 016 Red Planet Revolution 33 CD & LP
RPM 017 Demons Riot Salvation LP (OUT OF PRINT) & CD
RPM 018 The Sewergrooves The Race is over 7 (OUT OF PRINT)
RPM 019 The Dragons Whoa Yeah 7 (OUT OF PRINT)
RPM 020 The Pinkz Something About You 7
RPM 021 The Pattern NonStop 7 (OUT OF PRINT)
RPM 022 NRA New Recovery CD
RPM 023 The Hives AKAIDIOT 12 & CD (OUT OF PRINT)
RPM 024 The Hives Hate to Say I Told You So 7 & CD (OUT OF PRINT)
RPM 025 Mensen Delusions of Grandeur CD & LP
RPM 026 The Nads Saigon Hooker 7
RPM 027 Puffball The Super Commando CD
RPM 028 Red Planet Lets Degenerate CD & LP
RPM 029 The Hard Feelings Soul Party 7
RPM 030 The Hives Barely Legal CD & LP (OUT OF PRINT)
RPM 031 The Dukes of Hamburg Some Folks LP (OUT OF PRINT) & CD
RPM 032 Demons Stockholm Slump CD & LP
RPM 033 The Hellacopters High Visibility CD & LP
RPM 034 The Hellacopters Cream of the Crap Vol. 1 CD & LP (Double Disc Set)
RPM 035 Mensen Oslo City CD & LP
RPM 036 The New Bomb Turks The Night Before the Day the Earth CD & LP
RPM 037 The Hypnomen Altamont Boogaloo 7
RPM 038 The Maggots Lets Go in 69 7
RPM 039 The Demonics Dune Buggy Gang 7
RPM 040 The Hives Veni Vidi Vicious LP
RPM 041 V/A Smash Up Derby CD
RPM 042 Demons Demonology CD
RPM 043 The Riverboat Gamblers Something to Crow About CD & LP
RPM 044 The Dragons Dirty Bomb/Save a Smile 7
RPM 045 The Dragons Sin Salvation CD & LP
RPM 046 New Bomb Turks Switchblade Tongues, Butterknife Brains CD & LP
RPM 047 V/A Greaseball Melodrama CD
RPM 048 The Turbo A.C.s Automatic CD
RPM 049 Lazy Cowgirls You b/w When it Comes to You 7
RPM 050 The Riverboat Gamblers/Electric Eel Shock Split 7inch 7 (LTD PICTURE DISC)
RPM 051 V/A The Thingmaker CD
RPM 052 NRA Machine CD
RPM 053 The Million Dollar Marxists Give it a Name CD
RPM 054 The Dragons Rock n Roll Kamikaze CD & LP (LIMITED WHITE VINYL)
RPM 055 The Wildhearts Riff After Riff CD & LP (LIMITED RED VINYL)
RPM 056 Red Planet We Know How It Goes CD
RPM 057 Gitogito Hustler Wonderful/Romantic 7 (LIMITED PINK VINYL)
RPM 058 Gitogito Hustler Gitogito Galore CD EP & 10 (LIMITED PURPLE VINYL)
RPM 059 Rock n Roll Soldiers The High School Sessions 12 EP (LIMITED GOLD VINYL)
RPM 060 Electric Eel Shock Go USA CD & LP (LIMITED BLUE VINYL)
RPM 061 V/A Welcome to Gearhead Country CD
RPM 062 GitoGito Hustler Love and Roll CD & LP (LIMITED BABY BLUE VINYL)
RPM 063 Pink Swords Shut Up and Take It CD & LP (LP CONTAINS BONUS TRACK)
RPM 064 Lords of Altamont Lords Have Mercy CD
RPM 065 Black Furies Death Trip Saturday Night CD
RPM 066 Spunks Russian Roulette b/w CanNana Fever 7 (LIMITED YELLOW VINYL)
RPM 067 Bottles and Skulls Scream Scream b/w Dead in the USA 7 (LIMITED RED VINYL)
RPM 068 The Turbo A.C.s Avenue X CD
RPM 069 Rock n Roll Soldiers The Weak Blame the Strong 12 EP (LIMITED GREEN VINYL)
RPM 070 Electric Eel Shock Beat Me CD & LP (LIMITED MAGENTA VINYL)
RPM 071 I Walk the Line Desolation Street CD
RPM 072 DTs Lights Out 7 (LIMITED WHITE VINYL)
RPM 075 White Barons Up All Night with the White Barons CD & LP (LIMITED WHITE VINYL)
RPM 076 Spunks Yellow Fever Blues CD & LP (LIMITED BLUE VINYL)
}}}
Newest Releases
|
{"hexsha": "ee3d5ba093dcd728a7523dacc8238c976f90bd8f", "size": 6227, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Gearhead_Records.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Gearhead_Records.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Gearhead_Records.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.4285714286, "max_line_length": 351, "alphanum_fraction": 0.5101975269, "num_tokens": 1524}
|
[STATEMENT]
lemma matchrel_rtrancl_matchers [simp]:
assumes "(x, y) \<in> matchrel\<^sup>*"
shows "matchers_map (snd x) \<inter> matchers (set_mset (fst x)) =
matchers_map (snd y) \<inter> matchers (set_mset (fst y))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. matchers_map (snd x) \<inter> matchers (set_mset (fst x)) = matchers_map (snd y) \<inter> matchers (set_mset (fst y))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(x, y) \<in> matchrel\<^sup>*
goal (1 subgoal):
1. matchers_map (snd x) \<inter> matchers (set_mset (fst x)) = matchers_map (snd y) \<inter> matchers (set_mset (fst y))
[PROOF STEP]
by (induct) (simp_all add: matchrel_def)
|
{"llama_tokens": 295, "file": "First_Order_Terms_Abstract_Matching", "length": 2}
|
[GOAL]
V : Type u
inst✝ : Fintype V
⊢ Fintype (SimpleGraph V)
[PROOFSTEP]
classical exact Fintype.ofInjective SimpleGraph.Adj SimpleGraph.ext
[GOAL]
V : Type u
inst✝ : Fintype V
⊢ Fintype (SimpleGraph V)
[PROOFSTEP]
exact Fintype.ofInjective SimpleGraph.Adj SimpleGraph.ext
[GOAL]
V : Type u_1
W : Type u_2
⊢ Symmetric fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true
[PROOFSTEP]
intro v w
[GOAL]
V : Type u_1
W : Type u_2
v w : V ⊕ W
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) v w →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) w v
[PROOFSTEP]
cases v
[GOAL]
case inl
V : Type u_1
W : Type u_2
w : V ⊕ W
val✝ : V
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝)
w →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) w
(Sum.inl val✝)
[PROOFSTEP]
cases w
[GOAL]
case inr
V : Type u_1
W : Type u_2
w : V ⊕ W
val✝ : W
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝)
w →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) w
(Sum.inr val✝)
[PROOFSTEP]
cases w
[GOAL]
case inl.inl
V : Type u_1
W : Type u_2
val✝¹ val✝ : V
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝¹)
(Sum.inl val✝) →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝)
(Sum.inl val✝¹)
[PROOFSTEP]
simp
[GOAL]
case inl.inr
V : Type u_1
W : Type u_2
val✝¹ : V
val✝ : W
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝¹)
(Sum.inr val✝) →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝)
(Sum.inl val✝¹)
[PROOFSTEP]
simp
[GOAL]
case inr.inl
V : Type u_1
W : Type u_2
val✝¹ : W
val✝ : V
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝¹)
(Sum.inl val✝) →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝)
(Sum.inr val✝¹)
[PROOFSTEP]
simp
[GOAL]
case inr.inr
V : Type u_1
W : Type u_2
val✝¹ val✝ : W
⊢ (fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝¹)
(Sum.inr val✝) →
(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝)
(Sum.inr val✝¹)
[PROOFSTEP]
simp
[GOAL]
V : Type u_1
W : Type u_2
⊢ Irreflexive fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true
[PROOFSTEP]
intro v
[GOAL]
V : Type u_1
W : Type u_2
v : V ⊕ W
⊢ ¬(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) v v
[PROOFSTEP]
cases v
[GOAL]
case inl
V : Type u_1
W : Type u_2
val✝ : V
⊢ ¬(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inl val✝)
(Sum.inl val✝)
[PROOFSTEP]
simp
[GOAL]
case inr
V : Type u_1
W : Type u_2
val✝ : W
⊢ ¬(fun v w => Sum.isLeft v = true ∧ Sum.isRight w = true ∨ Sum.isRight v = true ∧ Sum.isLeft w = true) (Sum.inr val✝)
(Sum.inr val✝)
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : Adj G a b
⊢ a ≠ b
[PROOFSTEP]
rintro rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a c u v w : V
e : Sym2 V
h : Adj G a a
⊢ False
[PROOFSTEP]
exact G.irrefl h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
x y : SimpleGraph V
v w : V
h : (x.Adj ⊔ y.Adj) v w
⊢ (x.Adj ⊔ y.Adj) w v
[PROOFSTEP]
rwa [Pi.sup_apply, Pi.sup_apply, x.adj_comm, y.adj_comm]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
x y : SimpleGraph V
v w : V
h : (x.Adj ⊓ y.Adj) v w
⊢ (x.Adj ⊓ y.Adj) w v
[PROOFSTEP]
rwa [Pi.inf_apply, Pi.inf_apply, x.adj_comm, y.adj_comm]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
x✝ : (fun v w => v ≠ w ∧ ¬Adj G v w) v w
hne : v ≠ w
right✝ : ¬Adj G v w
⊢ ¬Adj G w v
[PROOFSTEP]
rwa [adj_comm]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
x y : SimpleGraph V
v w : V
h : (x.Adj \ y.Adj) v w
⊢ (x.Adj \ y.Adj) w v
[PROOFSTEP]
change x.Adj w v ∧ ¬y.Adj w v
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
x y : SimpleGraph V
v w : V
h : (x.Adj \ y.Adj) v w
⊢ Adj x w v ∧ ¬Adj y w v
[PROOFSTEP]
rwa [x.adj_comm, y.adj_comm]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (SimpleGraph V)
⊢ Irreflexive fun a b => ∃ G, G ∈ s ∧ Adj G a b
[PROOFSTEP]
rintro a ⟨G, _, ha⟩
[GOAL]
case intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a✝ b c u v w : V
e : Sym2 V
s : Set (SimpleGraph V)
a : V
G : SimpleGraph V
left✝ : G ∈ s
ha : Adj G a a
⊢ False
[PROOFSTEP]
exact ha.ne rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : ι → SimpleGraph V
⊢ Adj (⨆ (i : ι), f i) a b ↔ ∃ i, Adj (f i) a b
[PROOFSTEP]
simp [iSup]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : ι → SimpleGraph V
⊢ Adj (⨅ (i : ι), f i) a b ↔ (∀ (i : ι), Adj (f i) a b) ∧ a ≠ b
[PROOFSTEP]
simp [iInf]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (SimpleGraph V)
hs : Set.Nonempty s
⊢ (∀ (G : SimpleGraph V), G ∈ s → Adj G a b) → a ≠ b
[PROOFSTEP]
obtain ⟨G, hG⟩ := hs
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (SimpleGraph V)
G : SimpleGraph V
hG : G ∈ s
⊢ (∀ (G : SimpleGraph V), G ∈ s → Adj G a b) → a ≠ b
[PROOFSTEP]
exact fun h => (h _ hG).ne
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝ : Nonempty ι
f : ι → SimpleGraph V
⊢ Adj (⨅ (i : ι), f i) a b ↔ ∀ (i : ι), Adj (f i) a b
[PROOFSTEP]
rw [iInf, sInf_adj_of_nonempty (Set.range_nonempty _), Set.forall_range_iff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u v w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
s : Set (SimpleGraph V)
G : SimpleGraph V
hG : ∀ (b : SimpleGraph V), b ∈ s → b ≤ G
a b : V
⊢ Adj (sSup s) a b → Adj G a b
[PROOFSTEP]
rintro ⟨H, hH, hab⟩
[GOAL]
case intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u v w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
s : Set (SimpleGraph V)
G : SimpleGraph V
hG : ∀ (b : SimpleGraph V), b ∈ s → b ≤ G
a b : V
H : SimpleGraph V
hH : H ∈ s
hab : Adj H a b
⊢ Adj G a b
[PROOFSTEP]
exact hG _ hH hab
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
ι✝ : Type u
κ✝ : ι✝ → Type u
f : (a : ι✝) → κ✝ a → SimpleGraph V
⊢ ⨅ (a : ι✝), ⨆ (b : κ✝ a), f a b = ⨆ (g : (a : ι✝) → κ✝ a), ⨅ (a : ι✝), f a (g a)
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
ι✝ : Type u
κ✝ : ι✝ → Type u
f : (a : ι✝) → κ✝ a → SimpleGraph V
x✝¹ x✝ : V
⊢ Adj (⨅ (a : ι✝), ⨆ (b : κ✝ a), f a b) x✝¹ x✝ ↔ Adj (⨆ (g : (a : ι✝) → κ✝ a), ⨅ (a : ι✝), f a (g a)) x✝¹ x✝
[PROOFSTEP]
simp [Classical.skolem]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
G : SimpleGraph V
v w : V
hvw : Adj ⊤ v w
⊢ Adj (G ⊔ Gᶜ) v w
[PROOFSTEP]
by_cases G.Adj v w
[GOAL]
case pos
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
G : SimpleGraph V
v w : V
hvw : Adj ⊤ v w
h : Adj G v w
⊢ Adj (G ⊔ Gᶜ) v w
[PROOFSTEP]
exact Or.inl h
[GOAL]
case neg
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
G : SimpleGraph V
v w : V
hvw : Adj ⊤ v w
h : ¬Adj G v w
⊢ Adj (G ⊔ Gᶜ) v w
[PROOFSTEP]
exact Or.inr ⟨hvw, h⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
x y : SimpleGraph V
⊢ x \ y = x ⊓ yᶜ
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
x y : SimpleGraph V
v w : V
⊢ Adj (x \ y) v w ↔ Adj (x ⊓ yᶜ) v w
[PROOFSTEP]
refine' ⟨fun h => ⟨h.1, ⟨_, h.2⟩⟩, fun h => ⟨h.1, h.2.2⟩⟩
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
x y : SimpleGraph V
v w : V
h : Adj (x \ y) v w
⊢ v ≠ w
[PROOFSTEP]
rintro rfl
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
src✝ : DistribLattice (SimpleGraph V) := distribLattice
x y : SimpleGraph V
v : V
h : Adj (x \ y) v v
⊢ False
[PROOFSTEP]
exact x.irrefl h.1
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ edgeSet (G₁ ⊔ G₂) = edgeSet G₁ ∪ edgeSet G₂
[PROOFSTEP]
ext ⟨x, y⟩
[GOAL]
case h.mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
x✝ : Sym2 V
x y : V
⊢ Quot.mk Setoid.r (x, y) ∈ edgeSet (G₁ ⊔ G₂) ↔ Quot.mk Setoid.r (x, y) ∈ edgeSet G₁ ∪ edgeSet G₂
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ edgeSet (G₁ ⊓ G₂) = edgeSet G₁ ∩ edgeSet G₂
[PROOFSTEP]
ext ⟨x, y⟩
[GOAL]
case h.mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
x✝ : Sym2 V
x y : V
⊢ Quot.mk Setoid.r (x, y) ∈ edgeSet (G₁ ⊓ G₂) ↔ Quot.mk Setoid.r (x, y) ∈ edgeSet G₁ ∩ edgeSet G₂
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ edgeSet (G₁ \ G₂) = edgeSet G₁ \ edgeSet G₂
[PROOFSTEP]
ext ⟨x, y⟩
[GOAL]
case h.mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
x✝ : Sym2 V
x y : V
⊢ Quot.mk Setoid.r (x, y) ∈ edgeSet (G₁ \ G₂) ↔ Quot.mk Setoid.r (x, y) ∈ edgeSet G₁ \ edgeSet G₂
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ G : SimpleGraph V
s : Set (Sym2 V)
⊢ edgeSet G \ (s \ {e | Sym2.IsDiag e}) = edgeSet G \ s
[PROOFSTEP]
ext e
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
G₁ G₂ G : SimpleGraph V
s : Set (Sym2 V)
e : Sym2 V
⊢ e ∈ edgeSet G \ (s \ {e | Sym2.IsDiag e}) ↔ e ∈ edgeSet G \ s
[PROOFSTEP]
simp only [Set.mem_diff, Set.mem_setOf_eq, not_and, not_not, and_congr_right_iff]
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
G₁ G₂ G : SimpleGraph V
s : Set (Sym2 V)
e : Sym2 V
⊢ e ∈ edgeSet G → (e ∈ s → Sym2.IsDiag e ↔ ¬e ∈ s)
[PROOFSTEP]
intro h
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
G₁ G₂ G : SimpleGraph V
s : Set (Sym2 V)
e : Sym2 V
h : e ∈ edgeSet G
⊢ e ∈ s → Sym2.IsDiag e ↔ ¬e ∈ s
[PROOFSTEP]
simp only [G.not_isDiag_of_mem_edgeSet h, imp_false]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
⊢ Adj G v w ↔ v ≠ w ∧ ∃ e, e ∈ edgeSet G ∧ v ∈ e ∧ w ∈ e
[PROOFSTEP]
refine' ⟨fun _ => ⟨G.ne_of_adj ‹_›, ⟦(v, w)⟧, by simpa⟩, _⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
x✝ : Adj G v w
⊢ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ edgeSet G ∧
v ∈ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∧ w ∈ Quotient.mk (Sym2.Rel.setoid V) (v, w)
[PROOFSTEP]
simpa
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
⊢ (v ≠ w ∧ ∃ e, e ∈ edgeSet G ∧ v ∈ e ∧ w ∈ e) → Adj G v w
[PROOFSTEP]
rintro ⟨hne, e, he, hv⟩
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e✝ : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
hne : v ≠ w
e : Sym2 V
he : e ∈ edgeSet G
hv : v ∈ e ∧ w ∈ e
⊢ Adj G v w
[PROOFSTEP]
rw [Sym2.mem_and_mem_iff hne] at hv
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e✝ : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
hne : v ≠ w
e : Sym2 V
he : e ∈ edgeSet G
hv : e = Quotient.mk (Sym2.Rel.setoid V) (v, w)
⊢ Adj G v w
[PROOFSTEP]
subst e
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
v w : V
hne : v ≠ w
he : Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ edgeSet G
⊢ Adj G v w
[PROOFSTEP]
rwa [mem_edgeSet] at he
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ Adj G a b ↔ ∃ e, ↑e = Quotient.mk (Sym2.Rel.setoid V) (a, b)
[PROOFSTEP]
simp only [mem_edgeSet, exists_prop, SetCoe.exists, exists_eq_right, Subtype.coe_mk]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
G₁ G₂ : SimpleGraph V
e : Sym2 V
he : e ∈ edgeSet G
v : V
h : v ∈ e
⊢ Sym2.Mem.other h ≠ v
[PROOFSTEP]
erw [← Sym2.other_spec h, Sym2.eq_swap] at he
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
G₁ G₂ : SimpleGraph V
e : Sym2 V
v : V
h : v ∈ e
he : Quotient.mk (Sym2.Rel.setoid V) (Sym2.Mem.other h, v) ∈ edgeSet G
⊢ Sym2.Mem.other h ≠ v
[PROOFSTEP]
exact G.ne_of_adj he
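The two steps above likely form this standalone lemma (the name and the implicit/explicit binder placement are assumptions): the second endpoint of an edge incident to `v` is distinct from `v`.

-- reconstruction sketch; lemma name and binder placement assumed
theorem edge_other_ne {e : Sym2 V} (he : e ∈ G.edgeSet) {v : V} (h : v ∈ e) :
    Sym2.Mem.other h ≠ v := by
  -- rewrite the edge as ⟦(other h, v)⟧ so that membership in the edge set
  -- becomes an adjacency fact, whose endpoints must differ
  erw [← Sym2.other_spec h, Sym2.eq_swap] at he
  exact G.ne_of_adj he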
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ Fintype ↑(edgeSet ⊥)
[PROOFSTEP]
rw [edgeSet_bot]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
⊢ Fintype ↑∅
[PROOFSTEP]
infer_instance
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet (G₁ ⊔ G₂))
[PROOFSTEP]
rw [edgeSet_sup]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet G₁ ∪ edgeSet G₂)
[PROOFSTEP]
infer_instance
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet (G₁ ⊓ G₂))
[PROOFSTEP]
rw [edgeSet_inf]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet G₁ ∩ edgeSet G₂)
[PROOFSTEP]
exact Set.fintypeInter _ _
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet (G₁ \ G₂))
[PROOFSTEP]
rw [edgeSet_sdiff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : DecidableEq V
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ Fintype ↑(edgeSet G₁ \ edgeSet G₂)
[PROOFSTEP]
exact Set.fintypeDiff _ _
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ edgeSet (fromEdgeSet s) = s \ {e | Sym2.IsDiag e}
[PROOFSTEP]
ext e
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
s : Set (Sym2 V)
e : Sym2 V
⊢ e ∈ edgeSet (fromEdgeSet s) ↔ e ∈ s \ {e | Sym2.IsDiag e}
[PROOFSTEP]
exact Sym2.ind (by simp) e
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
s : Set (Sym2 V)
e : Sym2 V
⊢ ∀ (x y : V),
Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ edgeSet (fromEdgeSet s) ↔
Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ s \ {e | Sym2.IsDiag e}
[PROOFSTEP]
simp
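A sketch of the declaration behind this trace, assuming the name `edgeSet_fromEdgeSet`; note how the diagonal elements of `s` are removed, matching the final `simp` goal above.

-- reconstruction sketch; lemma name assumed
theorem edgeSet_fromEdgeSet (s : Set (Sym2 V)) :
    (fromEdgeSet s).edgeSet = s \ { e | e.IsDiag } := by
  ext e
  -- reduce membership to representatives ⟦(x, y)⟧, then discharge by simp
  exact Sym2.ind (by simp) e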
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ fromEdgeSet (edgeSet G) = G
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet (edgeSet G)) v w ↔ Adj G v w
[PROOFSTEP]
exact ⟨fun h => h.1, fun h => ⟨h, G.ne_of_adj h⟩⟩
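The companion round-trip lemma, reconstructed from the trace (the name `fromEdgeSet_edgeSet` is an assumption): rebuilding a graph from its own edge set changes nothing, since `ne_of_adj` supplies the non-diagonality side condition.

-- reconstruction sketch; lemma name assumed
theorem fromEdgeSet_edgeSet (G : SimpleGraph V) : fromEdgeSet G.edgeSet = G := by
  ext v w
  exact ⟨fun h => h.1, fun h => ⟨h, G.ne_of_adj h⟩⟩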
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ fromEdgeSet ∅ = ⊥
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet ∅) v w ↔ Adj ⊥ v w
[PROOFSTEP]
simp only [fromEdgeSet_adj, Set.mem_empty_iff_false, false_and_iff, bot_adj]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ fromEdgeSet Set.univ = ⊤
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet Set.univ) v w ↔ Adj ⊤ v w
[PROOFSTEP]
simp only [fromEdgeSet_adj, Set.mem_univ, true_and_iff, top_adj]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
⊢ fromEdgeSet s ⊓ fromEdgeSet t = fromEdgeSet (s ∩ t)
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet s ⊓ fromEdgeSet t) v w ↔ Adj (fromEdgeSet (s ∩ t)) v w
[PROOFSTEP]
simp only [fromEdgeSet_adj, Set.mem_inter_iff, Ne.def, inf_adj]
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ (Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ s ∧ ¬v = w) ∧ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ t ∧ ¬v = w ↔
(Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ s ∧ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ t) ∧ ¬v = w
[PROOFSTEP]
tauto
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
⊢ fromEdgeSet s ⊔ fromEdgeSet t = fromEdgeSet (s ∪ t)
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet s ⊔ fromEdgeSet t) v w ↔ Adj (fromEdgeSet (s ∪ t)) v w
[PROOFSTEP]
simp [Set.mem_union, or_and_right]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
⊢ fromEdgeSet s \ fromEdgeSet t = fromEdgeSet (s \ t)
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet s \ fromEdgeSet t) v w ↔ Adj (fromEdgeSet (s \ t)) v w
[PROOFSTEP]
constructor
[GOAL]
case Adj.h.h.a.mp
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet s \ fromEdgeSet t) v w → Adj (fromEdgeSet (s \ t)) v w
[PROOFSTEP]
simp (config := { contextual := true })
[GOAL]
case Adj.h.h.a.mpr
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
v w : V
⊢ Adj (fromEdgeSet (s \ t)) v w → Adj (fromEdgeSet s \ fromEdgeSet t) v w
[PROOFSTEP]
simp (config := { contextual := true })
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
h : s ⊆ t
⊢ fromEdgeSet s ≤ fromEdgeSet t
[PROOFSTEP]
rintro v w
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
h : s ⊆ t
v w : V
⊢ Adj (fromEdgeSet s) v w → Adj (fromEdgeSet t) v w
[PROOFSTEP]
simp (config := { contextual := true }) only [fromEdgeSet_adj, Ne.def, not_false_iff, and_true_iff, and_imp]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s✝ s t : Set (Sym2 V)
h : s ⊆ t
v w : V
⊢ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ s → ¬v = w → Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ t
[PROOFSTEP]
exact fun vws _ => h vws
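Monotonicity of `fromEdgeSet`, assembled from the three steps above (the lemma name `fromEdgeSet_mono` is an assumption; the tactics are verbatim from the trace):

-- reconstruction sketch; lemma name assumed
theorem fromEdgeSet_mono {s t : Set (Sym2 V)} (h : s ⊆ t) :
    fromEdgeSet s ≤ fromEdgeSet t := by
  rintro v w
  simp (config := { contextual := true }) only [fromEdgeSet_adj, Ne.def, not_false_iff,
    and_true_iff, and_imp]
  -- the non-diagonality side condition is untouched; only the membership moves along h
  exact fun vws _ => h vws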
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : Fintype ↑s
⊢ Fintype ↑(edgeSet (fromEdgeSet s))
[PROOFSTEP]
rw [edgeSet_fromEdgeSet s]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : Fintype ↑s
⊢ Fintype ↑(s \ {e | Sym2.IsDiag e})
[PROOFSTEP]
infer_instance
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
d₁ d₂ : Dart G
⊢ d₁ = d₂ ↔ d₁.toProd = d₂.toProd
[PROOFSTEP]
cases d₁
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
d₂ : Dart G
toProd✝ : V × V
is_adj✝ : Adj G toProd✝.fst toProd✝.snd
⊢ { toProd := toProd✝, is_adj := is_adj✝ } = d₂ ↔ { toProd := toProd✝, is_adj := is_adj✝ }.toProd = d₂.toProd
[PROOFSTEP]
cases d₂
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
toProd✝¹ : V × V
is_adj✝¹ : Adj G toProd✝¹.fst toProd✝¹.snd
toProd✝ : V × V
is_adj✝ : Adj G toProd✝.fst toProd✝.snd
⊢ { toProd := toProd✝¹, is_adj := is_adj✝¹ } = { toProd := toProd✝, is_adj := is_adj✝ } ↔
{ toProd := toProd✝¹, is_adj := is_adj✝¹ }.toProd = { toProd := toProd✝, is_adj := is_adj✝ }.toProd
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
s : (v : V) × ↑(neighborSet G v)
⊢ (fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } })
((fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) }) s) =
s
[PROOFSTEP]
ext
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
s : (v : V) × ↑(neighborSet G v)
⊢ ((fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } })
((fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) }) s)).fst =
s.fst
[PROOFSTEP]
simp
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
s : (v : V) × ↑(neighborSet G v)
⊢ ↑((fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } })
((fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) }) s)).snd =
↑s.snd
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
d : Dart G
⊢ (fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) })
((fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } }) d) =
d
[PROOFSTEP]
ext
[GOAL]
case h.h₁
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
d : Dart G
⊢ ((fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) })
((fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } }) d)).fst =
d.fst
[PROOFSTEP]
simp
[GOAL]
case h.h₂
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
d : Dart G
⊢ ((fun s => { toProd := (s.fst, ↑s.snd), is_adj := (_ : ↑s.snd ∈ neighborSet G s.fst) })
((fun d => { fst := d.fst, snd := { val := d.snd, property := (_ : Adj G d.fst d.snd) } }) d)).snd =
d.snd
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ ∀ (d₁ d₂ : Dart G), Dart.edge d₁ = Dart.edge d₂ ↔ d₁ = d₂ ∨ d₁ = Dart.symm d₂
[PROOFSTEP]
rintro ⟨p, hp⟩ ⟨q, hq⟩
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
p : V × V
hp : Adj G p.fst p.snd
q : V × V
hq : Adj G q.fst q.snd
⊢ Dart.edge { toProd := p, is_adj := hp } = Dart.edge { toProd := q, is_adj := hq } ↔
{ toProd := p, is_adj := hp } = { toProd := q, is_adj := hq } ∨
{ toProd := p, is_adj := hp } = Dart.symm { toProd := q, is_adj := hq }
[PROOFSTEP]
simp [Sym2.mk''_eq_mk''_iff, -Quotient.eq]
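A sketch of the dart lemma this trace encodes; the name `dart_edge_eq_iff` is an assumption, and the `Sym2.mk''_eq_mk''_iff` simp lemma reflects the porting-era API visible in the trace, so this may not compile against a current Mathlib.

-- reconstruction sketch; lemma name assumed, porting-era API
theorem dart_edge_eq_iff : ∀ d₁ d₂ : G.Dart, d₁.edge = d₂.edge ↔ d₁ = d₂ ∨ d₁ = d₂.symm := by
  rintro ⟨p, hp⟩ ⟨q, hq⟩
  -- two darts carry the same underlying edge iff their vertex pairs agree up to a swap
  simp [Sym2.mk''_eq_mk''_iff, -Quotient.eq]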
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ ∀ {d : Dart G} {p : V × V}, Dart.edge d = Quotient.mk (Sym2.Rel.setoid V) p ↔ d.toProd = p ∨ d.toProd = Prod.swap p
[PROOFSTEP]
rintro ⟨p, h⟩
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
p : V × V
h : Adj G p.fst p.snd
⊢ ∀ {p_1 : V × V},
Dart.edge { toProd := p, is_adj := h } = Quotient.mk (Sym2.Rel.setoid V) p_1 ↔
{ toProd := p, is_adj := h }.toProd = p_1 ∨ { toProd := p, is_adj := h }.toProd = Prod.swap p_1
[PROOFSTEP]
apply Sym2.mk''_eq_mk''_iff
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ ∀ {d : Dart G} {u v : V},
Dart.edge d = Quotient.mk (Sym2.Rel.setoid V) (u, v) ↔ d.fst = u ∧ d.snd = v ∨ d.fst = v ∧ d.snd = u
[PROOFSTEP]
rintro ⟨⟨a, b⟩, h⟩ u v
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u✝ v✝ w : V
e : Sym2 V
a b : V
h : Adj G (a, b).fst (a, b).snd
u v : V
⊢ Dart.edge { toProd := (a, b), is_adj := h } = Quotient.mk (Sym2.Rel.setoid V) (u, v) ↔
{ toProd := (a, b), is_adj := h }.toProd.fst = u ∧ { toProd := (a, b), is_adj := h }.toProd.snd = v ∨
{ toProd := (a, b), is_adj := h }.toProd.fst = v ∧ { toProd := (a, b), is_adj := h }.toProd.snd = u
[PROOFSTEP]
rw [dart_edge_eq_mk'_iff]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u✝ v✝ w : V
e : Sym2 V
a b : V
h : Adj G (a, b).fst (a, b).snd
u v : V
⊢ { toProd := (a, b), is_adj := h }.toProd = (u, v) ∨ { toProd := (a, b), is_adj := h }.toProd = Prod.swap (u, v) ↔
{ toProd := (a, b), is_adj := h }.toProd.fst = u ∧ { toProd := (a, b), is_adj := h }.toProd.snd = v ∨
{ toProd := (a, b), is_adj := h }.toProd.fst = v ∧ { toProd := (a, b), is_adj := h }.toProd.snd = u
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
v : V
e₁ e₂ : ↑(neighborSet G v)
h : dartOfNeighborSet G v e₁ = dartOfNeighborSet G v e₂
⊢ ↑e₁ = ↑e₂
[PROOFSTEP]
injection h with h'
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
v : V
e₁ e₂ : ↑(neighborSet G v)
h' : (v, ↑e₁) = (v, ↑e₂)
⊢ ↑e₁ = ↑e₂
[PROOFSTEP]
convert congr_arg Prod.snd h'
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝ : Nontrivial V
⊢ Nonempty (Dart ⊤)
[PROOFSTEP]
obtain ⟨v, w, h⟩ := exists_pair_ne V
[GOAL]
case intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝ : Nontrivial V
v w : V
h : v ≠ w
⊢ Nonempty (Dart ⊤)
[PROOFSTEP]
exact ⟨⟨(v, w), h⟩⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : Adj G a b
⊢ incidenceSet G a ∩ incidenceSet G b = {Quotient.mk (Sym2.Rel.setoid V) (a, b)}
[PROOFSTEP]
refine' (G.incidenceSet_inter_incidenceSet_subset <| h.ne).antisymm _
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : Adj G a b
⊢ {Quotient.mk (Sym2.Rel.setoid V) (a, b)} ⊆ incidenceSet G a ∩ incidenceSet G b
[PROOFSTEP]
rintro _ (rfl : _ = ⟦(a, b)⟧)
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : Adj G a b
⊢ Quotient.mk (Sym2.Rel.setoid V) (a, b) ∈ incidenceSet G a ∩ incidenceSet G b
[PROOFSTEP]
exact ⟨G.mk'_mem_incidenceSet_left_iff.2 h, G.mk'_mem_incidenceSet_right_iff.2 h⟩
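Putting the three steps together: for adjacent vertices the two incidence sets meet in exactly the connecting edge. The lemma name below is an assumption; the statement and tactics are from the trace.

-- reconstruction sketch; lemma name assumed
theorem incidenceSet_inter_incidenceSet_of_adj (h : G.Adj a b) :
    G.incidenceSet a ∩ G.incidenceSet b = {⟦(a, b)⟧} := by
  refine' (G.incidenceSet_inter_incidenceSet_subset <| h.ne).antisymm _
  rintro _ (rfl : _ = ⟦(a, b)⟧)
  exact ⟨G.mk'_mem_incidenceSet_left_iff.2 h, G.mk'_mem_incidenceSet_right_iff.2 h⟩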
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : a ≠ b
ha : e ∈ incidenceSet G a
hb : e ∈ incidenceSet G b
⊢ Adj G a b
[PROOFSTEP]
rwa [← mk'_mem_incidenceSet_left_iff, ← Set.mem_singleton_iff.1 <| G.incidenceSet_inter_incidenceSet_subset h ⟨ha, hb⟩]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : ¬Adj G a b
hn : a ≠ b
⊢ incidenceSet G a ∩ incidenceSet G b = ∅
[PROOFSTEP]
simp_rw [Set.eq_empty_iff_forall_not_mem, Set.mem_inter_iff, not_and]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
h : ¬Adj G a b
hn : a ≠ b
⊢ ∀ (x : Sym2 V), x ∈ incidenceSet G a → ¬x ∈ incidenceSet G b
[PROOFSTEP]
intro u ha hb
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u✝ v w : V
e : Sym2 V
h : ¬Adj G a b
hn : a ≠ b
u : Sym2 V
ha : u ∈ incidenceSet G a
hb : u ∈ incidenceSet G b
⊢ False
[PROOFSTEP]
exact h (G.adj_of_mem_incidenceSet hn ha hb)
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : Fintype ↑(edgeSet G)
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ edgeFinset G₁ = edgeFinset G₂ ↔ G₁ = G₂
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : Fintype ↑(edgeSet G)
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ edgeFinset G₁ ⊆ edgeFinset G₂ ↔ G₁ ≤ G₂
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : Fintype ↑(edgeSet G)
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ edgeFinset G₁ ⊂ edgeFinset G₂ ↔ G₁ < G₂
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝² : Fintype ↑(edgeSet G)
inst✝¹ : Fintype ↑(edgeSet G₁)
inst✝ : Fintype ↑(edgeSet G₂)
⊢ edgeFinset ⊥ = ∅
[PROOFSTEP]
simp [edgeFinset]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝³ : Fintype ↑(edgeSet G)
inst✝² : Fintype ↑(edgeSet G₁)
inst✝¹ : Fintype ↑(edgeSet G₂)
inst✝ : DecidableEq V
⊢ edgeFinset (G₁ ⊔ G₂) = edgeFinset G₁ ∪ edgeFinset G₂
[PROOFSTEP]
simp [edgeFinset]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝³ : Fintype ↑(edgeSet G)
inst✝² : Fintype ↑(edgeSet G₁)
inst✝¹ : Fintype ↑(edgeSet G₂)
inst✝ : DecidableEq V
⊢ edgeFinset (G₁ ⊓ G₂) = edgeFinset G₁ ∩ edgeFinset G₂
[PROOFSTEP]
simp [edgeFinset]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G₁ G₂ : SimpleGraph V
inst✝³ : Fintype ↑(edgeSet G)
inst✝² : Fintype ↑(edgeSet G₁)
inst✝¹ : Fintype ↑(edgeSet G₂)
inst✝ : DecidableEq V
⊢ edgeFinset (G₁ \ G₂) = edgeFinset G₁ \ edgeFinset G₂
[PROOFSTEP]
simp [edgeFinset]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
v w : V
⊢ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ incidenceSet G v ↔ Adj G v w
[PROOFSTEP]
simp [incidenceSet]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
v w : V
⊢ Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ incidenceSet G v ↔ w ∈ neighborSet G v
[PROOFSTEP]
simp only [mem_incidenceSet, mem_neighborSet]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
⊢ incidenceSet G v ∩ incidenceSet G (Sym2.Mem.other h) = {e}
[PROOFSTEP]
ext e'
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
e' : Sym2 V
⊢ e' ∈ incidenceSet G v ∩ incidenceSet G (Sym2.Mem.other h) ↔ e' ∈ {e}
[PROOFSTEP]
simp only [incidenceSet, Set.mem_sep_iff, Set.mem_inter_iff, Set.mem_singleton_iff]
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
e' : Sym2 V
⊢ (e' ∈ edgeSet G ∧ v ∈ e') ∧ e' ∈ edgeSet G ∧ Sym2.Mem.other h ∈ e' ↔ e' = e
[PROOFSTEP]
refine' ⟨fun h' => _, _⟩
[GOAL]
case h.refine'_1
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
e' : Sym2 V
h' : (e' ∈ edgeSet G ∧ v ∈ e') ∧ e' ∈ edgeSet G ∧ Sym2.Mem.other h ∈ e'
⊢ e' = e
[PROOFSTEP]
rw [← Sym2.other_spec h]
[GOAL]
case h.refine'_1
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
e' : Sym2 V
h' : (e' ∈ edgeSet G ∧ v ∈ e') ∧ e' ∈ edgeSet G ∧ Sym2.Mem.other h ∈ e'
⊢ e' = Quotient.mk (Sym2.Rel.setoid V) (v, Sym2.Mem.other h)
[PROOFSTEP]
exact (Sym2.mem_and_mem_iff (edge_other_ne G he h).symm).mp ⟨h'.1.2, h'.2.2⟩
[GOAL]
case h.refine'_2
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
v : V
e : Sym2 V
he : e ∈ edgeSet G
h : v ∈ e
e' : Sym2 V
⊢ e' = e → (e' ∈ edgeSet G ∧ v ∈ e') ∧ e' ∈ edgeSet G ∧ Sym2.Mem.other h ∈ e'
[PROOFSTEP]
rintro rfl
[GOAL]
case h.refine'_2
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
v : V
e' : Sym2 V
he : e' ∈ edgeSet G
h : v ∈ e'
⊢ (e' ∈ edgeSet G ∧ v ∈ e') ∧ e' ∈ edgeSet G ∧ Sym2.Mem.other h ∈ e'
[PROOFSTEP]
exact ⟨⟨he, h⟩, he, Sym2.other_mem _⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
G : SimpleGraph V
v : V
⊢ Disjoint (neighborSet G v) (neighborSet Gᶜ v)
[PROOFSTEP]
rw [Set.disjoint_iff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
G : SimpleGraph V
v : V
⊢ neighborSet G v ∩ neighborSet Gᶜ v ⊆ ∅
[PROOFSTEP]
rintro w ⟨h, h'⟩
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
h : w ∈ neighborSet G v
h' : w ∈ neighborSet Gᶜ v
⊢ w ∈ ∅
[PROOFSTEP]
rw [mem_neighborSet, compl_adj] at h'
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
h : w ∈ neighborSet G v
h' : v ≠ w ∧ ¬Adj G v w
⊢ w ∈ ∅
[PROOFSTEP]
exact h'.2 h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
G : SimpleGraph V
v : V
⊢ neighborSet G v ∪ neighborSet Gᶜ v = {v}ᶜ
[PROOFSTEP]
ext w
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
⊢ w ∈ neighborSet G v ∪ neighborSet Gᶜ v ↔ w ∈ {v}ᶜ
[PROOFSTEP]
have h := @ne_of_adj _ G
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
h : ∀ {a b : V}, Adj G a b → a ≠ b
⊢ w ∈ neighborSet G v ∪ neighborSet Gᶜ v ↔ w ∈ {v}ᶜ
[PROOFSTEP]
simp_rw [Set.mem_union, mem_neighborSet, compl_adj, Set.mem_compl_iff, Set.mem_singleton_iff]
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
h : ∀ {a b : V}, Adj G a b → a ≠ b
⊢ Adj G v w ∨ v ≠ w ∧ ¬Adj G v w ↔ ¬w = v
[PROOFSTEP]
tauto
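The four steps above assemble into this partition-style fact (lemma name assumed): every vertex other than `v` is a neighbour of `v` either in `G` or in `Gᶜ`.

-- reconstruction sketch; lemma name assumed
theorem neighborSet_union_compl_neighborSet_eq (G : SimpleGraph V) (v : V) :
    G.neighborSet v ∪ Gᶜ.neighborSet v = {v}ᶜ := by
  ext w
  have h := @ne_of_adj _ G
  simp_rw [Set.mem_union, mem_neighborSet, compl_adj, Set.mem_compl_iff,
    Set.mem_singleton_iff]
  -- with irreflexivity available as h, the case split is pure propositional logic
  tauto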
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
v : V
inst✝ : Fintype ↑(neighborSet G v ∪ neighborSet Gᶜ v)
⊢ card (Set.toFinset (neighborSet G v ∪ neighborSet Gᶜ v)) = Fintype.card V - 1
[PROOFSTEP]
classical simp_rw [neighborSet_union_compl_neighborSet_eq, Set.toFinset_compl, Finset.card_compl, Set.toFinset_card,
Set.card_singleton]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
v : V
inst✝ : Fintype ↑(neighborSet G v ∪ neighborSet Gᶜ v)
⊢ card (Set.toFinset (neighborSet G v ∪ neighborSet Gᶜ v)) = Fintype.card V - 1
[PROOFSTEP]
simp_rw [neighborSet_union_compl_neighborSet_eq, Set.toFinset_compl, Finset.card_compl, Set.toFinset_card,
Set.card_singleton]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
G : SimpleGraph V
v : V
⊢ neighborSet Gᶜ v = (neighborSet G v)ᶜ \ {v}
[PROOFSTEP]
ext w
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
G : SimpleGraph V
v w : V
⊢ w ∈ neighborSet Gᶜ v ↔ w ∈ (neighborSet G v)ᶜ \ {v}
[PROOFSTEP]
simp [and_comm, eq_comm]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
v w : V
⊢ commonNeighbors ⊤ v w = Set.univ \ {v, w}
[PROOFSTEP]
ext u
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u✝ v✝ w✝ : V
e : Sym2 V
v w u : V
⊢ u ∈ commonNeighbors ⊤ v w ↔ u ∈ Set.univ \ {v, w}
[PROOFSTEP]
simp [commonNeighbors, eq_comm, not_or]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
inst✝ : DecidableEq V
v : V
e : Sym2 V
h : e ∈ incidenceSet G v
⊢ e ∈ incidenceSet G (otherVertexOfIncident G h)
[PROOFSTEP]
use h.1
[GOAL]
case right
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
inst✝ : DecidableEq V
v : V
e : Sym2 V
h : e ∈ incidenceSet G v
⊢ otherVertexOfIncident G h ∈ e
[PROOFSTEP]
simp [otherVertexOfIncident, Sym2.other_mem']
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
inst✝ : DecidableEq V
v : V
e : Sym2 V
h : e ∈ incidenceSet G v
⊢ otherVertexOfIncident G h ∈ neighborSet G v
[PROOFSTEP]
cases' h with he hv
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e✝ : Sym2 V
inst✝ : DecidableEq V
v : V
e : Sym2 V
he : e ∈ edgeSet G
hv : v ∈ e
⊢ otherVertexOfIncident G (_ : e ∈ edgeSet G ∧ v ∈ e) ∈ neighborSet G v
[PROOFSTEP]
rwa [← Sym2.other_spec' hv, mem_edgeSet] at he
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝ : DecidableEq V
v : V
x : ↑(incidenceSet G v)
⊢ (fun w =>
{ val := Quotient.mk (Sym2.Rel.setoid V) (v, ↑w),
property := (_ : Quotient.mk (Sym2.Rel.setoid V) (v, ↑w) ∈ incidenceSet G v) })
((fun e =>
{ val := otherVertexOfIncident G (_ : ↑e ∈ incidenceSet G v),
property := (_ : otherVertexOfIncident G (_ : ↑e ∈ incidenceSet G v) ∈ neighborSet G v) })
x) =
x
[PROOFSTEP]
simp [otherVertexOfIncident]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝ : DecidableEq V
v : V
x✝ : ↑(neighborSet G v)
w : V
hw : w ∈ neighborSet G v
⊢ (fun e =>
{ val := otherVertexOfIncident G (_ : ↑e ∈ incidenceSet G v),
property := (_ : otherVertexOfIncident G (_ : ↑e ∈ incidenceSet G v) ∈ neighborSet G v) })
((fun w =>
{ val := Quotient.mk (Sym2.Rel.setoid V) (v, ↑w),
property := (_ : Quotient.mk (Sym2.Rel.setoid V) (v, ↑w) ∈ incidenceSet G v) })
{ val := w, property := hw }) =
{ val := w, property := hw }
[PROOFSTEP]
simp only [mem_neighborSet, Subtype.mk.injEq]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝ : DecidableEq V
v : V
x✝ : ↑(neighborSet G v)
w : V
hw : w ∈ neighborSet G v
⊢ otherVertexOfIncident G
(_ :
↑((fun w =>
{ val := Quotient.mk (Sym2.Rel.setoid V) (v, ↑w),
property := (_ : Quotient.mk (Sym2.Rel.setoid V) (v, ↑w) ∈ incidenceSet G v) })
{ val := w, property := hw }) ∈
incidenceSet G v) =
w
[PROOFSTEP]
exact incidence_other_neighbor_edge _ hw
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u v w : V
e : Sym2 V
s : Set (Sym2 V)
a b : V
⊢ (G.Adj \ Sym2.ToRel s) a b → (G.Adj \ Sym2.ToRel s) b a
[PROOFSTEP]
simp [adj_comm, Sym2.eq_swap]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a✝ b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
a : V
⊢ ¬(G.Adj \ Sym2.ToRel s) a a
[PROOFSTEP]
simp [SDiff.sdiff]
-- porting note: used to be handled by `obviously`
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u v w : V
e : Sym2 V
G G' : SimpleGraph V
⊢ G \ G' = deleteEdges G (edgeSet G')
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u v w : V
e : Sym2 V
G G' : SimpleGraph V
x✝¹ x✝ : V
⊢ Adj (G \ G') x✝¹ x✝ ↔ Adj (deleteEdges G (edgeSet G')) x✝¹ x✝
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ deleteEdges G s = G \ fromEdgeSet s
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
x✝¹ x✝ : V
⊢ Adj (deleteEdges G s) x✝¹ x✝ ↔ Adj (G \ fromEdgeSet s) x✝¹ x✝
[PROOFSTEP]
exact ⟨fun h => ⟨h.1, not_and_of_not_left _ h.2⟩, fun h => ⟨h.1, not_and'.mp h.2 h.ne⟩⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ Gᶜ = deleteEdges ⊤ (edgeSet G)
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
x✝¹ x✝ : V
⊢ Adj Gᶜ x✝¹ x✝ ↔ Adj (deleteEdges ⊤ (edgeSet G)) x✝¹ x✝
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s s' : Set (Sym2 V)
⊢ deleteEdges (deleteEdges G s) s' = deleteEdges G (s ∪ s')
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s s' : Set (Sym2 V)
x✝¹ x✝ : V
⊢ Adj (deleteEdges (deleteEdges G s) s') x✝¹ x✝ ↔ Adj (deleteEdges G (s ∪ s')) x✝¹ x✝
[PROOFSTEP]
simp [and_assoc, not_or]
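A sketch of the composition law for edge deletion read off from this trace (lemma name assumed): deleting in two rounds equals deleting the union.

-- reconstruction sketch; lemma name assumed
theorem deleteEdges_deleteEdges (s s' : Set (Sym2 V)) :
    (G.deleteEdges s).deleteEdges s' = G.deleteEdges (s ∪ s') := by
  ext
  simp [and_assoc, not_or]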
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ deleteEdges G ∅ = G
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
x✝¹ x✝ : V
⊢ Adj (deleteEdges G ∅) x✝¹ x✝ ↔ Adj G x✝¹ x✝
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
⊢ deleteEdges G Set.univ = ⊥
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
x✝¹ x✝ : V
⊢ Adj (deleteEdges G Set.univ) x✝¹ x✝ ↔ Adj ⊥ x✝¹ x✝
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ deleteEdges G s ≤ G
[PROOFSTEP]
intro
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
v✝ : V
⊢ ∀ ⦃w : V⦄, Adj (deleteEdges G s) v✝ w → Adj G v✝ w
[PROOFSTEP]
simp (config := { contextual := true })
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s s' : Set (Sym2 V)
h : s ⊆ s'
v w : V
⊢ Adj (deleteEdges G s') v w → Adj (deleteEdges G s) v w
[PROOFSTEP]
simp (config := { contextual := true }) only [deleteEdges_adj, and_imp, true_and_iff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
s s' : Set (Sym2 V)
h : s ⊆ s'
v w : V
⊢ Adj G v w → ¬Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ s' → ¬Quotient.mk (Sym2.Rel.setoid V) (v, w) ∈ s
[PROOFSTEP]
exact fun _ hn hs => hn (h hs)
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ deleteEdges G s = deleteEdges G (s ∩ edgeSet G)
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
x✝¹ x✝ : V
⊢ Adj (deleteEdges G s) x✝¹ x✝ ↔ Adj (deleteEdges G (s ∩ edgeSet G)) x✝¹ x✝
[PROOFSTEP]
simp (config := { contextual := true }) [imp_false]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
H : SimpleGraph V
h : H ≤ G
⊢ deleteEdges G (edgeSet G \ edgeSet H) = H
[PROOFSTEP]
ext v w
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
H : SimpleGraph V
h : H ≤ G
v w : V
⊢ Adj (deleteEdges G (edgeSet G \ edgeSet H)) v w ↔ Adj H v w
[PROOFSTEP]
constructor
[GOAL]
case Adj.h.h.a.mp
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
H : SimpleGraph V
h : H ≤ G
v w : V
⊢ Adj (deleteEdges G (edgeSet G \ edgeSet H)) v w → Adj H v w
[PROOFSTEP]
simp (config := { contextual := true }) [@h v w]
[GOAL]
case Adj.h.h.a.mpr
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
H : SimpleGraph V
h : H ≤ G
v w : V
⊢ Adj H v w → Adj (deleteEdges G (edgeSet G \ edgeSet H)) v w
[PROOFSTEP]
simp (config := { contextual := true }) [@h v w]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s : Set (Sym2 V)
⊢ edgeSet (deleteEdges G s) = edgeSet G \ s
[PROOFSTEP]
ext e
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
s : Set (Sym2 V)
e : Sym2 V
⊢ e ∈ edgeSet (deleteEdges G s) ↔ e ∈ edgeSet G \ s
[PROOFSTEP]
refine' Sym2.ind _ e
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
s : Set (Sym2 V)
e : Sym2 V
⊢ ∀ (x y : V),
Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ edgeSet (deleteEdges G s) ↔
Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ edgeSet G \ s
[PROOFSTEP]
simp
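Reconstruction of the edge-set computation for `deleteEdges` (lemma name assumed); the `Sym2.ind` step reduces the membership statement to representatives, as the final goal above shows.

-- reconstruction sketch; lemma name assumed
theorem edgeSet_deleteEdges (s : Set (Sym2 V)) :
    (G.deleteEdges s).edgeSet = G.edgeSet \ s := by
  ext e
  refine' Sym2.ind _ e
  simp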
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝³ : Fintype (Sym2 V)
inst✝² : DecidableEq V
inst✝¹ : DecidableRel G.Adj
s : Finset (Sym2 V)
inst✝ : DecidableRel (deleteEdges G ↑s).Adj
⊢ edgeFinset (deleteEdges G ↑s) = edgeFinset G \ s
[PROOFSTEP]
ext e
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
inst✝³ : Fintype (Sym2 V)
inst✝² : DecidableEq V
inst✝¹ : DecidableRel G.Adj
s : Finset (Sym2 V)
inst✝ : DecidableRel (deleteEdges G ↑s).Adj
e : Sym2 V
⊢ e ∈ edgeFinset (deleteEdges G ↑s) ↔ e ∈ edgeFinset G \ s
[PROOFSTEP]
simp [edgeSet_deleteEdges]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝⁴ : OrderedRing 𝕜
inst✝³ : Fintype V
inst✝² : Fintype (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
p : SimpleGraph V → Prop
r r₁ r₂ : 𝕜
⊢ DeleteFar G p r ↔ ∀ ⦃H : SimpleGraph V⦄, H ≤ G → p H → r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
[PROOFSTEP]
refine' ⟨fun h H hHG hH => _, fun h s hs hG => _⟩
[GOAL]
case refine'_1
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝⁴ : OrderedRing 𝕜
inst✝³ : Fintype V
inst✝² : Fintype (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
p : SimpleGraph V → Prop
r r₁ r₂ : 𝕜
h : DeleteFar G p r
H : SimpleGraph V
hHG : H ≤ G
hH : p H
⊢ r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
[PROOFSTEP]
have := h (sdiff_subset G.edgeFinset H.edgeFinset)
[GOAL]
case refine'_1
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝⁴ : OrderedRing 𝕜
inst✝³ : Fintype V
inst✝² : Fintype (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
p : SimpleGraph V → Prop
r r₁ r₂ : 𝕜
h : DeleteFar G p r
H : SimpleGraph V
hHG : H ≤ G
hH : p H
this : p (deleteEdges G ↑(edgeFinset G \ edgeFinset H)) → r ≤ ↑(card (edgeFinset G \ edgeFinset H))
⊢ r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
[PROOFSTEP]
simp only [deleteEdges_sdiff_eq_of_le _ hHG, edgeFinset_mono hHG, card_sdiff, card_le_of_subset, coe_sdiff,
coe_edgeFinset, Nat.cast_sub] at this
[GOAL]
case refine'_1
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝⁴ : OrderedRing 𝕜
inst✝³ : Fintype V
inst✝² : Fintype (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
p : SimpleGraph V → Prop
r r₁ r₂ : 𝕜
h : DeleteFar G p r
H : SimpleGraph V
hHG : H ≤ G
hH : p H
this : p H → r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
⊢ r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
[PROOFSTEP]
exact this hH
[GOAL]
case refine'_2
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝⁴ : OrderedRing 𝕜
inst✝³ : Fintype V
inst✝² : Fintype (Sym2 V)
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
p : SimpleGraph V → Prop
r r₁ r₂ : 𝕜
h : ∀ ⦃H : SimpleGraph V⦄, H ≤ G → p H → r ≤ ↑(card (edgeFinset G)) - ↑(card (edgeFinset H))
s : Finset (Sym2 V)
hs : s ⊆ edgeFinset G
hG : p (deleteEdges G ↑s)
⊢ r ≤ ↑(card s)
[PROOFSTEP]
simpa [card_sdiff hs, edgeFinset_deleteEdges, -Set.toFinset_card, Nat.cast_sub, card_le_of_subset hs] using
h (G.deleteEdges_le s) hG
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a✝ b✝ c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
a b : W
⊢ Relation.Map G.Adj (↑f) (↑f) a b → Relation.Map G.Adj (↑f) (↑f) b a
[PROOFSTEP]
rintro ⟨v, w, h, rfl, rfl⟩
[GOAL]
case intro.intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
v w : V
h : Adj G v w
⊢ Relation.Map G.Adj (↑f) (↑f) (↑f w) (↑f v)
[PROOFSTEP]
use w, v, h.symm, rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a✝ b c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
a : W
⊢ ¬Relation.Map G.Adj (↑f) (↑f) a a
[PROOFSTEP]
rintro ⟨v, w, h, rfl, h'⟩
[GOAL]
case intro.intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
v w : V
h : Adj G v w
h' : ↑f w = ↑f v
⊢ False
[PROOFSTEP]
exact h.ne (f.injective h'.symm)
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
⊢ Monotone (SimpleGraph.map f)
[PROOFSTEP]
rintro G G' h _ _ ⟨u, v, ha, rfl, rfl⟩
[GOAL]
case intro.intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u✝ v✝ w : V
e : Sym2 V
f : V ↪ W
G G' : SimpleGraph V
h : G ≤ G'
u v : V
ha : Adj G u v
⊢ Adj (SimpleGraph.map f G') (↑f u) (↑f v)
[PROOFSTEP]
exact ⟨_, _, h ha, rfl, rfl⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
⊢ Monotone (SimpleGraph.comap ↑f)
[PROOFSTEP]
intro G G' h _ _ ha
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
G G' : SimpleGraph W
h : G ≤ G'
v✝ w✝ : V
ha : Adj (SimpleGraph.comap (↑f) G) v✝ w✝
⊢ Adj (SimpleGraph.comap (↑f) G') v✝ w✝
[PROOFSTEP]
exact h ha
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
⊢ SimpleGraph.comap (↑f) (SimpleGraph.map f G) = G
[PROOFSTEP]
ext
[GOAL]
case Adj.h.h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
x✝¹ x✝ : V
⊢ Adj (SimpleGraph.comap (↑f) (SimpleGraph.map f G)) x✝¹ x✝ ↔ Adj G x✝¹ x✝
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
G' : SimpleGraph W
⊢ G ≤ SimpleGraph.comap (↑f) G' → SimpleGraph.map f G ≤ G'
[PROOFSTEP]
rintro h _ _ ⟨u, v, ha, rfl, rfl⟩
[GOAL]
case intro.intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G'✝ : SimpleGraph W
a b c u✝ v✝ w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph V
G' : SimpleGraph W
h : G ≤ SimpleGraph.comap (↑f) G'
u v : V
ha : Adj G u v
⊢ Adj G' (↑f u) (↑f v)
[PROOFSTEP]
exact h ha
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
G : SimpleGraph W
⊢ SimpleGraph.map f (SimpleGraph.comap (↑f) G) ≤ G
[PROOFSTEP]
rw [map_le_iff_le_comap]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V → W
inst✝ : Subsingleton V
⊢ G ≤ SimpleGraph.comap f G'
[PROOFSTEP]
intros v w
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : V → W
inst✝ : Subsingleton V
v w : V
⊢ Adj G v w → Adj (SimpleGraph.comap f G') v w
[PROOFSTEP]
simp [Subsingleton.elim v w]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
inst✝ : Subsingleton V
⊢ SimpleGraph.map f G ≤ G'
[PROOFSTEP]
rw [map_le_iff_le_comap]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : V ↪ W
inst✝ : Subsingleton V
⊢ G ≤ SimpleGraph.comap (↑f) G'
[PROOFSTEP]
apply le_comap_of_subsingleton
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
v : V
⊢ induce {v} G = ⊤
[PROOFSTEP]
rw [eq_top_iff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
v : V
⊢ ⊤ ≤ induce {v} G
[PROOFSTEP]
apply le_comap_of_subsingleton
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝ : Fintype ↑(neighborSet G v)
⊢ ¬v ∈ neighborFinset G v
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝ : Fintype ↑(neighborSet G v)
⊢ 0 < degree G v ↔ ∃ w, Adj G v w
[PROOFSTEP]
simp only [degree, card_pos, Finset.Nonempty, mem_neighborFinset]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : Fintype ↑(neighborSet Gᶜ v)
inst✝ : Fintype V
⊢ degree Gᶜ v = Fintype.card V - 1 - degree G v
[PROOFSTEP]
classical
rw [← card_neighborSet_union_compl_neighborSet G v, Set.toFinset_union]
simp [card_disjoint_union (Set.disjoint_toFinset.mpr (compl_neighborSet_disjoint G v))]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : Fintype ↑(neighborSet Gᶜ v)
inst✝ : Fintype V
⊢ degree Gᶜ v = Fintype.card V - 1 - degree G v
[PROOFSTEP]
rw [← card_neighborSet_union_compl_neighborSet G v, Set.toFinset_union]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : Fintype ↑(neighborSet Gᶜ v)
inst✝ : Fintype V
⊢ degree Gᶜ v = card (Set.toFinset (neighborSet G v) ∪ Set.toFinset (neighborSet Gᶜ v)) - degree G v
[PROOFSTEP]
simp [card_disjoint_union (Set.disjoint_toFinset.mpr (compl_neighborSet_disjoint G v))]
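The two variants of this trace (with and without the leading `classical`) suggest the whole proof is the `classical` block; a sketch under that assumption, with the instance arguments taken from the context shown above (the lemma name `degree_compl` is also an assumption).

-- reconstruction sketch; lemma name assumed
theorem degree_compl [Fintype (G.neighborSet v)] [Fintype (Gᶜ.neighborSet v)] [Fintype V] :
    Gᶜ.degree v = Fintype.card V - 1 - G.degree v := by
  classical
  -- express card V - 1 as the size of the disjoint union of the two neighbour sets
  rw [← card_neighborSet_union_compl_neighborSet G v, Set.toFinset_union]
  simp [card_disjoint_union (Set.disjoint_toFinset.mpr (compl_neighborSet_disjoint G v))]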
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype ↑(neighborSet G v)
inst✝ : DecidableEq V
⊢ Fintype.card ↑(incidenceSet G v) = degree G v
[PROOFSTEP]
rw [Fintype.card_congr (G.incidenceSetEquivNeighborSet v)]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype ↑(neighborSet G v)
inst✝ : DecidableEq V
⊢ Fintype.card ↑(neighborSet G v) = degree G v
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype ↑(neighborSet G v)
inst✝ : DecidableEq V
⊢ card (incidenceFinset G v) = degree G v
[PROOFSTEP]
rw [← G.card_incidenceSet_eq_degree]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype ↑(neighborSet G v)
inst✝ : DecidableEq V
⊢ card (incidenceFinset G v) = Fintype.card ↑(incidenceSet G v)
[PROOFSTEP]
apply Set.toFinset_card
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : DecidableEq V
inst✝ : Fintype ↑(edgeSet G)
⊢ incidenceFinset G v = filter (Membership.mem v) (edgeFinset G)
[PROOFSTEP]
ext e
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : DecidableEq V
inst✝ : Fintype ↑(edgeSet G)
e : Sym2 V
⊢ e ∈ incidenceFinset G v ↔ e ∈ filter (Membership.mem v) (edgeFinset G)
[PROOFSTEP]
refine' Sym2.ind (fun x y => _) e
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
inst✝² : Fintype ↑(neighborSet G v)
inst✝¹ : DecidableEq V
inst✝ : Fintype ↑(edgeSet G)
e : Sym2 V
x y : V
⊢ Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ incidenceFinset G v ↔
Quotient.mk (Sym2.Rel.setoid V) (x, y) ∈ filter (Membership.mem v) (edgeFinset G)
[PROOFSTEP]
simp [mk'_mem_incidenceSet_iff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝³ : LocallyFinite G✝
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
k : ℕ
h : IsRegularOfDegree G k
⊢ IsRegularOfDegree Gᶜ (Fintype.card V - 1 - k)
[PROOFSTEP]
intro v
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝³ : LocallyFinite G✝
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
k : ℕ
h : IsRegularOfDegree G k
v : V
⊢ degree Gᶜ v = Fintype.card V - 1 - k
[PROOFSTEP]
rw [degree_compl, h v]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ DecidablePred fun x => x ∈ neighborSet G v
[PROOFSTEP]
simp_rw [mem_neighborSet]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ DecidablePred fun x => Adj G v x
[PROOFSTEP]
infer_instance
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
v : V
inst✝ : DecidableRel G.Adj
⊢ neighborFinset G v = filter (Adj G v) univ
[PROOFSTEP]
ext
[GOAL]
case a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
v : V
inst✝ : DecidableRel G.Adj
a✝ : V
⊢ a✝ ∈ neighborFinset G v ↔ a✝ ∈ filter (Adj G v) univ
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableEq V
inst✝ : DecidableRel G.Adj
v : V
⊢ neighborFinset Gᶜ v = (neighborFinset G v)ᶜ \ {v}
[PROOFSTEP]
simp only [neighborFinset, neighborSet_compl, Set.toFinset_diff, Set.toFinset_compl, Set.toFinset_singleton]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v : V
⊢ degree ⊤ v = Fintype.card V - 1
[PROOFSTEP]
erw [degree, neighborFinset_eq_filter, filter_ne, card_erase_of_mem (mem_univ v), card_univ]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝ : Fintype V
v : V
⊢ degree ⊥ v = 0
[PROOFSTEP]
erw [degree, neighborFinset_eq_filter, filter_False]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝ : Fintype V
v : V
⊢ card ∅ = 0
[PROOFSTEP]
exact Finset.card_empty
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
⊢ IsRegularOfDegree ⊤ (Fintype.card V - 1)
[PROOFSTEP]
intro v
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v : V
⊢ degree ⊤ v = Fintype.card V - 1
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
⊢ ∃ v, minDegree G = degree G v
[PROOFSTEP]
obtain ⟨t, ht : _ = _⟩ := min_of_nonempty (univ_nonempty.image fun v => G.degree v)
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
t : ℕ
ht : Finset.min (image (fun v => degree G v) univ) = ↑t
⊢ ∃ v, minDegree G = degree G v
[PROOFSTEP]
obtain ⟨v, _, rfl⟩ := mem_image.mp (mem_of_min ht)
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
left✝ : v ∈ univ
ht : Finset.min (image (fun v => degree G v) univ) = ↑(degree G v)
⊢ ∃ v, minDegree G = degree G v
[PROOFSTEP]
refine' ⟨v, by simp [minDegree, ht]⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
left✝ : v ∈ univ
ht : Finset.min (image (fun v => degree G v) univ) = ↑(degree G v)
⊢ minDegree G = degree G v
[PROOFSTEP]
simp [minDegree, ht]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ minDegree G ≤ degree G v
[PROOFSTEP]
obtain ⟨t, ht⟩ := Finset.min_of_mem (mem_image_of_mem (fun v => G.degree v) (mem_univ v))
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
t : ℕ
ht : Finset.min (image (fun v => degree G v) univ) = ↑t
⊢ minDegree G ≤ degree G v
[PROOFSTEP]
have := Finset.min_le_of_eq (mem_image_of_mem _ (mem_univ v)) ht
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
t : ℕ
ht : Finset.min (image (fun v => degree G v) univ) = ↑t
this : t ≤ degree G v
⊢ minDegree G ≤ degree G v
[PROOFSTEP]
rwa [minDegree, ht]
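The three steps above reconstruct into the standard bound (lemma name assumed): the minimum degree is at most every individual degree.

-- reconstruction sketch; lemma name assumed
theorem minDegree_le_degree [Fintype V] [DecidableRel G.Adj] (v : V) :
    G.minDegree ≤ G.degree v := by
  -- the degree of v occurs in the image over which the minimum is taken
  obtain ⟨t, ht⟩ := Finset.min_of_mem (mem_image_of_mem (fun v => G.degree v) (mem_univ v))
  have := Finset.min_le_of_eq (mem_image_of_mem _ (mem_univ v)) ht
  rwa [minDegree, ht]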
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
k : ℕ
h : ∀ (v : V), k ≤ degree G v
⊢ k ≤ minDegree G
[PROOFSTEP]
rcases G.exists_minimal_degree_vertex with ⟨v, hv⟩
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
k : ℕ
h : ∀ (v : V), k ≤ degree G v
v : V
hv : minDegree G = degree G v
⊢ k ≤ minDegree G
[PROOFSTEP]
rw [hv]
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
k : ℕ
h : ∀ (v : V), k ≤ degree G v
v : V
hv : minDegree G = degree G v
⊢ k ≤ degree G v
[PROOFSTEP]
apply h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
⊢ ∃ v, maxDegree G = degree G v
[PROOFSTEP]
obtain ⟨t, ht⟩ := max_of_nonempty (univ_nonempty.image fun v => G.degree v)
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
t : ℕ
ht : Finset.max (image (fun v => degree G v) univ) = ↑t
⊢ ∃ v, maxDegree G = degree G v
[PROOFSTEP]
have ht₂ := mem_of_max ht
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
t : ℕ
ht : Finset.max (image (fun v => degree G v) univ) = ↑t
ht₂ : t ∈ image (fun v => degree G v) univ
⊢ ∃ v, maxDegree G = degree G v
[PROOFSTEP]
simp only [mem_image, mem_univ, exists_prop_of_true] at ht₂
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
t : ℕ
ht : Finset.max (image (fun v => degree G v) univ) = ↑t
ht₂ : ∃ a, True ∧ degree G a = t
⊢ ∃ v, maxDegree G = degree G v
[PROOFSTEP]
rcases ht₂ with ⟨v, _, rfl⟩
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
left✝ : True
ht : Finset.max (image (fun v => degree G v) univ) = ↑(degree G v)
⊢ ∃ v, maxDegree G = degree G v
[PROOFSTEP]
refine' ⟨v, _⟩
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
left✝ : True
ht : Finset.max (image (fun v => degree G v) univ) = ↑(degree G v)
⊢ maxDegree G = degree G v
[PROOFSTEP]
rw [maxDegree, ht]
[GOAL]
case intro.intro.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
left✝ : True
ht : Finset.max (image (fun v => degree G v) univ) = ↑(degree G v)
⊢ Option.getD (↑(degree G v)) 0 = degree G v
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ degree G v ≤ maxDegree G
[PROOFSTEP]
obtain ⟨t, ht : _ = _⟩ := Finset.max_of_mem (mem_image_of_mem (fun v => G.degree v) (mem_univ v))
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
t : ℕ
ht : Finset.max (image (fun v => degree G v) univ) = ↑t
⊢ degree G v ≤ maxDegree G
[PROOFSTEP]
have := Finset.le_max_of_eq (mem_image_of_mem _ (mem_univ v)) ht
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
t : ℕ
ht : Finset.max (image (fun v => degree G v) univ) = ↑t
this : degree G v ≤ t
⊢ degree G v ≤ maxDegree G
[PROOFSTEP]
rwa [maxDegree, ht]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
⊢ maxDegree G ≤ k
[PROOFSTEP]
by_cases hV : (univ : Finset V).Nonempty
[GOAL]
case pos
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : Finset.Nonempty univ
⊢ maxDegree G ≤ k
[PROOFSTEP]
haveI : Nonempty V := univ_nonempty_iff.mp hV
[GOAL]
case pos
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : Finset.Nonempty univ
this : Nonempty V
⊢ maxDegree G ≤ k
[PROOFSTEP]
obtain ⟨v, hv⟩ := G.exists_maximal_degree_vertex
[GOAL]
case pos.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : Finset.Nonempty univ
this : Nonempty V
v : V
hv : maxDegree G = degree G v
⊢ maxDegree G ≤ k
[PROOFSTEP]
rw [hv]
[GOAL]
case pos.intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : Finset.Nonempty univ
this : Nonempty V
v : V
hv : maxDegree G = degree G v
⊢ degree G v ≤ k
[PROOFSTEP]
apply h
[GOAL]
case neg
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : ¬Finset.Nonempty univ
⊢ maxDegree G ≤ k
[PROOFSTEP]
rw [not_nonempty_iff_eq_empty] at hV
[GOAL]
case neg
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : univ = ∅
⊢ maxDegree G ≤ k
[PROOFSTEP]
rw [maxDegree, hV, image_empty]
[GOAL]
case neg
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
k : ℕ
h : ∀ (v : V), degree G v ≤ k
hV : univ = ∅
⊢ Option.getD (Finset.max ∅) 0 ≤ k
[PROOFSTEP]
exact zero_le k
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ degree G v < Fintype.card V
[PROOFSTEP]
classical
apply Finset.card_lt_card
rw [Finset.ssubset_iff]
exact ⟨v, by simp, Finset.subset_univ _⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ degree G v < Fintype.card V
[PROOFSTEP]
apply Finset.card_lt_card
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ neighborFinset G v ⊂ univ
[PROOFSTEP]
rw [Finset.ssubset_iff]
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ ∃ a x, insert a (neighborFinset G v) ⊆ univ
[PROOFSTEP]
exact ⟨v, by simp, Finset.subset_univ _⟩
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v : V
⊢ ¬v ∈ neighborFinset G v
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
⊢ maxDegree G < Fintype.card V
[PROOFSTEP]
cases' G.exists_maximal_degree_vertex with v hv
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
hv : maxDegree G = degree G v
⊢ maxDegree G < Fintype.card V
[PROOFSTEP]
rw [hv]
[GOAL]
case intro
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
inst✝² : Fintype V
inst✝¹ : DecidableRel G.Adj
inst✝ : Nonempty V
v : V
hv : maxDegree G = degree G v
⊢ degree G v < Fintype.card V
[PROOFSTEP]
apply G.degree_lt_card_verts v
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v w : V
⊢ Fintype.card ↑(commonNeighbors G v w) ≤ degree G v
[PROOFSTEP]
rw [← card_neighborSet_eq_degree]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v w : V
⊢ Fintype.card ↑(commonNeighbors G v w) ≤ Fintype.card ↑(neighborSet G v)
[PROOFSTEP]
exact Set.card_le_of_subset (Set.inter_subset_left _ _)
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableRel G.Adj
v w : V
⊢ Fintype.card ↑(commonNeighbors G v w) ≤ degree G w
[PROOFSTEP]
simp_rw [commonNeighbors_symm _ v w, card_commonNeighbors_le_degree_left]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ Fintype.card ↑(commonNeighbors G v w) < degree G v
[PROOFSTEP]
classical
erw [← Set.toFinset_card]
apply Finset.card_lt_card
rw [Finset.ssubset_iff]
use w
constructor
· rw [Finset.insert_subset_iff]
constructor
· simpa
· rw [neighborFinset, Set.toFinset_subset_toFinset]
exact G.commonNeighbors_subset_neighborSet_left _ _
· rw [Set.mem_toFinset]
apply not_mem_commonNeighbors_right
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ Fintype.card ↑(commonNeighbors G v w) < degree G v
[PROOFSTEP]
erw [← Set.toFinset_card]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ card (Set.toFinset (commonNeighbors G v w)) < degree G v
[PROOFSTEP]
apply Finset.card_lt_card
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ Set.toFinset (commonNeighbors G v w) ⊂ neighborFinset G v
[PROOFSTEP]
rw [Finset.ssubset_iff]
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ ∃ a x, insert a (Set.toFinset (commonNeighbors G v w)) ⊆ neighborFinset G v
[PROOFSTEP]
use w
[GOAL]
case h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ ∃ x, insert w (Set.toFinset (commonNeighbors G v w)) ⊆ neighborFinset G v
[PROOFSTEP]
constructor
[GOAL]
case h.h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ insert w (Set.toFinset (commonNeighbors G v w)) ⊆ neighborFinset G v
[PROOFSTEP]
rw [Finset.insert_subset_iff]
[GOAL]
case h.h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ w ∈ neighborFinset G v ∧ Set.toFinset (commonNeighbors G v w) ⊆ neighborFinset G v
[PROOFSTEP]
constructor
[GOAL]
case h.h.left
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ w ∈ neighborFinset G v
[PROOFSTEP]
simpa
[GOAL]
case h.h.right
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ Set.toFinset (commonNeighbors G v w) ⊆ neighborFinset G v
[PROOFSTEP]
rw [neighborFinset, Set.toFinset_subset_toFinset]
[GOAL]
case h.h.right
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ commonNeighbors G v w ⊆ neighborSet G v
[PROOFSTEP]
exact G.commonNeighbors_subset_neighborSet_left _ _
[GOAL]
case h.w
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ ¬w ∈ Set.toFinset (commonNeighbors G v w)
[PROOFSTEP]
rw [Set.mem_toFinset]
[GOAL]
case h.w
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
v w : V
h : Adj G v w
⊢ ¬w ∈ commonNeighbors G v w
[PROOFSTEP]
apply not_mem_commonNeighbors_right
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v w : V
h : v ≠ w
⊢ Fintype.card ↑(commonNeighbors ⊤ v w) = Fintype.card V - 2
[PROOFSTEP]
simp only [commonNeighbors_top_eq, ← Set.toFinset_card, Set.toFinset_diff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v w : V
h : v ≠ w
⊢ card (Set.toFinset Set.univ \ Set.toFinset {v, w}) = Fintype.card V - 2
[PROOFSTEP]
rw [Finset.card_sdiff]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v w : V
h : v ≠ w
⊢ card (Set.toFinset Set.univ) - card (Set.toFinset {v, w}) = Fintype.card V - 2
[PROOFSTEP]
simp [Finset.card_univ, h]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
inst✝¹ : Fintype V
inst✝ : DecidableEq V
v w : V
h : v ≠ w
⊢ Set.toFinset {v, w} ⊆ Set.toFinset Set.univ
[PROOFSTEP]
simp only [Set.toFinset_subset_toFinset, Set.subset_univ]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
⊢ Injective (mapEdgeSet f)
[PROOFSTEP]
rintro ⟨e₁, h₁⟩ ⟨e₂, h₂⟩
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ mapEdgeSet f { val := e₁, property := h₁ } = mapEdgeSet f { val := e₂, property := h₂ } →
{ val := e₁, property := h₁ } = { val := e₂, property := h₂ }
[PROOFSTEP]
dsimp [Hom.mapEdgeSet]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ { val := Sym2.map (↑f) e₁, property := (_ : Sym2.map ↑f ↑{ val := e₁, property := h₁ } ∈ edgeSet G') } =
{ val := Sym2.map (↑f) e₂, property := (_ : Sym2.map ↑f ↑{ val := e₂, property := h₂ } ∈ edgeSet G') } →
{ val := e₁, property := h₁ } = { val := e₂, property := h₂ }
[PROOFSTEP]
repeat' rw [Subtype.mk_eq_mk]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ { val := Sym2.map (↑f) e₁, property := (_ : Sym2.map ↑f ↑{ val := e₁, property := h₁ } ∈ edgeSet G') } =
{ val := Sym2.map (↑f) e₂, property := (_ : Sym2.map ↑f ↑{ val := e₂, property := h₂ } ∈ edgeSet G') } →
{ val := e₁, property := h₁ } = { val := e₂, property := h₂ }
[PROOFSTEP]
rw [Subtype.mk_eq_mk]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ Sym2.map (↑f) e₁ = Sym2.map (↑f) e₂ → { val := e₁, property := h₁ } = { val := e₂, property := h₂ }
[PROOFSTEP]
rw [Subtype.mk_eq_mk]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ Sym2.map (↑f) e₁ = Sym2.map (↑f) e₂ → e₁ = e₂
[PROOFSTEP]
rw [Subtype.mk_eq_mk]
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G →g G'
hinj : Injective ↑f
e₁ : Sym2 V
h₁ : e₁ ∈ edgeSet G
e₂ : Sym2 V
h₂ : e₂ ∈ edgeSet G
⊢ Sym2.map (↑f) e₁ = Sym2.map (↑f) e₂ → e₁ = e₂
[PROOFSTEP]
apply Sym2.map.injective hinj
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G →g G'
f : ⊤ →g G'
⊢ Injective ↑f
[PROOFSTEP]
intro v w h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f✝ : G →g G'
f : ⊤ →g G'
v w : V
h : ↑f v = ↑f w
⊢ v = w
[PROOFSTEP]
contrapose! h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f✝ : G →g G'
f : ⊤ →g G'
v w : V
h : v ≠ w
⊢ ↑f v ≠ ↑f w
[PROOFSTEP]
exact G'.ne_of_adj (map_adj _ ((top_adj _ _).mpr h))
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G✝ →g G'
f : V → W
G : SimpleGraph W
⊢ ∀ {a b : V}, Adj (SimpleGraph.comap f G) a b → Adj G (f a) (f b)
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
f : G ↪g G'
v : V
⊢ Injective fun w => { val := ↑f ↑w, property := (_ : ↑f ↑w ∈ neighborSet G' (↑f v)) }
[PROOFSTEP]
rintro ⟨w₁, h₁⟩ ⟨w₂, h₂⟩ h
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
f : G ↪g G'
v w₁ : V
h₁ : w₁ ∈ neighborSet G v
w₂ : V
h₂ : w₂ ∈ neighborSet G v
h :
(fun w => { val := ↑f ↑w, property := (_ : ↑f ↑w ∈ neighborSet G' (↑f v)) }) { val := w₁, property := h₁ } =
(fun w => { val := ↑f ↑w, property := (_ : ↑f ↑w ∈ neighborSet G' (↑f v)) }) { val := w₂, property := h₂ }
⊢ { val := w₁, property := h₁ } = { val := w₂, property := h₂ }
[PROOFSTEP]
rw [Subtype.mk_eq_mk] at h ⊢
[GOAL]
case mk.mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w : V
e : Sym2 V
f : G ↪g G'
v w₁ : V
h₁ : w₁ ∈ neighborSet G v
w₂ : V
h₂ : w₂ ∈ neighborSet G v
h : ↑f ↑{ val := w₁, property := h₁ } = ↑f ↑{ val := w₂, property := h₂ }
⊢ w₁ = w₂
[PROOFSTEP]
exact f.inj' h
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G✝ ↪g G'
f : V ↪ W
G : SimpleGraph W
⊢ ∀ {a b : V},
Adj G (↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } a)
(↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } b) ↔
Adj (SimpleGraph.comap (↑f) G) a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G✝ ↪g G'
f : V ↪ W
G : SimpleGraph V
⊢ ∀ {a b : V},
Adj (SimpleGraph.map f G) (↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } a)
(↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } b) ↔
Adj G a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G ↪g G'
α : Type u_3
β : Type u_4
f : α ↪ β
⊢ ∀ {a b : α},
Adj ⊤ (↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } a)
(↑{ toFun := f.toFun, inj' := (_ : Injective f.toFun) } b) ↔
Adj ⊤ a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s✝ : Set V
t : Set W
r : Set X
φ : G✝ →g G'
φst : Set.MapsTo (↑φ) s✝ t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
G : SimpleGraph V
s : Set V
⊢ induceHom Hom.id (_ : Set.MapsTo id s s) = Hom.id
[PROOFSTEP]
ext x
[GOAL]
case h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s✝ : Set V
t : Set W
r : Set X
φ : G✝ →g G'
φst : Set.MapsTo (↑φ) s✝ t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
G : SimpleGraph V
s : Set V
x : ↑s
⊢ ↑(↑(induceHom Hom.id (_ : Set.MapsTo id s s)) x) = ↑(↑Hom.id x)
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s : Set V
t : Set W
r : Set X
φ : G →g G'
φst : Set.MapsTo (↑φ) s t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
⊢ Hom.comp (induceHom ψ ψtr) (induceHom φ φst) = induceHom (Hom.comp ψ φ) (_ : Set.MapsTo (↑ψ ∘ fun x => ↑φ x) s r)
[PROOFSTEP]
ext x
[GOAL]
case h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s : Set V
t : Set W
r : Set X
φ : G →g G'
φst : Set.MapsTo (↑φ) s t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
x : ↑s
⊢ ↑(↑(Hom.comp (induceHom ψ ψtr) (induceHom φ φst)) x) =
↑(↑(induceHom (Hom.comp ψ φ) (_ : Set.MapsTo (↑ψ ∘ fun x => ↑φ x) s r)) x)
[PROOFSTEP]
rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s : Set V
t : Set W
r : Set X
φ : G →g G'
φst : Set.MapsTo (↑φ) s t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
hi : Set.InjOn (↑φ) s
⊢ Injective ↑(induceHom φ φst)
[PROOFSTEP]
erw [Set.MapsTo.restrict_inj]
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s : Set V
t : Set W
r : Set X
φ : G →g G'
φst : Set.MapsTo (↑φ) s t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
hi : Set.InjOn (↑φ) s
⊢ Set.InjOn (↑φ) s
[PROOFSTEP]
assumption
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G'' : SimpleGraph X
s : Set V
t : Set W
r : Set X
φ : G →g G'
φst : Set.MapsTo (↑φ) s t
ψ : G' →g G''
ψtr : Set.MapsTo (↑ψ) t r
hi : Set.InjOn (↑φ) s
⊢ Set.MapsTo (↑φ) s t
[PROOFSTEP]
assumption
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s s' : Set V
h✝ h : s ≤ s'
⊢ ∀ {a b : ↑s},
Adj (induce s' G) (↑(Set.embeddingOfSubset s s' h) a) (↑(Set.embeddingOfSubset s s' h) b) ↔ Adj (induce s G) a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s s' : Set V
h : s ≤ s'
⊢ Embedding.toHom (induceHomOfLE G h) = induceHom Hom.id (_ : Set.MapsTo id s s')
[PROOFSTEP]
ext
[GOAL]
case h.a
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
s s' : Set V
h : s ≤ s'
x✝ : ↑s
⊢ ↑(↑(Embedding.toHom (induceHomOfLE G h)) x✝) = ↑(↑(induceHom Hom.id (_ : Set.MapsTo id s s')) x✝)
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G ≃g G'
⊢ LeftInverse (Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding (symm f))))
(Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding f)))
[PROOFSTEP]
rintro ⟨e, h⟩
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 V
h : e ∈ edgeSet G
⊢ Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding (symm f)))
(Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding f)) { val := e, property := h }) =
{ val := e, property := h }
[PROOFSTEP]
simp [Hom.mapEdgeSet, Sym2.map_map, RelEmbedding.toRelHom]
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 V
h : e ∈ edgeSet G
⊢ Sym2.map
(fun x =>
↑{ toFun := ↑(symm f), map_rel' := (_ : ∀ {a b : W}, Adj G' a b → Adj G (↑(symm f) a) (↑(symm f) b)) }
(↑{ toFun := ↑f, map_rel' := (_ : ∀ {a b : V}, Adj G a b → Adj G' (↑f a) (↑f b)) } x))
e =
e
[PROOFSTEP]
convert congr_fun Sym2.map_id e
[GOAL]
case h.e'_2.h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 V
h : e ∈ edgeSet G
x✝ : V
a✝ : x✝ ∈ e
⊢ ↑{ toFun := ↑(symm f), map_rel' := (_ : ∀ {a b : W}, Adj G' a b → Adj G (↑(symm f) a) (↑(symm f) b)) }
(↑{ toFun := ↑f, map_rel' := (_ : ∀ {a b : V}, Adj G a b → Adj G' (↑f a) (↑f b)) } x✝) =
id x✝
[PROOFSTEP]
exact RelIso.symm_apply_apply _ _
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f : G ≃g G'
⊢ Function.RightInverse (Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding (symm f))))
(Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding f)))
[PROOFSTEP]
rintro ⟨e, h⟩
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 W
h : e ∈ edgeSet G'
⊢ Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding f))
(Hom.mapEdgeSet (RelEmbedding.toRelHom (RelIso.toRelEmbedding (symm f))) { val := e, property := h }) =
{ val := e, property := h }
[PROOFSTEP]
simp [Hom.mapEdgeSet, Sym2.map_map, RelEmbedding.toRelHom]
[GOAL]
case mk
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 W
h : e ∈ edgeSet G'
⊢ Sym2.map
(fun x =>
↑{ toFun := ↑f, map_rel' := (_ : ∀ {a b : V}, Adj G a b → Adj G' (↑f a) (↑f b)) }
(↑{ toFun := ↑(symm f), map_rel' := (_ : ∀ {a b : W}, Adj G' a b → Adj G (↑(symm f) a) (↑(symm f) b)) } x))
e =
e
[PROOFSTEP]
convert congr_fun Sym2.map_id e
[GOAL]
case h.e'_2.h
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e✝ : Sym2 V
f : G ≃g G'
e : Sym2 W
h : e ∈ edgeSet G'
x✝ : W
a✝ : x✝ ∈ e
⊢ ↑{ toFun := ↑f, map_rel' := (_ : ∀ {a b : V}, Adj G a b → Adj G' (↑f a) (↑f b)) }
(↑{ toFun := ↑(symm f), map_rel' := (_ : ∀ {a b : W}, Adj G' a b → Adj G (↑(symm f) a) (↑(symm f) b)) } x✝) =
id x✝
[PROOFSTEP]
exact RelIso.apply_symm_apply _ _
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : G ≃g G'
v : V
w : ↑(neighborSet G' (↑f v))
⊢ ↑(symm f) ↑w ∈ neighborSet G v
[PROOFSTEP]
simpa [RelIso.symm_apply_apply] using f.symm.apply_mem_neighborSet_iff.mpr w.2
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : G ≃g G'
v : V
w : ↑(neighborSet G v)
⊢ (fun w => { val := ↑(symm f) ↑w, property := (_ : ↑(symm f) ↑w ∈ neighborSet G v) })
((fun w => { val := ↑f ↑w, property := (_ : ↑f ↑w ∈ neighborSet G' (↑f v)) }) w) =
w
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v✝ w✝ : V
e : Sym2 V
f : G ≃g G'
v : V
w : ↑(neighborSet G' (↑f v))
⊢ (fun w => { val := ↑f ↑w, property := (_ : ↑f ↑w ∈ neighborSet G' (↑f v)) })
((fun w => { val := ↑(symm f) ↑w, property := (_ : ↑(symm f) ↑w ∈ neighborSet G v) }) w) =
w
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G ≃g G'
inst✝¹ : Fintype V
inst✝ : Fintype W
f : G ≃g G'
⊢ Fintype.card V = Fintype.card W
[PROOFSTEP]
rw [← Fintype.ofEquiv_card f.toEquiv]
-- porting note: need to help it to find the typeclass instances from the target expression
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G ≃g G'
inst✝¹ : Fintype V
inst✝ : Fintype W
f : G ≃g G'
⊢ Fintype.card W = Fintype.card W
[PROOFSTEP]
apply @Fintype.card_congr' _ _ (_) (_) rfl
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G✝ ≃g G'
f : V ≃ W
G : SimpleGraph W
⊢ ∀ {a b : V},
Adj G
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
a)
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
b) ↔
Adj (SimpleGraph.comap (↑(Equiv.toEmbedding f)) G) a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G✝ ≃g G'
f : V ≃ W
G : SimpleGraph V
⊢ ∀ {a b : V},
Adj (SimpleGraph.map (Equiv.toEmbedding f) G)
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
a)
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
b) ↔
Adj G a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
f✝ : G ≃g G'
α : Type u_3
β : Type u_4
f : α ≃ β
⊢ ∀ {a b : α},
Adj ⊤
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
a)
(↑{ toFun := f.toFun, invFun := f.invFun, left_inv := (_ : LeftInverse f.invFun f.toFun),
right_inv := (_ : Function.RightInverse f.invFun f.toFun) }
b) ↔
Adj ⊤ a b
[PROOFSTEP]
simp
[GOAL]
ι : Sort u_1
𝕜 : Type u_2
V : Type u
W : Type v
X : Type w
G✝ : SimpleGraph V
G' : SimpleGraph W
a b c u v w : V
e : Sym2 V
G : SimpleGraph V
⊢ ∀ {a b : ↑Set.univ}, Adj G (↑(Equiv.Set.univ V) a) (↑(Equiv.Set.univ V) b) ↔ Adj (induce Set.univ G) a b
[PROOFSTEP]
simp only [Equiv.Set.univ, Equiv.coe_fn_mk, comap_Adj, Embedding.coe_subtype, Subtype.forall, Set.mem_univ,
forall_true_left, implies_true]
|
{"mathlib_filename": "Mathlib.Combinatorics.SimpleGraph.Basic", "llama_tokens": 52048}
|
# -*- coding: utf-8 -*-
#
# Lijun Zhu (ljzhu@gps.caltech.edu)
#
# (c) 2018-2019 all rights reserved
#
import numpy
import cuda
import gsl
def test():
"""
    Test cuda matrix copycols and insert
"""
samples = 10
parameters = 20
precision = 'float64'
m = gsl.matrix(shape=(samples, parameters))
for sample in range(samples):
for parameter in range(parameters):
m[sample, parameter] = sample + parameter
    subset = numpy.asarray(list(range(6, 8)) + [12, 15] + list(range(18, 20)), dtype='int64')
gsubset = cuda.vector(source=subset)
gm = cuda.matrix(source=m, dtype=precision)
gm_sub = cuda.matrix(shape=(samples, gsubset.shape))
gm.copycols(dst=gm_sub, indices=gsubset, batch=samples)
print("A submatrix for indices", subset)
gm_sub.print()
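    # Hedged host-side cross-check (added; not part of the original test): the
    # matrix was filled with m[i, j] = i + j, so the gathered columns can be
    # reproduced with plain numpy fancy indexing as a reference.
    expected = numpy.add.outer(numpy.arange(samples), numpy.arange(parameters))[:, subset]
    print("numpy reference for the same column gather:")
    print(expected)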
gm.fill(1)
gm.insert(src=gm_sub, start=(0,1))
print("insert submatrix back")
gm.print()
return
test()
|
{"hexsha": "d0fa089912a0a48b7565842eb7091ffd80e47748", "size": 948, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/cuda/copycols.py", "max_stars_repo_name": "lijun99/pyre", "max_stars_repo_head_hexsha": "004dfd4c06489b4ba5b32877338ca6440f2d523b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-08-02T21:02:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-08T13:59:43.000Z", "max_issues_repo_path": "tests/cuda/copycols.py", "max_issues_repo_name": "lijun99/pyre", "max_issues_repo_head_hexsha": "004dfd4c06489b4ba5b32877338ca6440f2d523b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/cuda/copycols.py", "max_forks_repo_name": "lijun99/pyre", "max_forks_repo_head_hexsha": "004dfd4c06489b4ba5b32877338ca6440f2d523b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0666666667, "max_line_length": 91, "alphanum_fraction": 0.6276371308, "include": true, "reason": "import numpy", "num_tokens": 264}
|
from mushroom_rl.core import Core
from mushroom_rl.environments import GridWorld
from mushroom_rl.algorithms.value import SARSA
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.utils.parameters import Parameter, ExponentialParameter
from mushroom_rl.utils.callbacks import *
import numpy as np
def test_collect_dataset():
np.random.seed(88)
callback = CollectDataset()
mdp = GridWorld(4, 4, (2, 2))
eps = Parameter(0.1)
pi = EpsGreedy(eps)
alpha = Parameter(0.2)
agent = SARSA(mdp.info, pi, alpha)
core = Core(agent, mdp, callbacks_fit=[callback])
core.learn(n_steps=10, n_steps_per_fit=1, quiet=True)
dataset = callback.get()
assert len(dataset) == 10
core.learn(n_steps=5, n_steps_per_fit=1, quiet=True)
assert len(dataset) == 15
callback.clean()
dataset = callback.get()
assert len(dataset) == 0
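# A hedged illustration (added; not one of the original tests): a custom
# callback only needs to be a callable taking the samples collected since the
# last fit, mirroring the built-in CollectDataset used above. The name
# CountSteps is hypothetical.
class CountSteps:
    def __init__(self):
        self.n_steps = 0
    def __call__(self, dataset):
        # Core invokes each fit callback with the list of newly collected samples.
        self.n_steps += len(dataset)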
def test_collect_Q():
np.random.seed(88)
mdp = GridWorld(3, 3, (2, 2))
eps = Parameter(0.1)
pi = EpsGreedy(eps)
alpha = Parameter(0.1)
agent = SARSA(mdp.info, pi, alpha)
callback_q = CollectQ(agent.Q)
callback_max_q = CollectMaxQ(agent.Q, np.array([2]))
core = Core(agent, mdp, callbacks_fit=[callback_q, callback_max_q])
core.learn(n_steps=1000, n_steps_per_fit=1, quiet=True)
V_test = np.array([2.4477574 , 0.02246188, 1.6210059 , 6.01867052])
V = callback_q.get()[-1]
assert np.allclose(V[0, :], V_test)
V_max = np.array([np.max(x[2, :], axis=-1) for x in callback_q.get()])
max_q = np.array(callback_max_q.get())
assert np.allclose(V_max, max_q)
def test_collect_parameter():
np.random.seed(88)
mdp = GridWorld(3, 3, (2, 2))
eps = ExponentialParameter(value=1, exp=.5,
size=mdp.info.observation_space.size)
pi = EpsGreedy(eps)
alpha = Parameter(0.1)
agent = SARSA(mdp.info, pi, alpha)
callback_eps = CollectParameters(eps, 1)
core = Core(agent, mdp, callbacks_fit=[callback_eps])
core.learn(n_steps=10, n_steps_per_fit=1, quiet=True)
eps_test = np.array([1., 0.70710678, 0.70710678, 0.57735027, 0.57735027,
0.57735027, 0.57735027, 0.57735027, 0.57735027, 0.57735027])
eps = callback_eps.get()
assert np.allclose(eps, eps_test)
|
{"hexsha": "6518b7c146c32fb2c13d47e0d7ec5289a23de5ce", "size": 2297, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/utils/test_callbacks.py", "max_stars_repo_name": "PuzeLiu/mushroom-rl", "max_stars_repo_head_hexsha": "99942b425e66b4ddcc26009d7105dde23841e95d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 344, "max_stars_repo_stars_event_min_datetime": "2020-01-10T09:45:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:48:28.000Z", "max_issues_repo_path": "tests/utils/test_callbacks.py", "max_issues_repo_name": "AmmarFahmy/mushroom-rl", "max_issues_repo_head_hexsha": "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 44, "max_issues_repo_issues_event_min_datetime": "2020-01-23T03:00:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T17:14:22.000Z", "max_forks_repo_path": "tests/utils/test_callbacks.py", "max_forks_repo_name": "AmmarFahmy/mushroom-rl", "max_forks_repo_head_hexsha": "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 93, "max_forks_repo_forks_event_min_datetime": "2020-01-10T21:17:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:58:52.000Z", "avg_line_length": 27.6746987952, "max_line_length": 85, "alphanum_fraction": 0.6630387462, "include": true, "reason": "import numpy", "num_tokens": 690}
|
#include <boost/geometry/index/predicates.hpp>
|
{"hexsha": "803b4c1ef0f7e8d98254880eff0fa0b2327bfdea", "size": 47, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_geometry_index_predicates.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_geometry_index_predicates.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_geometry_index_predicates.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.5, "max_line_length": 46, "alphanum_fraction": 0.8085106383, "num_tokens": 11}
|
""" An example highlighting the difference between TLS-DMD and DMD
TLS-DMD is a total least squares variant of DMD, which can produce
superior results when the data provided to the method are noisy.
This example is meant to highlight the difference between the two
methods on a simple problem where the true solution is already known.
Returns
-------
Outputs a plot comparing the true, DMD, and TLS-DMD eigenvalues
"""
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
import dmdtools
if __name__ == "__main__":
np.random.seed(0)
# ======== System Parameters =======
n_rank = 2 # True rank of the system
n = 250 # Number of states
m = 1000 # Number of snapshots
std = 5e-1 # standard deviation of the noise
# The true system is 2 dimensional and oscillatory
Alow = np.diag(np.exp([1j, 0.65j]))
data = np.zeros((n_rank, m+1), dtype="complex")
data[:, 0] = np.random.randn(n_rank) + 1j*np.random.randn(n_rank)
    for ii in range(m):
data[:, ii+1] = Alow.dot(data[:, ii])
Q = np.linalg.qr(np.random.randn(n, 2))[0]
data = Q.dot(data)
    data = np.r_[data.real, data.imag]  # Split and stack real and imaginary parts
# Add noise to the data
noisy_data = data + std*np.random.randn(data.shape[0], data.shape[1])
# Create a new figure for output
fig = plt.figure(1)
th = np.linspace(0, 2*np.pi, 101)
plt.plot(np.cos(th), np.sin(th), '-', color='0.75', lw=4)
plt.plot(np.diag(Alow).real, np.diag(Alow).imag, 'ko', ms=14)
# Note: n_rank is doubled because we only deal with real numbers
dmd = dmdtools.DMD(n_rank*2, False, False) # "standard" DMD
dmd = dmd.fit(noisy_data)
dmd_vals, dmd_modes = dmd.get_mode_pairs(sortby="LM")
# Plot the DMD eigenvalues
plt.plot(dmd_vals.real, dmd_vals.imag, 'rv', ms=14)
# With TLS DMD
    tlsdmd = dmdtools.DMD(n_rank*2, False, True)  # total least squares (TLS) DMD
tlsdmd = tlsdmd.fit(noisy_data)
tlsdmd_vals, tlsdmd_modes = tlsdmd.get_mode_pairs(sortby="LM")
# Plot the DMD eigenvalues
plt.plot(tlsdmd_vals.real, tlsdmd_vals.imag, 'b^', ms=14)
plt.xlabel("$\Re(\mu)$")
plt.ylabel("$\Im(\mu)$")
plt.legend(["Unit Circle", "True", "DMD", "TLS-DMD"], "lower left")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.gca().set_aspect("equal")
plt.title("DMD vs TLS-DMD")
plt.savefig("tls_dmd_comparison.pdf")
plt.show()
|
{"hexsha": "3ad0e560c70aadea9d8ef150b03dbe0c2cb64501", "size": 2465, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/scripts/total_dmd_example.py", "max_stars_repo_name": "Zhengyu-Huang/dmdtools", "max_stars_repo_head_hexsha": "d63fecd069e895bf4a0e5a0a3aec59d59d555d17", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2015-11-04T04:08:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T06:55:24.000Z", "max_issues_repo_path": "python/scripts/total_dmd_example.py", "max_issues_repo_name": "Zhengyu-Huang/dmdtools", "max_issues_repo_head_hexsha": "d63fecd069e895bf4a0e5a0a3aec59d59d555d17", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2015-04-13T14:48:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-19T04:48:20.000Z", "max_forks_repo_path": "python/scripts/total_dmd_example.py", "max_forks_repo_name": "Zhengyu-Huang/dmdtools", "max_forks_repo_head_hexsha": "d63fecd069e895bf4a0e5a0a3aec59d59d555d17", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2015-06-02T21:23:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T22:55:09.000Z", "avg_line_length": 33.3108108108, "max_line_length": 78, "alphanum_fraction": 0.6430020284, "include": true, "reason": "import numpy", "num_tokens": 744}
|
import psycopg2, psycopg2.extras
import sys
import os
import glob
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib import patches
from matplotlib.pyplot import figure
from datetime import timedelta, date
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score, recall_score, precision_score
# importing keras directly (without the tensorflow.python prefix) raises an import error here
from tensorflow.python.keras import layers, Input, regularizers
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.utils import to_categorical, model_to_dot, plot_model
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
# transform array to rectangle shape
def trans2rect(arr):
tarr = []
trend = arr[0]
width = 1
day = 0
for elm in arr[1:]:
if elm == trend:
width += 1
else:
tarr.append((trend, day, width))
trend = elm
day += width
width = 1
tarr.append((trend, day, width))
return tarr
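# Illustrative behaviour of trans2rect (added note): it run-length encodes a
# label sequence into (value, start_day, width) triples, e.g.
#   trans2rect([1, 1, 0, 0, 0, 2]) == [(1, 0, 2), (0, 2, 3), (2, 5, 1)]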
def date_range(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def get_f1_pre_recall(model, x, y):
y_pred = model.predict(x, verbose=2)
y_pred = np.argmax(y_pred, axis=1)
y_pred.tolist()
f1 = f1_score(y, y_pred, average='macro')
precision = precision_score(y, y_pred, average='macro')
recall = recall_score(y, y_pred, average='macro')
return f1, precision, recall
def get_model_data(df, input_size, feature_num, pred_k):
dt_count = df['dt'].value_counts()
date_num = dt_count.shape[0]
event_num = dt_count.sum()
input_shape = event_num-(input_size-1+pred_k)
df = df.drop(columns = ['dt'])
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df)
data = df.values
X = []
Y = []
    for i in range(input_shape):
        # features: the first feature_num columns of input_size consecutive events
        X.append(data[i:i+input_size, 0:feature_num])
        # label: the target column of the window's last event
        Y.append(data[i+input_size-1, -1:])
return X,Y
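# Shape sketch for get_model_data (added note): with input_size=30, pred_k=50
# and feature_num=4, an event stream of N rows yields N-79 windows; X[i] is the
# (30, 4) slice data[i:i+30, :4] and Y[i] is the last column of the window's
# final row, data[i+input_size-1, -1:].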
def main():
conn = psycopg2.connect(**eval(open('auth.txt').read()))
cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
start_date = date(2010, 7, 1)
end_date = date(2010, 7, 2)
input_size = 30
pred_k = 50
feature_num = 4
label_threshold = 0.0010
#run from start_date to end_date-1 day
for single_date in date_range(start_date, end_date):
test_x = []
test_y = []
cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
recs = cmd.fetchall()
        if not recs:
            continue
df = pd.DataFrame(recs, columns = recs[0].keys())
        # reorder columns and sort chronologically (assign back; these pandas ops return copies)
        df = df.loc[:, ['dt', 'tm', 'open', 'close', 'high', 'low', 'volume', 'mid', 'origin']]
        df = df.sort_values(by='dt')
df = df.drop(columns = ['mid', 'tm', 'origin'])
if feature_num == 4:
df = df.drop(columns = ['volume'])
df['horizon avg'] = 0.000000
        # Python slice ends are exclusive; pandas DataFrame .loc ends are inclusive
for i in df.index:
df.loc[i,'horizon avg'] = df.loc[i+1:i+pred_k]['close'].sum()/float(pred_k)
df['pct'] = (df['horizon avg']-df['close'])/df['close']
df['target'] = 1
        # label 0: pct >= label_threshold
        # label 1: between -label_threshold and label_threshold
        # label 2: pct <= -label_threshold
df.loc[df['pct'] >= label_threshold, 'target'] = 0
df.loc[df['pct'] <= (-1)*label_threshold, 'target'] = 2
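        # Worked example (illustrative): with label_threshold = 0.0010, a row
        # with pct = 0.0012 is labelled 0 (up), pct = -0.0003 stays 1 (flat),
        # and pct = -0.0015 is labelled 2 (down).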
# with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df)
df = df.drop(columns = ['pct', 'horizon avg'])
df1 = df['open']
df2 = df['high']
df3 = df['low']
df4 = df['close']
df5 = pd.concat([df1, df2, df3, df4], ignore_index=True)
mean = df5.mean()
std = df5.std()
#zscore
df['open'] = (df['open']-mean)/std
df['high'] = (df['high']-mean)/std
df['low'] = (df['low']-mean)/std
df['close'] = (df['close']-mean)/std
x, y = get_model_data(df, input_size, feature_num, pred_k)
#list
test_x = test_x + x
test_y = test_y + y
test_x = np.array(test_x)
test_y = np.array(test_y)
test_x = test_x.reshape(test_x.shape[0], test_x.shape[1], test_x.shape[2], 1)
test_y = test_y.astype(int)
temp = []
test_y = [i[0] for i in test_y]
# print(len(test_y))
# print(test_y)
# save_dir = os.path.join(os.getcwd(), 'data_set/'+str(data_set))
# if not os.path.isdir(save_dir):
# os.makedirs(save_dir)
#np.save(os.path.join(save_dir, 'valid_x.npy'), test_x)
#test_y = to_categorical(test_y)
#np.save(os.path.join(save_dir, 'valid_y_onehot.npy'), test_y)
model = load_model('model_epoch_300.h5')
y_pred = model.predict(test_x, verbose=2)
y_pred = np.argmax(y_pred, axis=1)
y_pred.tolist()
acc = accuracy_score(test_y, y_pred)
# print(len(y_pred))
# print(y_pred)
#loss, acc = model.evaluate(test_x, test_y, verbose=2)
# test_loss = test_loss + [loss]
# test_acc = test_acc + [acc]
# f1, precision, recall = get_f1_pre_recall(model, test_x, test_y_label)
# test_f1 = test_f1 + [f1]
# test_precision = test_precision + [precision]
# test_recall = test_recall + [recall]
plt.rcParams.update({'font.size': 35})
figure(figsize=(100,40), dpi=80)
plt.suptitle('date={}, acc={}, k={}, threshold={}'.format(single_date, acc, pred_k, label_threshold))
ax = plt.subplot(211)
tans = trans2rect(test_y)
print(len(test_y))
print(test_y)
tans_stats = sorted(tans, key=lambda x: x[2])
        plt.title('Answer, #labels={}, max_period={}'.format(len(tans), tans_stats[-1][2]))
for a in tans:
if a[0] == 0:
col = (1,.6,.6)
elif a[0] == 1:
col = 'w'
elif a[0] == 2:
col = (.6,1,.6)
ax.add_patch(patches.Rectangle((a[1],0), a[2],1, color=col))
df_close = df4
close_price = df_close.values.tolist()
close_price = [(float(i)-min(close_price))/(max(close_price)-min(close_price)) for i in close_price]
close_price = close_price[input_size-1:-pred_k]
plt.plot(close_price)
ax = plt.subplot(212)
tans = trans2rect(y_pred)
tans_stats = sorted(tans, key=lambda x: x[2])
        plt.title('Prediction, #labels={}, max_period={}'.format(len(tans), tans_stats[-1][2]))
for a in tans:
if a[0] == 0:
col = (1,.6,.6)
elif a[0] == 1:
col = 'w'
elif a[0] == 2:
col = (.6,1,.6)
ax.add_patch(patches.Rectangle((a[1],0), a[2],1, color=col))
plt.plot(close_price)
plt.savefig('date={}_acc={:.2f}_k={}_threshold={}.png'.format(single_date, acc, pred_k, label_threshold*10000))
if __name__ == '__main__':
main()
|
{"hexsha": "5d7742ad1dcf743a7b1c5ec87e7d80625740ba0f", "size": 7726, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment/past_experiment/ohlc/predict_day/pred_day.py", "max_stars_repo_name": "anakinanakin/neural-network-on-finance-data", "max_stars_repo_head_hexsha": "1842606294ca3d5dafa7387d6db95a1c21d323eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-11T09:11:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-11T09:11:53.000Z", "max_issues_repo_path": "experiment/past_experiment/ohlc/predict_day/pred_day.py", "max_issues_repo_name": "anakinanakin/neural-network-on-finance-data", "max_issues_repo_head_hexsha": "1842606294ca3d5dafa7387d6db95a1c21d323eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment/past_experiment/ohlc/predict_day/pred_day.py", "max_forks_repo_name": "anakinanakin/neural-network-on-finance-data", "max_forks_repo_head_hexsha": "1842606294ca3d5dafa7387d6db95a1c21d323eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-28T03:59:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-28T03:59:31.000Z", "avg_line_length": 28.3003663004, "max_line_length": 127, "alphanum_fraction": 0.5818017085, "include": true, "reason": "import numpy", "num_tokens": 2101}
|
import tensorflow as tf
import data_io.basepy as basepy
import random
import numpy as np
import os.path as osp
import copy
slim = tf.contrib.slim
def arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
return arg_sc
def regression(inputs,
is_training=True,
dropout_keep_prob=0.85,
scope='regression',
fc_conv_padding='VALID'):
with tf.variable_scope(scope, 'regression', [inputs], reuse=tf.AUTO_REUSE) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d.
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
# Use conv2d instead of fully_connected layers.
regress = tf.expand_dims(tf.expand_dims(inputs, 1), 1, name='input/expand')
regress = slim.dropout(regress, dropout_keep_prob, is_training=is_training, scope='dropout0')
regress = slim.conv2d(regress, 512, [1, 1], padding=fc_conv_padding, scope='regress1',
activation_fn=tf.nn.relu)
regress = slim.dropout(regress, dropout_keep_prob, is_training=is_training, scope='dropout1')
regress = slim.conv2d(regress, 32, [1, 1], scope='regress2',
activation_fn=None)
regress = slim.dropout(regress, dropout_keep_prob, is_training=is_training, scope='dropout2')
# # Convert end_points_collection into a end_point dict.
# end_points = slim.utils.convert_collection_to_dict(end_points_collection)
regress = slim.conv2d(regress, 1, [1, 1], scope='score/expand',
activation_fn=tf.nn.sigmoid)
regress = tf.squeeze(regress, [1, 2], name='score')
return regress
def network_fn(inputs, fusion='standard', feature_len=4096, segment_num=32, attention_l=1024, **kwargs):
with slim.arg_scope(arg_scope(weight_decay=0.0005)):
if fusion == 'standard':
reshaped_inputs = tf.reshape(inputs, [-1, feature_len])
anomaly_score = regression(reshaped_inputs, **kwargs)
outputs = tf.reshape(anomaly_score, [-1, segment_num])
elif fusion == 'segments':
reshaped_inputs = tf.reduce_max(inputs, axis=1)
outputs = regression(reshaped_inputs, **kwargs)
elif fusion == 'average':
reshaped_inputs = tf.reduce_mean(inputs, axis=1)
outputs = regression(reshaped_inputs, **kwargs)
elif fusion == 'attention':
with tf.variable_scope(fusion, 'regression', [inputs], reuse=tf.AUTO_REUSE):
hk = tf.reshape(inputs, [-1, feature_len])
_v = tf.get_variable('para_v', [attention_l, feature_len])
th = tf.tanh(tf.matmul(_v, hk, transpose_b=True))
_w = tf.get_variable('para_w', [1, attention_l])
ep = tf.exp(tf.matmul(_w, th))
ot = tf.reshape(tf.transpose(ep), [-1, segment_num])
l1 = ot / tf.norm(ot, ord=1, axis=1, keepdims=True)
fn = tf.expand_dims(l1, axis=2) * inputs
reshaped_inputs = tf.reduce_sum(fn, axis=1)
outputs = regression(reshaped_inputs, **kwargs)
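            # Attention pooling in math terms (descriptive comment, added): for
            # segment features h_k, scores e_k = exp(w . tanh(V h_k)) are
            # L1-normalised per video and used as convex-combination weights,
            # so the regressor sees one attention-pooled feature per video.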
else:
raise ValueError('Wrong fusion type: %s' % fusion)
return outputs
def network_fn_list(inputs, **kwargs):
outputs = []
for k in inputs:
with slim.arg_scope(arg_scope(weight_decay=0.0005)):
anomaly_score = regression(k, **kwargs)
outputs.append(anomaly_score)
return outputs
def get_np_from_txt(txt_file_path, renum=1001):
feature = basepy.read_txt_lines2list(txt_file_path)
try:
feature = random.sample(feature, renum)
except ValueError:
quotient, remainder = divmod(renum, len(feature))
feature = feature * quotient + random.sample(feature, remainder)
return np.array([i[0] for i in feature], dtype='float32')
def reform_train_list(org_txt_list, reform_txt_list, if_print=True):
"""
Reform for some changes in list
:param org_txt_list: [['Abuse/Abuse001_x264.mp4'], ['Abuse/Abuse002_x264.mp4'],...]
:param reform_txt_list: ['/absolute/datasets/anoma_motion16_tfrecords/Shoplifting@Shoplifting041_x264.txt',
'/absolute/datasets/anoma_motion16_tfrecords/normal_train@Normal_Videos308_0_x264.txt',]
:return: similar to reform_txt_list
"""
if if_print:
print('List reform:')
new_txt_list = []
remove = 0
replace = 0
for trainee in org_txt_list:
video_name = osp.basename(trainee[0]).split('_x264')[0]
reform_txt = [i for i in reform_txt_list if video_name in i]
if not reform_txt:
if if_print:
print('Remove %s from txt_list' % video_name)
remove += 1
elif len(reform_txt) > 1:
new_txt_list.extend(reform_txt)
if if_print:
print('Replace %s to' % video_name, reform_txt)
replace += 1
else:
new_txt_list.extend(reform_txt)
if if_print:
print('List reform DONE, remove %d videos, replace %d videos' % (remove, replace))
return new_txt_list
def reform_np_array(np_array, reform=1000):
if np_array.shape[0] == reform:
np_output = np_array[:, :4096]
elif np_array.shape[0] < reform:
quotient = reform // np_array.shape[0]
np_temp = np.concatenate((np_array, np_array.repeat(quotient - 1, axis=0)), axis=0)
np_output = np.concatenate((np_temp, np_array[:reform - len(np_temp)]), axis=0)[:, :4096]
# if np_array.shape[0] > reform:
# np_copy = copy.deepcopy(np_array)
# np.random.shuffle(np_copy)
# np_output = np_copy[:reform, :4096]
else:
        raise ValueError('np_array height > reform num: %d > %d' % (np_array.shape[0], reform))
return np_output
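# Illustrative behaviour of reform_np_array (added note): a (300, d) input
# reformed to 1000 rows becomes [the original 300 rows | each row repeated
# twice (600 rows) | the first 100 rows], with columns truncated to the first
# 4096 features; inputs taller than `reform` raise ValueError because the
# random down-sampling branch above is commented out.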
def read_npy_file_path_list(npy_file_path_list, class_name_in_keys=True, sep='@'):
feed_data = {}
for npy_file_path in npy_file_path_list:
# '/absolute/ext3t/anoma_motion16_npy_rand_2019_c50/normal_train@Normal_Videos167_x264.npy'
# 'normal_train@Normal_Videos167_x264'
file_name = osp.basename(osp.splitext(npy_file_path)[0])
if class_name_in_keys:
feed_data[file_name] = np.load(npy_file_path)
else:
# Normal_Videos167_x264
file_name = osp.splitext(file_name.split(sep)[-1])[0]
feed_data[file_name] = np.load(npy_file_path)
return feed_data
|
{"hexsha": "5e666d0f60e7e479faf5240a4fcd4ea23945b9a4", "size": 7027, "ext": "py", "lang": "Python", "max_stars_repo_path": "zc3d_npy_base.py", "max_stars_repo_name": "zbhoscar/submax", "max_stars_repo_head_hexsha": "725c4de09e182f9cd1c4afe93ad175464f5c7cea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "zc3d_npy_base.py", "max_issues_repo_name": "zbhoscar/submax", "max_issues_repo_head_hexsha": "725c4de09e182f9cd1c4afe93ad175464f5c7cea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zc3d_npy_base.py", "max_forks_repo_name": "zbhoscar/submax", "max_forks_repo_head_hexsha": "725c4de09e182f9cd1c4afe93ad175464f5c7cea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.91875, "max_line_length": 117, "alphanum_fraction": 0.6287178028, "include": true, "reason": "import numpy", "num_tokens": 1684}
|
import numpy as np
import nengo
import nengo_spinnaker
spinnaker = True
dimensions = 1
from nengo.utils.functions import whitenoise
model = nengo.Network()
with model:
config = nengo_spinnaker.Config()
inp = nengo.Node(whitenoise(0.1, 5, dimensions=dimensions), label = "inp")
config[inp].f_of_t = True
pre = nengo.Ensemble(60, dimensions=dimensions, label = "pre")
nengo.Connection(inp, pre)
post = nengo.Ensemble(60, dimensions=dimensions, label = "post")
conn = nengo.Connection(pre, post, function=lambda x: np.random.random(dimensions))
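    # Note (added, hedged): decoding a random function gives this connection
    # arbitrary initial decoders; the stock nengo learning tutorial attaches a
    # PES learning rule to such a connection, which this SpiNNaker variant
    # omits despite the file name.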
#if not spinnaker:
inp_p = nengo.Probe(inp)
pre_p = nengo.Probe(pre, synapse=0.01)
post_p = nengo.Probe(post, synapse=0.01)
if spinnaker:
sim = nengo_spinnaker.Simulator(model, config = config)
sim.run(10.0, clean = True)
else:
sim = nengo.Simulator(model)
sim.run(10.0)
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 8))
for d in range(dimensions):
#if dimensions > 1:
plt.subplot(dimensions, 1, d + 1)
#if not spinnaker:
plt.plot(sim.trange(), sim.data[inp_p].T[d], c='k', label='Input')
plt.plot(sim.trange(), sim.data[pre_p].T[d], c='b', label='Pre')
plt.plot(sim.trange(), sim.data[post_p].T[d], c='r', label='Post')
plt.ylabel("Dimension 1")
plt.legend(loc='best')
plt.show()
|
{"hexsha": "8eeba70738cde8cd8a311bfe463a0a72f4be5b03", "size": 1462, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/learn_communication_channel_1.py", "max_stars_repo_name": "ctn-archive/nengo_spinnaker_2014", "max_stars_repo_head_hexsha": "2dcfe9506fedd9c4aebb66f5e1ba745d2f027871", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/learn_communication_channel_1.py", "max_issues_repo_name": "ctn-archive/nengo_spinnaker_2014", "max_issues_repo_head_hexsha": "2dcfe9506fedd9c4aebb66f5e1ba745d2f027871", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-06-15T19:51:15.000Z", "max_issues_repo_issues_event_max_datetime": "2015-12-16T16:25:39.000Z", "max_forks_repo_path": "examples/learn_communication_channel_1.py", "max_forks_repo_name": "ctn-archive/nengo_spinnaker_2014", "max_forks_repo_head_hexsha": "2dcfe9506fedd9c4aebb66f5e1ba745d2f027871", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8367346939, "max_line_length": 87, "alphanum_fraction": 0.6128590971, "include": true, "reason": "import numpy", "num_tokens": 421}
|
"""This pipeline is based on Orion https://github.com/orion-search/orion.
For bugs/issues, contact the Nesta team or myself at k.stathou@gmail.com.
"""
from metaflow import FlowSpec, step, Parameter
import pandas as pd
from sqlalchemy.sql import exists
from sqlalchemy import create_engine, and_
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv, find_dotenv
import glob
import toolz
import pickle
import os
import ast
import numpy as np
import ci_mapping
from ci_mapping import logger
from ci_mapping.data.create_db_and_tables import create_db_and_tables
from ci_mapping.data.query_mag import (
query_mag_api,
query_fields_of_study,
build_composite_expr,
query_by_id,
)
from ci_mapping.data.geocode import place_by_id, place_by_name, parse_response
from ci_mapping.utils.utils import unique_dicts, unique_dicts_by_value, flatten_lists
from ci_mapping.utils.utils import date_range, str2datetime, allocate_in_group
from ci_mapping.data.parse_mag_data import (
parse_affiliations,
parse_authors,
parse_fos,
parse_journal,
parse_papers,
parse_conference,
)
from ci_mapping.data.mag_orm import (
Paper,
PaperAuthor,
Journal,
Author,
Affiliation,
FieldOfStudy,
PaperFieldsOfStudy,
Conference,
AuthorAffiliation,
FosMetadata,
CoreControlGroup,
AffiliationLocation,
AffiliationType,
OpenAccess,
Reference,
)
from ci_mapping.analysis.descriptive_analysis import (
annual_publication_increase,
annual_citation_sum,
publications_by_affiliation_type,
international_collaborations,
industry_non_industry_collaborations,
open_access_publications,
annual_fields_of_study_usage,
papers_in_journals_and_conferences,
annual_publication_count,
plot_shannon_diversity,
)
from ci_mapping.analysis.data_cleaning import (
clean_data,
clean_author_affiliations,
)
from skbio.diversity.alpha import shannon
from collections import Counter
load_dotenv(find_dotenv())
config = ci_mapping.config["data"]
mag_config = ci_mapping.config["data"]["mag"]
plot_config = ci_mapping.config["plots"]
class CollectiveIntelligenceFlow(FlowSpec):
"""
Metaflow pipeline running the analysis of the CI research landscape.
Steps:
1. Create a PostgreSQL database and the required tables as shown in the ER diagram.
If they already exist, the initialisation is skipped.
2. Collect papers from MAG based on Fields of Study (FoS).
The pickled responses are stored locally in data/raw/.
3. Parse the MAG API response in a PostgreSQL database.
4. Collect the level of a Field of Study in MAG's hierarchy.
    5. Tag papers as CI and AI+CI. This method could be modified to divide a
        dataset into core and control groups.
6. Geocode author affiliation using Google Places API.
7. Tag journals as open access based on a seed list.
8. Find the type (industry, non-industry) of affiliations based on a seed list.
9. Process the data used in EDA. This involves changing data types, merging and
grouping tables.
10. Exploratory data analysis of the CI research landscape.
"""
db_name = Parameter(
"db_name", help="DB configuration filename", default=config["db_name"]
)
mag_start_date = Parameter(
"mag_start_date",
help="Start date of the data collection",
default=mag_config["mag_start_date"],
)
mag_end_date = Parameter(
"mag_end_date",
help="End date of the data collection",
default=mag_config["mag_end_date"],
)
intervals_in_a_year = Parameter(
"intervals_in_a_year",
help="Collection timeframes. Used to bypass MAG's throttling.",
default=mag_config["intervals_in_a_year"],
)
entity_name = Parameter(
"entity_name", help="MAG API field to query.", default=mag_config["entity_name"]
)
query_values = Parameter(
"query_values", help="Query sent to MAG", default=mag_config["query_values"]
)
metadata = Parameter(
"metadata", help="Fields to fetch from MAG.", default=mag_config["metadata"]
)
subscription_key = Parameter(
"subscription_key",
help="MAG API key stored in the .env file.",
default=os.getenv("mag_key"),
)
google_api_key = Parameter(
"google_api_key", help="Google API Key", default=os.getenv("google_key")
)
with_doi = Parameter(
"with_doi", help="Fetch ONLY papers with a DOI.", default=mag_config["with_doi"]
)
store_path = Parameter(
"store_path",
help="Path to store MAG response files.",
default=mag_config["store_path"],
)
external_data = Parameter(
"external_data",
help="Path to external data.",
default=f'{ci_mapping.project_dir}/{config["external_path"]}',
)
store_path_references = Parameter(
"store_path_references",
help="Path to store MAG response files.",
default=mag_config["store_path_references"],
)
references_path = Parameter(
"references_path",
help="Path to downloaded reference data.",
default=f'{ci_mapping.project_dir}/{config["references_path"]}',
)
fos_subset = Parameter(
"fos_subset",
help="Subset of Fields of Study related to AI.",
default=ci_mapping.config["fos_subset"],
)
oa_journals = Parameter(
"open_access_journals",
help="List of open access journals, mainly *Xivs",
default=ci_mapping.config["open_access"],
)
non_industry = Parameter(
"non_industry_affiliations",
help="List of non-industry affiliations.",
default=ci_mapping.config["affiliations"]["non_industry"],
)
fos_levels = Parameter(
"fos_levels",
help="Field of Study level to create Figure 7 for.",
default=plot_config["fos_levels"],
)
top_n = Parameter(
"top_n",
help="Number of most used FoS to plot in Figure 7.",
default=plot_config["top_n"],
)
preselected_fos = Parameter(
"preselected_fos",
help="FoS to use in Figure 7.",
default=plot_config["preselected_fos"],
)
excluded_fos = Parameter(
"excluded_fos",
help="FoS to NOT use in Figure 7.",
default=plot_config["excluded_fos"],
)
fos_mapping = Parameter(
"fos_mapping",
help="Merge FoS based on a given mapping.",
default=plot_config["fos_mapping"],
)
def _create_session(self):
"""Creates a PostgreSQL session."""
# Connect to postgresql
db_config = os.getenv(self.db_name)
engine = create_engine(db_config)
Session = sessionmaker(bind=engine)
return Session()
def _is_open_access(self, name):
"""Tag papers as open access based on a seed list."""
if name in set(self.oa_journals):
return 1
else:
return 0
def _find_non_industry_affiliations(self, name):
"""Tag affiliations as non-industry based on a seed list."""
if any(val in name for val in self.non_industry):
return 1
else:
return 0
@step
def start(self):
"""Creates the PostgreSQL database and tables if they do not exist."""
create_db_and_tables(self.db_name)
# Proceed to next task
self.next(self.collect_mag)
@step
def collect_mag(self):
"""Collect papers from MAG and store the response locally as a pickle."""
# Convert strings to datetime objects
mag_start_date = str2datetime(self.mag_start_date)
mag_end_date = str2datetime(self.mag_end_date)
# Number of time intervals for the data collection
total_intervals = (
abs(mag_start_date.year - mag_end_date.year) + 1
) * self.intervals_in_a_year
i = 0
query_count = 1000
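        # MAG paging: each request fetches up to query_count entities; the
        # offset advances until an empty page marks the end of the interval.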
for date in toolz.sliding_window(
2, list(date_range(mag_start_date, mag_end_date, total_intervals))
):
logger.info(f"Date interval: {date}")
expression = build_composite_expr(self.query_values, self.entity_name, date)
logger.info(f"{expression}")
has_content = True
# i = 1
offset = 0
# Request the API as long as we receive non-empty responses
while has_content:
logger.info(f"Query {i} - Offset {offset}...")
data = query_mag_api(
expression,
self.metadata,
self.subscription_key,
query_count=query_count,
offset=offset,
)
if self.with_doi:
# Keep only papers with a DOI
results = [
ents for ents in data["entities"] if "DOI" in ents.keys()
]
else:
results = [ents for ents in data["entities"]]
# Store results
with open(
f"{ci_mapping.project_dir}/{self.store_path}_{i}.pickle", "wb"
) as h:
pickle.dump(results, h)
logger.info(f"Number of stored results from query {i}: {len(results)}")
i += 1
offset += query_count
if len(results) == 0:
has_content = False
self.next(self.parse_mag)
@step
def parse_mag(self):
"""Parse MAG responses to PostgreSQL."""
# Connect to postgresql
s = self._create_session()
# Read MAG responses
data = []
for filename in glob.iglob("".join([self.external_data, "*.pickle"])):
with open(filename, "rb") as h:
data.extend(pickle.load(h))
# Collect IDs from tables to ensure we're not inserting duplicates
paper_ids = {id_[0] for id_ in s.query(Paper.id)}
author_ids = {id_[0] for id_ in s.query(Author.id)}
fos_ids = {id_[0] for id_ in s.query(FieldOfStudy.id)}
aff_ids = {id_[0] for id_ in s.query(Affiliation.id)}
# Remove duplicates and keep only papers that are not already in the mag_papers table.
data = [
d for d in unique_dicts_by_value(data, "Id") if d["Id"] not in paper_ids
]
logger.info(f"Number of unique papers not existing in DB: {len(data)}")
papers = [parse_papers(response) for response in data]
logger.info(f"Completed parsing papers: {len(papers)}")
journals = [
parse_journal(response, response["Id"])
for response in data
if "J" in response.keys()
]
logger.info(f"Completed parsing journals: {len(journals)}")
conferences = [
parse_conference(response, response["Id"])
for response in data
if "C" in response.keys()
]
logger.info(f"Completed parsing conferences: {len(conferences)}")
# Parse author information
items = [parse_authors(response, response["Id"]) for response in data]
authors = [
d
for d in unique_dicts_by_value(
flatten_lists([item[0] for item in items]), "id"
)
if d["id"] not in author_ids
]
paper_with_authors = unique_dicts(flatten_lists([item[1] for item in items]))
logger.info(f"Completed parsing authors: {len(authors)}")
logger.info(f"Completed parsing papers_with_authors: {len(paper_with_authors)}")
# Parse Fields of Study
items = [
parse_fos(response, response["Id"])
for response in data
if "F" in response.keys()
]
paper_with_fos = unique_dicts(flatten_lists([item[0] for item in items]))
fields_of_study = [
d
for d in unique_dicts(flatten_lists([item[1] for item in items]))
if d["id"] not in fos_ids
]
logger.info(f"Completed parsing fields_of_study: {len(fields_of_study)}")
logger.info(f"Completed parsing paper_with_fos: {len(paper_with_fos)}")
# Parse affiliations
items = [parse_affiliations(response, response["Id"]) for response in data]
affiliations = [
d
for d in unique_dicts(flatten_lists([item[0] for item in items]))
if d["id"] not in aff_ids
]
paper_author_aff = unique_dicts(flatten_lists([item[1] for item in items]))
logger.info(f"Completed parsing affiliations: {len(affiliations)}")
logger.info(f"Completed parsing author_with_aff: {len(paper_author_aff)}")
logger.info("Parsing completed!")
# Insert dicts into postgresql
s.bulk_insert_mappings(Paper, papers)
s.bulk_insert_mappings(Journal, journals)
s.bulk_insert_mappings(Conference, conferences)
s.bulk_insert_mappings(Author, authors)
s.bulk_insert_mappings(PaperAuthor, paper_with_authors)
s.bulk_insert_mappings(FieldOfStudy, fields_of_study)
s.bulk_insert_mappings(PaperFieldsOfStudy, paper_with_fos)
s.bulk_insert_mappings(Affiliation, affiliations)
s.bulk_insert_mappings(AuthorAffiliation, paper_author_aff)
s.commit()
logger.info("Committed to DB!")
self.next(self.collect_fields_of_study_level)
@step
def collect_fields_of_study_level(self):
"""Collect Fields' of Study metadata."""
# Connect to postgresql
s = self._create_session()
# Keep the FoS IDs that haven't been collected yet
fields_of_study_ids = [
id_[0]
for id_ in s.query(FieldOfStudy.id).filter(
~exists().where(FieldOfStudy.id == FosMetadata.id)
)
]
logger.info(f"Fields of study left: {len(fields_of_study_ids)}")
# Collect FoS metadata
fos = query_fields_of_study(self.subscription_key, ids=fields_of_study_ids)
# Parse api response
for response in fos:
s.add(FosMetadata(id=response["id"], level=response["level"]))
s.commit()
self.next(self.fos_groups)
@step
def fos_groups(self):
"""Tag Fields of Study as Core Collective Intelligence and AI+CI.
        This method could be extended to divide a dataset into core and
        control groups.
"""
# Connect to postgresql
s = self._create_session()
# Delete rows in CoreControlGroup
s.query(CoreControlGroup).delete()
s.commit()
# Fetch postgres tables
fos = pd.read_sql(s.query(FieldOfStudy).statement, s.bind)
pfos = pd.read_sql(s.query(PaperFieldsOfStudy).statement, s.bind)
# Merge and groupby so that FoS are in a list
pfos = pfos.merge(fos, left_on="field_of_study_id", right_on="id")
pfos = pd.DataFrame(pfos.groupby("paper_id")["norm_name"].apply(list))
# Allocate papers in CI, AI+CI groups based on Fields of Study.
pfos["type"] = pfos.norm_name.apply(allocate_in_group, args=([self.fos_subset]))
logger.info(f"CI papers: {pfos[pfos['type']=='CI'].shape[0]}")
logger.info(f"AI+CI papers: {pfos[pfos['type']=='AI_CI'].shape[0]}")
for idx, row in pfos.iterrows():
s.add(CoreControlGroup(id=idx, type=row["type"]))
s.commit()
self.next(self.geocode_affiliation)
@step
def geocode_affiliation(self):
"""Geocode author affiliation using Google Places API."""
# Connect to postgresql
s = self._create_session()
# Fetch affiliations that have not been geocoded yet.
queries = s.query(Affiliation.id, Affiliation.affiliation).filter(
~exists().where(Affiliation.id == AffiliationLocation.affiliation_id)
)
logger.info(f"Number of places need geocoding: {queries.count()}")
for id, name in queries:
r = place_by_name(name, self.google_api_key)
if r is not None:
response = place_by_id(r, self.google_api_key)
place_details = parse_response(response)
place_details.update({"affiliation_id": id})
s.add(AffiliationLocation(**place_details))
s.commit()
else:
continue
self.next(self.open_access_journals)
@step
def open_access_journals(self):
"""Tag journals as open access based on a seed list."""
# Connect to postgresql
s = self._create_session()
# Delete rows in OpenAccess
s.query(OpenAccess).delete()
s.commit()
# Get journal names and IDs
journal_access = [
{"id": id, "open_access": self._is_open_access(journal_name)}
for (id, journal_name) in s.query(Journal.id, Journal.journal_name)
.distinct()
.all()
]
logger.info(f"{len(journal_access)}")
# Store journal types
s.bulk_insert_mappings(OpenAccess, journal_access)
s.commit()
self.next(self.affiliation_type)
@step
def affiliation_type(self):
"""Find the type (industry, non-industry) of an
affiliation based on a seed list.
"""
# Connect to postgresql
s = self._create_session()
# Delete rows in AffiliationType
s.query(AffiliationType).delete()
s.commit()
logger.info(self.non_industry)
# Get affiliation names and IDs
aff_types = [
{
"id": aff.id,
"type": self._find_non_industry_affiliations(aff.affiliation),
}
for aff in s.query(Affiliation)
.filter(and_(~exists().where(Affiliation.id == AffiliationType.id)))
.all()
]
logger.info(f"Mapped {len(aff_types)} affiliations.")
# Store affiliation types
s.bulk_insert_mappings(AffiliationType, aff_types)
s.commit()
self.next(self.data_wrangling)
@step
def data_wrangling(self):
"""Cleaning data for exploratory data analysis."""
# Connect to postgresql
s = self._create_session()
# Read geocoded affiliations
self.aff_location = pd.read_sql(s.query(AffiliationLocation).statement, s.bind)
self.aff_location = self.aff_location.dropna(subset=["country"])
# Read journals, open access flag and conferences
self.journals = pd.read_sql(s.query(Journal).statement, s.bind)
self.open_access = pd.read_sql(s.query(OpenAccess).statement, s.bind)
self.conferences = pd.read_sql(s.query(Conference).statement, s.bind)
# Read Fields of Study and their metadata (level in hierarchy)
pfos = pd.read_sql(s.query(PaperFieldsOfStudy).statement, s.bind)
fos = pd.read_sql(s.query(FieldOfStudy).statement, s.bind)
self.pfos = pfos.merge(fos, left_on="field_of_study_id", right_on="id")[
["paper_id", "field_of_study_id", "name"]
]
# That's very hacky, sorry :(
self.pfos["name"] = [
self.fos_mapping[n] if n in self.fos_mapping.keys() else n
for n in self.pfos.name
]
self.fos_metadata = pd.read_sql(s.query(FosMetadata).statement, s.bind)
# Data wrangling
self.data = clean_data(s)
self.aff_papers, self.paper_author_aff = clean_author_affiliations(s, self.data)
self.next(self.eda)
@step
def eda(self):
"""Exploratory data analysis of the CI research landscape."""
# Figure 1: Annual publication increase (base year: 2000)
annual_publication_increase(self.data)
# Figure 2: Annual sum of citations
annual_citation_sum(self.data)
# Figure 3: Publications by industry and non-industry affiliations
publications_by_affiliation_type(self.aff_papers)
# Figure 4: International collaborations: % of cross-country teams in CI, AI+CI
international_collaborations(self.paper_author_aff, self.aff_location)
# Figure 5: Industry - academia collaborations: % in CI, AI+CI
industry_non_industry_collaborations(self.paper_author_aff)
# Figure 6: Adoption of open access by CI, AI+CI
open_access_publications(self.data, self.journals, self.open_access)
# Figure 7: Field of study comparison for CI, AI+CI.
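        # Called twice below: first with preselected_fos left empty, then
        # restricted to the preselected FoS list.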
annual_fields_of_study_usage(
self.data,
self.pfos,
self.fos_metadata,
self.fos_levels,
top_n=self.top_n,
preselected_fos=[],
excluded_fos=self.excluded_fos
# preselected_fos=self.preselected_fos,
)
annual_fields_of_study_usage(
self.data,
self.pfos,
self.fos_metadata,
self.fos_levels,
top_n=self.top_n,
excluded_fos=self.excluded_fos,
preselected_fos=self.preselected_fos,
)
# Figure 8: Annual publications in conferences and journals.
papers_in_journals_and_conferences(
self.data, self.journals, self.conferences, self.top_n
)
# Figure 9: Annual publication count
annual_publication_count(self.data)
# self.next(self.end)
self.next(self.collect_references)
@step
def collect_references(self):
# Connect to postgresql
s = self._create_session()
# Read mag_papers table
df = pd.read_sql(s.query(Paper).statement, s.bind)
# Add nulls and transform to list
df["references"] = df["references"].apply(lambda x: np.nan if x == "NaN" else x)
df["references"] = df["references"].apply(
lambda x: ast.literal_eval(x) if isinstance(x, str) else np.nan
)
# Get all references
refs = []
for references in df["references"].dropna():
refs.extend(references)
logger.info(f"Unique references: {len(set(refs))}")
i = 0
# Create queries
for ids in toolz.itertoolz.partition_all(1000, set(refs)):
query = query_by_id(ids)
data = query_mag_api(
query,
self.metadata,
self.subscription_key,
)
if self.with_doi:
# Keep only papers with a DOI
results = [ents for ents in data["entities"] if "DOI" in ents.keys()]
else:
results = [ents for ents in data["entities"]]
# Store results
with open(
f"{ci_mapping.project_dir}/{self.store_path_references}_{i}.pickle",
"wb",
) as h:
pickle.dump(results, h)
logger.info(f"Number of stored results from query {i}: {len(results)}")
i += 1
self.next(self.parse_references)
@step
def parse_references(self):
"""Parse MAG responses to PostgreSQL."""
# Connect to postgresql
s = self._create_session()
# Read MAG references
data = []
for filename in glob.iglob("".join([self.references_path, "*.pickle"])):
with open(filename, "rb") as h:
data.extend(pickle.load(h))
# Collect IDs from table to ensure we're not inserting duplicates
paper_ids = {id_[0] for id_ in s.query(Reference.id)}
# Remove duplicates and keep only papers that are not already in the mag_papers table.
data = [
d for d in unique_dicts_by_value(data, "Id") if d["Id"] not in paper_ids
]
logger.info(f"Number of unique papers not existing in DB: {len(data)}")
papers = [parse_papers(response) for response in data]
logger.info(f"Completed parsing papers: {len(papers)}")
logger.info("Parsing completed!")
# Insert dicts into postgresql
s.bulk_insert_mappings(Reference, papers)
s.commit()
self.next(self.shannon_diversity)
@step
def shannon_diversity(self):
"""Calculate shannon diversity using the Fields of Study."""
# Connect to postgresql
s = self._create_session()
fos = pd.read_sql(s.query(FieldOfStudy).statement, s.bind)
pfos = pd.read_sql(s.query(PaperFieldsOfStudy).statement, s.bind)
paper = pd.read_sql(s.query(Paper).statement, s.bind)
# List of fields of study names for each paper
pfos = pfos.merge(fos, left_on="field_of_study_id", right_on="id")[
["paper_id", "field_of_study_id", "name"]
]
grouped_pfos = pd.DataFrame(
pfos.groupby("paper_id").name.agg(list)
).reset_index()
paper = paper.merge(grouped_pfos, left_on="id", right_on="paper_id")
# Create an empty dataframe with all the FoS as column names
fos = set(flatten_lists(paper["name"].to_list()))
fos = pd.DataFrame(columns=fos)
# Fill the count vectors
for year, fields_of_study in (
paper[["year", "name"]].groupby("year").name.agg(list).iteritems()
):
annual_count_vector = Counter(flatten_lists(fields_of_study))
for field_of_study, count in annual_count_vector.items():
fos.loc[year, field_of_study] = count
fos = fos.fillna(0)
# Calculate annual shannon diversity
df = pd.DataFrame()
for i, year in enumerate(fos.index):
df.loc[i, "Shannon diversity index"] = shannon(fos.loc[year].values)
df.loc[i, "Year"] = year
plot_shannon_diversity(df)
self.next(self.end)
@step
def end(self):
"""Gracefully exit metaflow."""
logger.info("Tasks completed.")
if __name__ == "__main__":
CollectiveIntelligenceFlow()
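# Usage note (hedged sketch, not part of the original file): Metaflow flows
# are driven from the command line, and any Parameter declared above can be
# overridden per run; the date below is hypothetical:
#   python run_pipeline.py run --mag_start_date 2000-01-01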
|
{"hexsha": "cfd804493f80c70784d34733c23b3157b1e6de45", "size": 26074, "ext": "py", "lang": "Python", "max_stars_repo_path": "ci_mapping/run_pipeline.py", "max_stars_repo_name": "kstathou/ci_mapping", "max_stars_repo_head_hexsha": "73ba03a67d76bf5f37e4507674e60737ad3d37ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ci_mapping/run_pipeline.py", "max_issues_repo_name": "kstathou/ci_mapping", "max_issues_repo_head_hexsha": "73ba03a67d76bf5f37e4507674e60737ad3d37ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ci_mapping/run_pipeline.py", "max_forks_repo_name": "kstathou/ci_mapping", "max_forks_repo_head_hexsha": "73ba03a67d76bf5f37e4507674e60737ad3d37ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7668038409, "max_line_length": 94, "alphanum_fraction": 0.618470507, "include": true, "reason": "import numpy", "num_tokens": 5856}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by Hao Luo at 1/25/21
"""paper_plot_fig3.py
:description : script
:param :
:returns:
:rtype:
"""
import os
import matplotlib
import numpy as np
import pandas as pd
matplotlib.rc('font', family="Arial")
matplotlib.rcParams["font.family"] = 'Arial' # 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['Arial']
os.chdir('../../ComplementaryData/Step_04_Pan_Core_model/')
print('----- loading data -----')
output_table = 'models_df_products_2.tsv'
her_name = [
'LR1',
'LR10', 'LR11', 'LR12', 'LR13', 'LR14',
'LR17', 'LR18', 'LR19', 'LR2', 'LR3',
'LR4', 'LR6', 'LR7', 'LR8', 'LR9'
]
omn_name = ['JCM1112', 'MM2_3', 'MM4_1A', 'CF48_3A', 'SD2112', 'I5007', 'ATCC53608', 'DSM200016', 'IRT', 'TD1', 'mlc3',
'100_23', '20_2', '3c6', 'lpuph',]
sour_name = ['LTH5448', 'TMW1_112', 'TMW1_656', 'LTH2584']
# models_df_products_1 = pd.read_csv(output_table, sep='\t', index_col=0)
# models_df_products_2 = pd.read_csv('models_df_products_.tsv', sep='\t', index_col=0)
# models_df_products = models_df_products_1.merge(models_df_products_2[['model_id','hista_c','dhap_c' ,'mthgxl_c', '12ppd__R_c']],
# how='left', on='model_id')
# models_df_products.to_csv(output_table, sep='\t')
models_df_products = pd.read_csv(output_table, sep='\t', index_col=0)
models_df_products['group'] = 'her'
models_df_products.loc[models_df_products['model_id'].isin(omn_name), ['group']] = 'omn'
models_df_products.loc[models_df_products['model_id'].isin(sour_name), ['group']] = 'sou'
models_df_products = models_df_products.sort_values(by=['group', ])
# Index(['model_id', 'growth', 'reaset', 'metset', 'genset', 'lac__L_c', 'ac_c',
# 'etoh_c', 'hista_c', 'fol_c', 'adeadocbl_c', 'ppoh_c', '13ppd_c',
# 'dhap_c', 'mthgxl_c', '12ppd__R_c', 'group'],
# dtype='object')
products = ['lac__L_c', 'ac_c', 'etoh_c', 'hista_c', 'dhap_c', '12ppd__R_c', '13ppd_c', ]
results = {'her': [], 'omn': [], 'sou': []}
for group_i in results.keys():
df_temp = models_df_products[(models_df_products['group'] == group_i)]
len_i = df_temp.shape[0]
for product_i in products:
        positive_i = df_temp[df_temp[product_i] > 0.1].shape[0] / len_i
        results[group_i].append(positive_i)
print(results)
products_plot_df = pd.DataFrame.from_dict(data=results, orient='index', columns=products)
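# Hedged sanity check (not in the original script): the per-group fractions
# above can be reproduced vectorised with pandas; both paths should agree.
check_df = (models_df_products.groupby('group')[products]
            .apply(lambda g: (g > 0.1).mean()))
assert np.allclose(products_plot_df.sort_index().values,
                   check_df.sort_index().values)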
# %%
import matplotlib.pyplot as plt
colors = plt.cm.get_cmap('Set2').colors
colors = np.array(colors)
w = 0.35
for product_i in products:
fig, axs = plt.subplots(1, 3, figsize=(3 * w, w))
fig.patch.set_alpha(0)
axs[0].pie([products_plot_df[product_i][0], 1.0 - products_plot_df[product_i][0]], startangle=90,
colors=colors[[0, -1]], )
axs[1].pie([products_plot_df[product_i][1], 1.0 - products_plot_df[product_i][1]], startangle=90,
colors=colors[[1, -1]])
axs[2].pie([products_plot_df[product_i][2], 1.0 - products_plot_df[product_i][2]], startangle=90,
colors=colors[[2, -1]])
fig.subplots_adjust(wspace=-0.4)
fig.savefig('fig3_' + product_i + '_percent.pdf', bbox_inches='tight')
fig.show()
# %%
fig, axs = plt.subplots(4, 1, figsize=(w * 2, 3.5 * w))
fig.patch.set_alpha(0)
axs[0].pie([1, 0], startangle=180, labels=['Herbivore', ''],
colors=colors[[0, -1]], textprops={'family': 'Arial', 'size': 8})
axs[1].pie([1, 0], startangle=180, labels=['Omnivore', ''],
colors=colors[[1, -1]], textprops={'family': 'Arial', 'size': 8})
axs[2].pie([1, 0], startangle=180, labels=['Sourdough', ''],
colors=colors[[2, -1]], textprops={'family': 'Arial', 'size': 8})
axs[3].pie([0, 1], startangle=180, labels=['', 'Negative'],
colors=colors[[2, -1]], textprops={'family': 'Arial', 'size': 8})
fig.subplots_adjust(hspace=-0.1)
fig.savefig('fig3_' + 'lenged' + '_percent.pdf', bbox_inches='tight')
fig.show()
fig, axs = plt.subplots(1, 3, figsize=(w * 12, w))
fig.patch.set_alpha(0)
axs[0].pie([0.5, 0.5], startangle=90, labels=['', 'Herbivore'],
           colors=colors[[-1, 0]], textprops={'family': 'Arial', 'size': 8})
axs[1].pie([0.5, 0.5], startangle=90, labels=['', 'Omnivore'],
           colors=colors[[-1, 1]], textprops={'family': 'Arial', 'size': 8})
axs[2].pie([0.5, 0.5], startangle=90, labels=['', 'Sourdough'],
           colors=colors[[-1, 2]], textprops={'family': 'Arial', 'size': 8})
# fig.subplots_adjust(hspace=-0.1)
fig.savefig('fig3_' + 'lenged_2' + '_percent.pdf', bbox_inches='tight')
fig.show()
|
{"hexsha": "cebb1d3a9b51a418eac790c2125083775f36d59b", "size": 4556, "ext": "py", "lang": "Python", "max_stars_repo_path": "ComplementaryScripts/Step_04_Pan_Core_mode/paper_plot_fig3.py", "max_stars_repo_name": "HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM", "max_stars_repo_head_hexsha": "9be6a48e7467e0c81b0b974180860d599fc9c201", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ComplementaryScripts/Step_04_Pan_Core_mode/paper_plot_fig3.py", "max_issues_repo_name": "HaoLuoChalmers/Lactobacillus_reuteri_MM41A_GEM", "max_issues_repo_head_hexsha": "9be6a48e7467e0c81b0b974180860d599fc9c201", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-19T16:00:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-19T16:00:03.000Z", "max_forks_repo_path": "ComplementaryScripts/Step_04_Pan_Core_mode/paper_plot_fig3.py", "max_forks_repo_name": "SysBioChalmers/Lactobacillus_reuteri_MM41A_GEM", "max_forks_repo_head_hexsha": "9be6a48e7467e0c81b0b974180860d599fc9c201", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9666666667, "max_line_length": 130, "alphanum_fraction": 0.6286215979, "include": true, "reason": "import numpy", "num_tokens": 1529}
|
import sys, os
sys.path.append('../src/utils/')
sys.path.append('../exps/Models/')
import numpy as np
import matplotlib.pyplot as plt
from polygonal_obstacles import PolygonalObstacle as PolyObs
from viz import *
from ISS import get_ISS_zones
lims_btm, lims_up = np.array([5.,-3.5, 3.]), np.array([13., 8.5, 6.5])
keepin_zones, keepout_zones = get_ISS_zones()
# --------------------------------------------
plt.figure(1)
ax = plt.gca()
# --------------------------------------------
for obs in keepin_zones:
center, widths = obs.c, 2*np.array([obs.dx,obs.dy,obs.dz])
plot_rectangle(ax, center[:2], widths[:2], color='g')
for obs in keepout_zones:
center, widths = obs.c, 2*np.array([obs.dx,obs.dy,obs.dz])
plot_rectangle(ax, center[:2], widths[:2], color='r')
plt.xlim([lims_btm[0], lims_up[0]])
plt.ylim([lims_btm[1], lims_up[1]])
plt.draw()
plt.show()
# --------------------------------------------
|
{"hexsha": "30c8026cb1105861bca88d912e2377885b25701d", "size": 904, "ext": "py", "lang": "Python", "max_stars_repo_path": "exps/ISS_script.py", "max_stars_repo_name": "StanfordASL/ccscp", "max_stars_repo_head_hexsha": "a727dfc10d4acc43248c9a525a37279a70cecd80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-03-21T14:00:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T08:29:07.000Z", "max_issues_repo_path": "exps/ISS_script.py", "max_issues_repo_name": "StanfordASL/ccscp", "max_issues_repo_head_hexsha": "a727dfc10d4acc43248c9a525a37279a70cecd80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exps/ISS_script.py", "max_forks_repo_name": "StanfordASL/ccscp", "max_forks_repo_head_hexsha": "a727dfc10d4acc43248c9a525a37279a70cecd80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-03-20T00:02:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T08:29:09.000Z", "avg_line_length": 28.25, "max_line_length": 70, "alphanum_fraction": 0.5896017699, "include": true, "reason": "import numpy", "num_tokens": 256}
|
[STATEMENT]
lemma V_imp_Nil: "V (trn # tr) = [] \<Longrightarrow> V tr = []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. V (trn # tr) = [] \<Longrightarrow> V tr = []
[PROOF STEP]
by (cases "\<phi> trn") auto
|
{"llama_tokens": 89, "file": "Bounded_Deducibility_Security_BD_Security_TS", "length": 1}
|
# DEPENDENCIES
from globals import *
# LIBRARIES
import cv2 as cv
import numpy as np
from urllib.request import urlopen
# IMAGE CONVERSION
def pathIsURL(imagePath):
if imagePath[:4] == "http":
return True
else:
return False
def imagePathIsValid(imagePath):
if pathIsURL(imagePath):
        # If it's an image at a URL
try:
urlopen(imagePath)
return ""
except Exception as e:
return "Error on opening URL: " + str(e) + "."
else:
        # If it's an image on the computer
        image = cv.imread(imagePath)
        if image is None:
            # cv.imread returns None when the file cannot be read
            return "Error on opening local image: File doesn't exist."
        else:
            return ""
def getImage(imagePath):
image = []
if pathIsURL(imagePath):
        # If the image is at a URL
resp = urlopen(imagePath)
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv.imdecode(image, cv.IMREAD_COLOR)
else:
        # If the image is a local file
image = cv.imread(imagePath)
return image
# ERROR CHECKING
def valueIsEmpty(values, key):
if values["-" + key + "-"] == "":
return True
else:
return False
def valueIsNumber(values, key):
try:
float(values["-" + key + "-"])
return True
except:
return False
def valueIsEvenNumber(values, key):
if valueIsNumber(values, key):
if int(values["-" + key + "-"]) % 2 == 0:
return True
return False
def valueIsGreaterThanOne(values, key):
    if valueIsNumber(values, key):
        return int(values["-" + key + "-"]) > 1
    return False
def valueAsBoolean(values, key):
boolean = bool(values["-" + key + "-"])
return boolean
def checkErrors(values, imagePath):
returnValue = [True, "[ERROR]"] # [0] is True or False, [1] is a string representing the error
# Image URL/Path
if valueIsEmpty(values, "imagePath"):
returnValue[1] = "Image URL/Path cannot be empty."
# Blocksize
elif valueIsEmpty(values, "blockSize") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Blocksize cannot be empty."
elif not valueIsNumber(values, "blockSize") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Blocksize needs to be a number."
elif valueIsEvenNumber(values, "blockSize") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Blocksize must be an odd number."
elif not valueIsGreaterThanOne(values, "blockSize") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Blocksize must be greater than one."
# C
elif valueIsEmpty(values, "c") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "C property cannot be empty."
elif not valueIsNumber(values, "c") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "C needs to be a number."
# Simple Threshold
elif valueIsEmpty(values, "simpleThreshold") and not valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Simple Threshold property cannot be empty."
elif not valueIsNumber(values, "simpleThreshold") and not valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Simple Threshold needs to be a number."
# Simple Threshold Max Value
elif valueIsEmpty(values, "simpleThresholdMaxValue") and not valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Simple Threshold Max Value property cannot be empty."
elif not valueIsNumber(values, "simpleThreshold") and not valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Simple Threshold Max Value needs to be a number."
# Adaptive Threshold Max Value
elif valueIsEmpty(values, "adaptiveThresholdMaxValue") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Adaptive Threshold Max Value property cannot be empty."
elif not valueIsNumber(values, "adaptiveThreshold") and valueAsBoolean(values, "adaptiveThreshold"):
returnValue[1] = "Adaptive Threshold Max Value needs to be a number."
# Delay
elif valueIsEmpty(values, "delay"):
returnValue[1] = "Delay property cannot be empty."
elif not valueIsNumber(values, "simpleThreshold"):
returnValue[1] = "Delay needs to be a number."
# If path is not valid
elif imagePathIsValid(imagePath) != "":
errorMessage = imagePathIsValid(imagePath)
returnValue[1] = errorMessage
# If no errors
else:
returnValue = [False, ""]
return returnValue
# DRAWER
def generateSimpleContours(imageGray, threshold, maxValue, type, approxMethod):
thresholdAmount, imageThresholdedNormal = cv.threshold(imageGray, threshold, maxValue, THRESHOLD_TYPES[type])
contours = cv.findContours(imageThresholdedNormal, cv.RETR_LIST, THRESHOLD_CONTOUR_APPROX_METHODS[approxMethod])[0]
return contours, imageThresholdedNormal
def generateAdaptiveContours(imageGray, maxValue, adaptiveMethod, type, blockSize, c, approxMethod):
imageThresholdedAdaptive = cv.adaptiveThreshold(imageGray, maxValue, ADAPTIVE_THRESHOLD_METHODS[adaptiveMethod], THRESHOLD_TYPES[type], blockSize, c)
contours = cv.findContours(imageThresholdedAdaptive, cv.RETR_LIST, THRESHOLD_CONTOUR_APPROX_METHODS[approxMethod])[0]
return contours, imageThresholdedAdaptive
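# --- Illustrative usage (hedged sketch, not part of the original module) ---
# The dict mimics the "-KEY-" naming convention that checkErrors expects;
# every key/value below, and the image path, is hypothetical.
if __name__ == "__main__":
    values = {
        "-imagePath-": "example.png",
        "-blockSize-": "11", "-c-": "2",
        "-simpleThreshold-": "127", "-simpleThresholdMaxValue-": "255",
        "-adaptiveThreshold-": "", "-adaptiveThresholdMaxValue-": "255",
        "-delay-": "10",
    }
    hasError, message = checkErrors(values, values["-imagePath-"])
    print(hasError, message)  # True plus an error string if example.png is missing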
|
{"hexsha": "a76a65c790891524b1ca9c1222875d8d35601c76", "size": 5549, "ext": "py", "lang": "Python", "max_stars_repo_path": "functions.py", "max_stars_repo_name": "418888967/AutoDrawer-1", "max_stars_repo_head_hexsha": "746dfacee3eff942a0f44d8c135463610e47fba6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-20T15:38:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-20T15:38:42.000Z", "max_issues_repo_path": "functions.py", "max_issues_repo_name": "418888967/AutoDrawer-1", "max_issues_repo_head_hexsha": "746dfacee3eff942a0f44d8c135463610e47fba6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functions.py", "max_forks_repo_name": "418888967/AutoDrawer-1", "max_forks_repo_head_hexsha": "746dfacee3eff942a0f44d8c135463610e47fba6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4104477612, "max_line_length": 154, "alphanum_fraction": 0.6626419175, "include": true, "reason": "import numpy", "num_tokens": 1272}
|
// Copyright Louis Dionne 2013-2016
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#include <boost/hana/type.hpp>
#include <type_traits>
namespace hana = boost::hana;
// Makes sure that `hana::type`s have a nested ::type alias
struct T;
static_assert(std::is_same<decltype(hana::type_c<T>)::type, T>{}, "");
static_assert(std::is_same<hana::type<T>::type, T>{}, "");
int main() { }
|
{"hexsha": "b1ef119832d59358e70cf72f8e6a05b7c5961b14", "size": 504, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/type/nested_type.cpp", "max_stars_repo_name": "nxplatform/nx-mobile", "max_stars_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-12-15T19:57:24.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-15T19:57:24.000Z", "max_issues_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/type/nested_type.cpp", "max_issues_repo_name": "nxplatform/nx-mobile", "max_issues_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thirdparty-cpp/boost_1_62_0/libs/hana/test/type/nested_type.cpp", "max_forks_repo_name": "nxplatform/nx-mobile", "max_forks_repo_head_hexsha": "0dc174c893f2667377cb2ef7e5ffeb212fa8b3e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5263157895, "max_line_length": 82, "alphanum_fraction": 0.6765873016, "num_tokens": 132}
|
import numpy as np
from scipy import integrate
from .utility import read_csv, atmo_model
import matplotlib.pyplot as mpl
from stabsim.rocket import Rocket
"""
Simulates a spin-stabilized launch profile
"""
class Profile:
def __init__(self, rocket, motor, init_spin, launch_altit=0, length=0, motor_pos=0, hangle=4, timesteps=50):
# Static constants #
self.rocket = rocket # This should be from the Rocket class
self.motor = motor
self.init_spin = init_spin
self.motor_pos = motor_pos
self.aoa = hangle
# Time varying constants #
if length == 0: # assume a length==0 implies simulation should end at end of motor burn
length = self.motor.t[-1]
self.tt = np.linspace(0, length, timesteps)
self.mass = np.array([self.motor.mass(t) + self.rocket.static_params["Mass"] for t in self.tt])
# Solve eqns of motion #
z0 = [launch_altit, 0] # Initial condition
t = self.tt
def model(z0, t):
            # Function that returns the derivatives [dxdt, dvdt] at time t
# Equations based on https://www.overleaf.com/project/5fe249e8a42b0068add612ab
x, v = z0
ind = np.abs(self.tt - t).argmin()
alt = x * np.cos(np.radians(hangle))
self.rocket.update_coeffs([v], self.aoa, [alt], [self.mass[ind]])
dxdt = v
dvdt = self.motor.thrust(t) / (self.mass[ind]) + -9.80665 + \
self.drag(alt, v) / (self.mass[ind]) + self.lift(alt, v) / (self.mass[ind])
dzdt = [dxdt, dvdt]
return dzdt
z = integrate.odeint(model, z0, t)
self.altit = z[:,0] * np.cos(np.radians(hangle))
self.vel = z[:, 1]
self.rocket.update_coeffs(self.vel, self.aoa, self.altit, self.mass, single=False)
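        # Cache the density profile: from here on, self.rho is the ndarray of
        # densities along the trajectory, shadowing the rho() method below.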
self.rho = self.rho()
def drag(self, x, v):
cd = self.rocket.get_cd()
ref_area = np.pi / 4 * (self.rocket.static_params['Diameter'] ** 2)
return -cd * ref_area * 0.5 * (v ** 2) * self.rho([x])
def lift(self, x, v):
cl = self.rocket.get_cl_alpha()
ref_area = np.pi / 4 * (self.rocket.static_params['Diameter'] ** 2)
return 0.5 * self.rho([x]) * (v ** 2) * ref_area * cl * np.sin(np.radians(self.aoa))
def temp(self, altit=-1):
if altit == -1:
altit = self.altit
return np.array([atmo_model(x)[1] for x in altit])
def rho(self, altit=-1):
if altit == -1:
altit = self.altit
return np.array([atmo_model(x)[0] for x in altit])
def iz(self):
return np.array([self.rocket.static_params["I_z"] + self.motor.iz(time) + self.motor.mass(time) * self.motor_pos**2 \
for time in self.tt])
def ix(self):
#TODO: intermediate axis from com?
return np.array([self.rocket.static_params["I_x"] + self.motor.ix(time) + self.motor.mass(time) * self.motor_pos**2 \
for time in self.tt])
"""
Gyroscopic stability criterion in radians per second
Stability of moving spinning top
"""
def gyro_stab_crit(self):
ref_area = np.pi / 4 * (self.rocket.static_params['Diameter'] ** 2)
gyro_spin_crit = self.vel / self.ix() * np.sqrt(2 * self.rho * self.iz() * ref_area * \
np.abs(self.rocket.get_cm_alpha()) * self.rocket.static_params['Diameter'])
return np.abs(gyro_spin_crit)
"""
McCoy dynamics stability criterion in radians per second
Incorporates aerodynamic effects
"""
def dynamic_stab_crit(self):
cm_alpha = np.abs(self.rocket.get_cm_alpha()) # Overturning (a.k.a. pitching/rolling) moment coeff
cl_alpha = self.rocket.get_cl_alpha() # Lift force coeff
cd = self.rocket.get_cd() # Drag coeff
cm_alpha_dot_plus_cm_q = self.rocket.get_cm_dot() # Pitch damping moment coefficient (due to rate of change of angle of attack plus tranverse angular velocity)
cm_p_alpha = self.rocket.get_cm_p_alpha() # Magnus moment coeff
ref_area = np.pi / 4 * (self.rocket.static_params['Diameter'] ** 2)
dyn_spin_crit = self.vel * np.sqrt(2 * self.rho * ref_area * self.rocket.static_params['Diameter'] * cm_alpha * self.ix()) * \
(cl_alpha - cd - ((self.mass * self.rocket.static_params['Diameter'] ** 2 / self.ix()) * (cm_alpha_dot_plus_cm_q))) / \
(2 * (self.iz() * cl_alpha + self.mass * self.rocket.static_params['Diameter'] ** 2 * cm_p_alpha))
return np.abs(dyn_spin_crit)
def spin(self):
omega0 = self.init_spin
C_spin = self.rocket.get_c_spin()
def spin_damping(omega, t, C, profile):
ind = np.abs(profile.tt - t).argmin()
ref_area = np.pi / 4 * (profile.rocket.static_params['Diameter'] ** 2)
domegadt = 0.5 * C * profile.rho[ind] * profile.vel[ind] * ref_area * omega * profile.rocket.static_params['Diameter']
return domegadt
return integrate.odeint(spin_damping, omega0, self.tt, args=(C_spin, self))
def is_stable(self):
return all(self.spin() > self.gyro_stab_crit()) and \
all(self.spin() > self.dynamic_stab_crit())
def min_spin(self):
gyro_max = np.max(self.gyro_stab_crit())
dyn_max = np.max(self.dynamic_stab_crit())
return max(gyro_max, dyn_max)
|
{"hexsha": "de2242a61e076a0c6f732d3ebfb896e366c7a7e0", "size": 5501, "ext": "py", "lang": "Python", "max_stars_repo_path": "stabsim/profile.py", "max_stars_repo_name": "roguextech/Stanford-SSI-Spaceshot-Dynamics-Aero", "max_stars_repo_head_hexsha": "ef468a0507cbf1e7927fb889984a2fb4ad0c465b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-25T00:36:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-09T21:18:45.000Z", "max_issues_repo_path": "stabsim/profile.py", "max_issues_repo_name": "roguextech/Stanford-SSI-Spaceshot-Dynamics-Aero", "max_issues_repo_head_hexsha": "ef468a0507cbf1e7927fb889984a2fb4ad0c465b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stabsim/profile.py", "max_forks_repo_name": "roguextech/Stanford-SSI-Spaceshot-Dynamics-Aero", "max_forks_repo_head_hexsha": "ef468a0507cbf1e7927fb889984a2fb4ad0c465b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-04-01T23:26:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T02:41:12.000Z", "avg_line_length": 42.3153846154, "max_line_length": 168, "alphanum_fraction": 0.5913470278, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1471}
|
# organizing the data
datos<-read.csv("Hoja de Colegios - cop_simpl.csv",dec = ",")
# convert Tipo (school type) and Rel (religious values) to factors
# this makes some analyses easier
datos$Tipo<-as.factor(datos$Tipo)
datos$Rel<-as.factor(datos$Rel)
datos <- datos[order(datos$PUNT,decreasing = TRUE),] # from highest to lowest score
datos
library(ggplot2)
# ordered by score
ggplot(data = datos, mapping = aes(x = reorder(Centro, PUNT), PUNT),fill=factor(Tipo)) +
geom_bar(stat = "identity",col = "black", size = 0.25, width=0.5) + coord_flip()+theme_bw()+
ylab("puntuación") +xlab("")
table(datos$Tipo,datos$NOTA)
table(datos$Rel,datos$NOTA)
# only secular schools reach a high score
# sum of the values columns (4 to 12)
valores = colSums(datos[c(4:12)])
names<-c("Fem","Antibul","LGTB","Ecol","Exam","Coop","Dep","Cri","Pluril")
tabla.valores<-data.frame(names,valores)
ggplot(data = tabla.valores, mapping = aes(x = reorder(names,valores ), valores)) +
geom_bar(stat = "identity",col = "black", size = 0.5, width=0.5) + coord_flip()+theme_bw()+
ylab("número de centros") +
xlab("valores")
# overlay histogram and normal density
a<-ggplot(datos, aes(PUNT)) +
stat_function(
fun = dnorm,
args = list(mean = mean(datos$PUNT), sd = sd(datos$PUNT)),
lwd = 1,
col = 'black'
)+theme_bw()+
ylab("densidad") +
xlab("puntuación") +
ggtitle("a) Densidad")
b<-ggplot(datos, aes(PUNT)) +
geom_histogram()+theme_bw()+
ylab("frecuencia") +
xlab("puntuación") +
ggtitle("b) Histograma")
library(gridExtra)
grid.arrange(a,b,ncol=2)
shapiro.test(log(datos$PUNT))#no normality as data is skewed to the right
median(datos$PUNT)
wilcox.colegios<-wilcox.test(datos$PUNT,mu=5,exact = F)
wilcox.colegios
#http://www.sthda.com/english/wiki/one-sample-wilcoxon-signed-rank-test-in-r
library(rcompanion)
wilcoxonOneSampleR(x = datos$PUNT, mu=5) # large effect size
c<-ggplot(datos,aes(x=Tipo,y=PUNT))+geom_boxplot(fill="grey",outlier.shape = NA)+theme_bw()+
stat_summary(fun="mean",shape=7,size=3)+geom_jitter(width=0.05)+
ylab("puntuación") +ggtitle("a) Tipo de centro")+xlab("")
d<-ggplot(datos,aes(x=Rel,y=PUNT))+geom_boxplot(fill="grey",outlier.shape = NA)+theme_bw()+
stat_summary(fun="mean")+ylab("puntuación") +
ggtitle("b) Valores religiosos")+
stat_summary(fun="mean",shape=7,size=3)+geom_jitter(width=0.05)+xlab("")
grid.arrange(c,d,ncol=2)
kruskal.test(PUNT~Tipo,data=datos)
library(rstatix)
dunn_test(PUNT~Tipo,data=datos, p.adjust.method = "bonferroni")
kruskal_effsize(PUNT~Tipo,data=datos)
library("dplyr")
group_by(datos, Tipo) %>%
summarise(
count = n(),
mean = mean(PUNT, na.rm = TRUE),
median = median(PUNT, na.rm = TRUE),
sd = sd(PUNT, na.rm = TRUE),
se = sd(PUNT, na.rm = TRUE)/sqrt(n())
)
wilcox.test(PUNT~Rel,data=datos,exact = F)
wilcox_effsize(PUNT~Rel,data=datos)
#The effect size r is calculated as Z statistic divided by square root of the sample size
group_by(datos, Rel) %>%
summarise(
count = n(),
mean = mean(PUNT, na.rm = TRUE),
median = median(PUNT, na.rm = TRUE),
sd = sd(PUNT, na.rm = TRUE),
se = sd(PUNT, na.rm = TRUE)/sqrt(n())
)
|
{"hexsha": "b6cde69572ad6395115d872aaca80547247585a0", "size": 3163, "ext": "r", "lang": "R", "max_stars_repo_path": "colegios vitoria-gasteiz a examen.r", "max_stars_repo_name": "norberello/vitoria-gasteiz-a-examen", "max_stars_repo_head_hexsha": "b0b8ef013d7e215aaeb8572e8a4863b5a99c12cf", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "colegios vitoria-gasteiz a examen.r", "max_issues_repo_name": "norberello/vitoria-gasteiz-a-examen", "max_issues_repo_head_hexsha": "b0b8ef013d7e215aaeb8572e8a4863b5a99c12cf", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "colegios vitoria-gasteiz a examen.r", "max_forks_repo_name": "norberello/vitoria-gasteiz-a-examen", "max_forks_repo_head_hexsha": "b0b8ef013d7e215aaeb8572e8a4863b5a99c12cf", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3168316832, "max_line_length": 94, "alphanum_fraction": 0.6762567183, "num_tokens": 1042}
|
\<^marker>\<open>creator "Kevin Kappelmann"\<close>
subsubsection \<open>Basic Setup\<close>
theory Transport_Natural_Functors_Base
imports
HOL.BNF_Def
HOL_Basics.HOL_Alignment_Functions
Transport_Base
begin
paragraph \<open>Summary\<close>
text \<open>Basic setup for closure proofs and simple lemmas.\<close>
text \<open>In the following, we deliberately use granular apply-style proofs since,
in practice, these theorems have to be generated automatically whenever we
declare a new natural functor.
Note that "HOL-Library" provides a command \<open>bnf_axiomatization\<close> which allows
one to axiomatically declare a bounded natural functor. However, we only need a
subset of these axioms - the boundedness of the functor is irrelevant for our
purposes. For this reason - and for the sake of completeness - we state all the
required axioms explicitly below.\<close>
lemma Grp_UNIV_eq_eq_comp: "BNF_Def.Grp UNIV f = (=) \<circ> f"
by (intro ext) (auto elim: GrpE intro: GrpI)
lemma eq_comp_rel_comp_eq_comp: "(=) \<circ> f \<circ>\<circ> R = R \<circ> f"
by (intro ext) auto
lemma Domain_Collect_case_prod_eq_Collect_in_dom:
"Domain {(x, y). R x y} = {x. in_dom R x}"
by blast
lemma ball_in_dom_iff_ball_ex:
"(\<forall>x \<in> S. in_dom R x) \<longleftrightarrow> (\<forall>x \<in> S. \<exists>y. R x y)"
by blast
lemma pair_mem_Collect_case_prod_iff: "(x, y) \<in> {(x, y). R x y} \<longleftrightarrow> R x y"
by blast
paragraph \<open>Natural Functor Axiomatisation\<close>
typedecl ('d, 'a, 'b, 'c) F
consts Fmap :: "('a1 \<Rightarrow> 'a2) \<Rightarrow> ('b1 \<Rightarrow> 'b2) \<Rightarrow> ('c1 \<Rightarrow> 'c2) \<Rightarrow>
('d, 'a1, 'b1, 'c1) F \<Rightarrow> ('d, 'a2, 'b2, 'c2) F"
Fset1 :: "('d, 'a, 'b, 'c) F \<Rightarrow> 'a set"
Fset2 :: "('d, 'a, 'b, 'c) F \<Rightarrow> 'b set"
Fset3 :: "('d, 'a, 'b, 'c) F \<Rightarrow> 'c set"
axiomatization
where Fmap_id: "Fmap id id id = id"
and Fmap_comp: "\<And>f1 f2 f3 g1 g2 g3.
Fmap (g1 \<circ> f1) (g2 \<circ> f2) (g3 \<circ> f3) = Fmap g1 g2 g3 \<circ> Fmap f1 f2 f3"
and Fmap_cong: "\<And>f1 f2 f3 g1 g2 g3 x.
(\<And>x1. x1 \<in> Fset1 x \<Longrightarrow> f1 x1 = g1 x1) \<Longrightarrow>
(\<And>x2. x2 \<in> Fset2 x \<Longrightarrow> f2 x2 = g2 x2) \<Longrightarrow>
(\<And>x3. x3 \<in> Fset3 x \<Longrightarrow> f3 x3 = g3 x3) \<Longrightarrow>
Fmap f1 f2 f3 x = Fmap g1 g2 g3 x"
and Fset1_natural: "\<And>f1 f2 f3. Fset1 \<circ> Fmap f1 f2 f3 = image f1 \<circ> Fset1"
and Fset2_natural: "\<And>f1 f2 f3. Fset2 \<circ> Fmap f1 f2 f3 = image f2 \<circ> Fset2"
and Fset3_natural: "\<And>f1 f2 f3. Fset3 \<circ> Fmap f1 f2 f3 = image f3 \<circ> Fset3"
lemma Fmap_id_eq_self: "Fmap id id id x = x"
by (subst Fmap_id, subst id_eq_self, rule refl)
lemma Fmap_comp_eq_Fmap_Fmap:
"Fmap (g1 \<circ> f1) (g2 \<circ> f2) (g3 \<circ> f3) x = Fmap g1 g2 g3 (Fmap f1 f2 f3 x)"
by (fact fun_cong[OF Fmap_comp, simplified comp_eq])
lemma Fset1_Fmap_eq_image_Fset1: "Fset1 (Fmap f1 f2 f3 x) = f1 ` Fset1 x"
by (fact fun_cong[OF Fset1_natural, simplified comp_eq])
lemma Fset2_Fmap_eq_image_Fset2: "Fset2 (Fmap f1 f2 f3 x) = f2 ` Fset2 x"
by (fact fun_cong[OF Fset2_natural, simplified comp_eq])
lemma Fset3_Fmap_eq_image_Fset3: "Fset3 (Fmap f1 f2 f3 x) = f3 ` Fset3 x"
by (fact fun_cong[OF Fset3_natural, simplified comp_eq])
lemmas Fset_Fmap_eqs = Fset1_Fmap_eq_image_Fset1 Fset2_Fmap_eq_image_Fset2
Fset3_Fmap_eq_image_Fset3
paragraph \<open>Relator\<close>
definition Frel :: "('a1 \<Rightarrow> 'a2 \<Rightarrow> bool) \<Rightarrow> ('b1 \<Rightarrow> 'b2 \<Rightarrow> bool) \<Rightarrow> ('c1 \<Rightarrow> 'c2 \<Rightarrow> bool) \<Rightarrow>
('d, 'a1, 'b1, 'c1) F \<Rightarrow> ('d, 'a2, 'b2, 'c2) F \<Rightarrow> bool"
where "Frel R1 R2 R3 x y \<equiv> (\<exists>z.
z \<in> {x. Fset1 x \<subseteq> {(x, y). R1 x y} \<and> Fset2 x \<subseteq> {(x, y). R2 x y}
\<and> Fset3 x \<subseteq> {(x, y). R3 x y}}
\<and> Fmap fst fst fst z = x
\<and> Fmap snd snd snd z = y)"
lemma FrelI:
assumes "Fset1 z \<subseteq> {(x, y). R1 x y}"
and "Fset2 z \<subseteq> {(x, y). R2 x y}"
and "Fset3 z \<subseteq> {(x, y). R3 x y}"
and "Fmap fst fst fst z = x"
and "Fmap snd snd snd z = y"
shows "Frel R1 R2 R3 x y"
apply (subst Frel_def)
apply (intro exI conjI CollectI)
apply (fact assms)+
done
lemma FrelE:
assumes "Frel R1 R2 R3 x y"
obtains z where "Fset1 z \<subseteq> {(x, y). R1 x y}" "Fset2 z \<subseteq> {(x, y). R2 x y}"
"Fset3 z \<subseteq> {(x, y). R3 x y}" "Fmap fst fst fst z = x" "Fmap snd snd snd z = y"
apply (insert assms)
apply (subst (asm) Frel_def)
apply (elim exE CollectE conjE)
apply assumption
done
lemma Grp_UNIV_Fmap_eq_Frel_Grp: "BNF_Def.Grp UNIV (Fmap f1 f2 f3) =
Frel (BNF_Def.Grp UNIV f1) (BNF_Def.Grp UNIV f2) (BNF_Def.Grp UNIV f3)"
apply (intro ext iffI)
apply (rule FrelI[where
?z="Fmap (BNF_Def.convol id f1) (BNF_Def.convol id f2) (BNF_Def.convol id f3) _"])
apply (subst Fset_Fmap_eqs,
rule image_subsetI,
rule convol_mem_GrpI[simplified Fun_id_eq_id],
rule UNIV_I)+
apply (unfold Fmap_comp_eq_Fmap_Fmap[symmetric]
fst_convol[simplified Fun_comp_eq_comp]
snd_convol[simplified Fun_comp_eq_comp]
Fmap_id id_eq_self)
apply (rule refl)
apply (subst (asm) Grp_UNIV_eq_eq_comp)
apply (subst (asm) comp_eq)
apply assumption
apply (erule FrelE)
apply hypsubst
apply (subst Grp_UNIV_eq_eq_comp)
apply (subst comp_eq)
apply (subst Fmap_comp_eq_Fmap_Fmap[symmetric])
apply (rule Fmap_cong;
rule Collect_case_prod_Grp_eqD[simplified Fun_comp_eq_comp],
drule rev_subsetD,
assumption+)
done
lemma Frel_Grp_UNIV_Fmap:
"Frel (BNF_Def.Grp UNIV f1) (BNF_Def.Grp UNIV f2) (BNF_Def.Grp UNIV f3)
x (Fmap f1 f2 f3 x)"
apply (subst Grp_UNIV_Fmap_eq_Frel_Grp[symmetric])
apply (subst Grp_UNIV_eq_eq_comp)
apply (subst comp_eq)
apply (rule refl)
done
lemma Frel_Grp_UNIV_iff_eq_Fmap:
"Frel (BNF_Def.Grp UNIV f1) (BNF_Def.Grp UNIV f2) (BNF_Def.Grp UNIV f3) x y \<longleftrightarrow>
(y = Fmap f1 f2 f3 x)"
by (subst eq_commute[of y])
(fact fun_cong[OF fun_cong[OF Grp_UNIV_Fmap_eq_Frel_Grp],
simplified Grp_UNIV_eq_eq_comp comp_eq, folded Grp_UNIV_eq_eq_comp, symmetric])
lemma Frel_eq: "Frel (=) (=) (=) = (=)"
apply (unfold BNF_Def.eq_alt[simplified Fun_id_eq_id])
apply (subst Grp_UNIV_Fmap_eq_Frel_Grp[symmetric])
apply (subst Fmap_id)
apply (fold BNF_Def.eq_alt[simplified Fun_id_eq_id])
apply (rule refl)
done
corollary Frel_eq_self: "Frel (=) (=) (=) x x"
by (fact iffD2[OF fun_cong[OF fun_cong[OF Frel_eq]] refl])
lemma Frel_mono_strong:
assumes "Frel R1 R2 R3 x y"
and "\<And>x1 y1. x1 \<in> Fset1 x \<Longrightarrow> y1 \<in> Fset1 y \<Longrightarrow> R1 x1 y1 \<Longrightarrow> S1 x1 y1"
and "\<And>x2 y2. x2 \<in> Fset2 x \<Longrightarrow> y2 \<in> Fset2 y \<Longrightarrow> R2 x2 y2 \<Longrightarrow> S2 x2 y2"
and "\<And>x3 y3. x3 \<in> Fset3 x \<Longrightarrow> y3 \<in> Fset3 y \<Longrightarrow> R3 x3 y3 \<Longrightarrow> S3 x3 y3"
shows "Frel S1 S2 S3 x y"
apply (insert assms(1))
apply (erule FrelE)
apply (rule FrelI)
apply (rule subsetI,
frule rev_subsetD,
assumption,
frule imageI[of _ "Fset1 _" fst]
imageI[of _ "Fset2 _" fst]
imageI[of _ "Fset3 _" fst],
drule imageI[of _ "Fset1 _" snd]
imageI[of _ "Fset2 _" snd]
imageI[of _ "Fset3 _" snd],
(subst (asm) Fset_Fmap_eqs[symmetric])+,
intro CollectI case_prodI2,
rule assms;
hypsubst,
unfold fst_conv snd_conv,
(elim CollectE case_prodE Pair_inject, hypsubst)?,
assumption)+
apply assumption+
done
corollary Frel_mono:
assumes "R1 \<le> S1" "R2 \<le> S2" "R3 \<le> S3"
shows "Frel R1 R2 R3 \<le> Frel S1 S2 S3"
apply (intro le_relI)
apply (rule Frel_mono_strong)
apply assumption
apply (insert assms)
apply (drule le_relD[OF assms(1)] le_relD[OF assms(2)] le_relD[OF assms(3)],
assumption)+
done
lemma Frel_refl_strong:
assumes "\<And>x1. x1 \<in> Fset1 x \<Longrightarrow> R1 x1 x1"
and "\<And>x2. x2 \<in> Fset2 x \<Longrightarrow> R2 x2 x2"
and "\<And>x3. x3 \<in> Fset3 x \<Longrightarrow> R3 x3 x3"
shows "Frel R1 R2 R3 x x"
by (rule Frel_mono_strong[OF Frel_eq_self[of x]];
drule assms, hypsubst, assumption)
lemma Frel_cong:
assumes "\<And>x1 y1. x1 \<in> Fset1 x \<Longrightarrow> y1 \<in> Fset1 y \<Longrightarrow> R1 x1 y1 \<longleftrightarrow> R1' x1 y1"
and "\<And>x2 y2. x2 \<in> Fset2 x \<Longrightarrow> y2 \<in> Fset2 y \<Longrightarrow> R2 x2 y2 \<longleftrightarrow> R2' x2 y2"
and "\<And>x3 y3. x3 \<in> Fset3 x \<Longrightarrow> y3 \<in> Fset3 y \<Longrightarrow> R3 x3 y3 \<longleftrightarrow> R3' x3 y3"
shows "Frel R1 R2 R3 x y = Frel R1' R2' R3' x y"
by (rule iffI;
rule Frel_mono_strong,
assumption;
rule iffD1[OF assms(1)] iffD1[OF assms(2)] iffD1[OF assms(3)]
iffD2[OF assms(1)] iffD2[OF assms(2)] iffD2[OF assms(3)];
assumption)
lemma Frel_rel_inv_eq_rel_inv_Frel: "Frel R1\<inverse> R2\<inverse> R3\<inverse> = (Frel R1 R2 R3)\<inverse>"
by (intro ext iffI;
unfold rel_inv_iff_rel,
erule FrelE,
hypsubst,
rule FrelI[where ?z="Fmap prod.swap prod.swap prod.swap _"];
((subst Fset_Fmap_eqs,
rule image_subsetI,
drule rev_subsetD,
assumption,
elim CollectE case_prodE,
hypsubst,
subst swap_simp,
subst pair_mem_Collect_case_prod_iff,
assumption) |
(subst Fmap_comp_eq_Fmap_Fmap[symmetric],
rule Fmap_cong;
unfold comp_eq fst_swap snd_swap,
rule refl)))
text \<open>Given the axioms above, the following axiom (subdistributivity of the
relator) is equivalent to the functor (F, Fmap) preserving weak pullbacks.\<close>
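text \<open>Pointwise (an illustrative reading, not used by the proofs below): whenever
\<open>Frel R1 R2 R3 x y\<close> and \<open>Frel S1 S2 S3 y z\<close> hold for some intermediate \<open>y\<close>,
subdistributivity yields \<open>Frel (R1 \<circ>\<circ> S1) (R2 \<circ>\<circ> S2) (R3 \<circ>\<circ> S3) x z\<close>.\<close>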
axiomatization
where Frel_comp_le_Frel_rel_comp: "\<And>R1 R2 R3 S1 S2 S3.
Frel R1 R2 R3 \<circ>\<circ> Frel S1 S2 S3 \<le> Frel (R1 \<circ>\<circ> S1) (R2 \<circ>\<circ> S2) (R3 \<circ>\<circ> S3)"
lemma fst_sndOp_eq_snd_fstOp: "fst \<circ> BNF_Def.sndOp P Q = snd \<circ> BNF_Def.fstOp P Q"
unfolding fstOp_def sndOp_def by (intro ext) simp
lemma Frel_rel_comp_le_Frel_comp:
"Frel (R1 \<circ>\<circ> S1) (R2 \<circ>\<circ> S2) (R3 \<circ>\<circ> S3) \<le> (Frel R1 R2 R3 \<circ>\<circ> Frel S1 S2 S3)"
apply (rule le_relI)
apply (erule FrelE)
apply (rule rel_compI[where ?y="Fmap (snd \<circ> BNF_Def.fstOp R1 S1)
(snd \<circ> BNF_Def.fstOp R2 S2) (snd \<circ> BNF_Def.fstOp R3 S3) _"])
apply (rule FrelI[where ?z="Fmap (BNF_Def.fstOp R1 S1)
(BNF_Def.fstOp R2 S2) (BNF_Def.fstOp R3 S3) _"])
apply (subst Fset_Fmap_eqs,
intro image_subsetI,
rule fstOp_in[unfolded relcompp_eq_rel_comp],
drule rev_subsetD,
assumption+)+
apply (subst Fmap_comp_eq_Fmap_Fmap[symmetric])
apply (fold ext[of fst "fst \<circ> _", OF fst_fstOp[unfolded Fun_comp_eq_comp]])
apply hypsubst
apply (rule refl)
apply (subst Fmap_comp_eq_Fmap_Fmap[symmetric])
apply (rule refl)
apply (rule FrelI[where ?z="Fmap (BNF_Def.sndOp R1 S1)
(BNF_Def.sndOp R2 S2) (BNF_Def.sndOp R3 S3) _"])
apply (subst Fset_Fmap_eqs,
intro image_subsetI,
rule sndOp_in[unfolded relcompp_eq_rel_comp],
drule rev_subsetD,
assumption+)+
apply (subst Fmap_comp_eq_Fmap_Fmap[symmetric])
apply (unfold fst_sndOp_eq_snd_fstOp)
apply (rule refl)
apply (subst Fmap_comp_eq_Fmap_Fmap[symmetric])
apply (fold ext[of snd "snd \<circ> _", OF snd_sndOp[unfolded Fun_comp_eq_comp]])
apply hypsubst
apply (rule refl)
done
corollary Frel_comp_eq_Frel_rel_comp:
"Frel R1 R2 R3 \<circ>\<circ> Frel S1 S2 S3 = Frel (R1 \<circ>\<circ> S1) (R2 \<circ>\<circ> S2) (R3 \<circ>\<circ> S3)"
by (rule antisym; rule Frel_comp_le_Frel_rel_comp Frel_rel_comp_le_Frel_comp)
lemma Frel_Fmap_eq1: "Frel R1 R2 R3 (Fmap f1 f2 f3 x) y =
Frel (\<lambda>x. R1 (f1 x)) (\<lambda>x. R2 (f2 x)) (\<lambda>x. R3 (f3 x)) x y"
apply (rule iffI)
apply (fold comp_eq[of R1] comp_eq[of R2] comp_eq[of R3])
apply (drule rel_compI[where ?R="Frel _ _ _" and ?S="Frel _ _ _",
OF Frel_Grp_UNIV_Fmap])
apply (unfold Grp_UNIV_eq_eq_comp)
apply (drule le_relD[OF Frel_comp_le_Frel_rel_comp])
apply (unfold eq_comp_rel_comp_eq_comp)
apply assumption
apply (fold eq_comp_rel_comp_eq_comp[where ?R=R1]
eq_comp_rel_comp_eq_comp[where ?R=R2]
eq_comp_rel_comp_eq_comp[where ?R=R3]
Grp_UNIV_eq_eq_comp)
apply (drule le_relD[OF Frel_rel_comp_le_Frel_comp])
apply (erule rel_compE)
apply (subst (asm) Frel_Grp_UNIV_iff_eq_Fmap)
apply hypsubst
apply assumption
done
lemma Frel_Fmap_eq2: "Frel R1 R2 R3 x (Fmap g1 g2 g3 y) =
Frel (\<lambda>x y. R1 x (g1 y)) (\<lambda>x y. R2 x (g2 y)) (\<lambda>x y. R3 x (g3 y)) x y"
apply (subst rel_inv_iff_rel[of "Frel _ _ _", symmetric])
apply (subst Frel_rel_inv_eq_rel_inv_Frel[symmetric])
apply (subst Frel_Fmap_eq1)
apply (rule sym)
apply (subst rel_inv_iff_rel[of "Frel _ _ _", symmetric])
apply (subst Frel_rel_inv_eq_rel_inv_Frel[symmetric])
apply (unfold rel_inv_iff_rel)
apply (rule refl)
done
lemmas Frel_Fmap_eqs = Frel_Fmap_eq1 Frel_Fmap_eq2
paragraph \<open>Predicator\<close>
definition Fpred :: "('a \<Rightarrow> bool) \<Rightarrow> ('b \<Rightarrow> bool) \<Rightarrow> ('c \<Rightarrow> bool) \<Rightarrow>
('d, 'a, 'b, 'c) F \<Rightarrow> bool"
where "Fpred P1 P2 P3 x \<equiv> Frel ((=)\<restriction>\<^bsub>P1\<^esub>) ((=)\<restriction>\<^bsub>P2\<^esub>) ((=)\<restriction>\<^bsub>P3\<^esub>) x x"
lemma Fpred_mono_strong:
assumes "Fpred P1 P2 P3 x"
and "\<And>x1. x1 \<in> Fset1 x \<Longrightarrow> P1 x1 \<Longrightarrow> Q1 x1"
and "\<And>x2. x2 \<in> Fset2 x \<Longrightarrow> P2 x2 \<Longrightarrow> Q2 x2"
and "\<And>x3. x3 \<in> Fset3 x \<Longrightarrow> P3 x3 \<Longrightarrow> Q3 x3"
shows "Fpred Q1 Q2 Q3 x"
apply (insert assms(1))
apply (unfold Fpred_def)
apply (rule Frel_mono_strong,
assumption;
erule restrict_leftE,
rule restrict_leftI,
assumption,
rule assms,
assumption+)
done
lemma Fpred_top: "Fpred \<top> \<top> \<top> x"
apply (subst Fpred_def)
apply (rule Frel_refl_strong;
subst restrict_left_top_eq,
rule refl)
done
lemma FpredI:
assumes "\<And>x1. x1 \<in> Fset1 x \<Longrightarrow> P1 x1"
and "\<And>x2. x2 \<in> Fset2 x \<Longrightarrow> P2 x2"
and "\<And>x3. x3 \<in> Fset3 x \<Longrightarrow> P3 x3"
shows "Fpred P1 P2 P3 x"
using assms by (rule Fpred_mono_strong[OF Fpred_top])
lemma FpredE:
assumes "Fpred P1 P2 P3 x"
obtains "\<And>x1. x1 \<in> Fset1 x \<Longrightarrow> P1 x1"
"\<And>x2. x2 \<in> Fset2 x \<Longrightarrow> P2 x2"
"\<And>x3. x3 \<in> Fset3 x \<Longrightarrow> P3 x3"
by (elim meta_impE; (assumption |
insert assms,
subst (asm) Fpred_def,
erule FrelE,
hypsubst,
subst (asm) Fset_Fmap_eqs,
subst (asm) Domain_fst[symmetric],
drule rev_subsetD,
rule Domain_mono,
assumption,
unfold Domain_Collect_case_prod_eq_Collect_in_dom in_dom_restrict_left_eq,
elim CollectE inf1E,
assumption))
lemma Fpred_eq_ball: "Fpred P1 P2 P3 =
(\<lambda>x. Ball (Fset1 x) P1 \<and> Ball (Fset2 x) P2 \<and> Ball (Fset3 x) P3)"
by (intro ext iffI conjI ballI FpredI; elim FpredE conjE bspec)
lemma Fpred_Fmap_eq:
"Fpred P1 P2 P3 (Fmap f1 f2 f3 x) = Fpred (P1 \<circ> f1) (P2 \<circ> f2) (P3 \<circ> f3) x"
by (unfold Fpred_def Frel_Fmap_eqs)
(rule iffI;
erule FrelE,
hypsubst,
unfold Frel_Fmap_eqs,
rule Frel_refl_strong;
rule restrict_leftI,
rule refl,
drule rev_subsetD,
assumption,
elim CollectE case_prodE restrict_leftE,
hypsubst,
unfold comp_eq fst_conv,
assumption)
lemma Fpred_in_dom_if_in_dom_Frel:
assumes "in_dom (Frel R1 R2 R3) x"
shows "Fpred (in_dom R1) (in_dom R2) (in_dom R3) x"
apply (insert assms)
apply (elim in_domE FrelE)
apply hypsubst
apply (subst Fpred_Fmap_eq)
apply (rule FpredI;
drule rev_subsetD,
assumption,
elim CollectE case_prodE,
hypsubst,
unfold comp_eq fst_conv,
rule in_domI,
assumption)
done
lemma in_dom_Frel_if_Fpred_in_dom:
assumes "Fpred (in_dom R1) (in_dom R2) (in_dom R3) x"
shows "in_dom (Frel R1 R2 R3) x"
apply (insert assms)
apply (subst (asm) Fpred_eq_ball)
apply (elim conjE)
apply (subst (asm) ball_in_dom_iff_ball_ex,
drule bchoice, \<comment>\<open>requires the axiom of choice.\<close>
erule exE)+
apply (rule in_domI[where ?x=x and ?y="Fmap _ _ _ x" for x])
apply (subst Frel_Fmap_eq2)
apply (rule Frel_refl_strong)
apply (drule bspec[of "Fset1 _"])
apply assumption+
apply (drule bspec[of "Fset2 _"])
apply assumption+
apply (drule bspec[of "Fset3 _"])
apply assumption+
done
lemma in_dom_Frel_eq_Fpred_in_dom:
"in_dom (Frel R1 R2 R3) = Fpred (in_dom R1) (in_dom R2) (in_dom R3)"
by (intro ext iffI; rule Fpred_in_dom_if_in_dom_Frel in_dom_Frel_if_Fpred_in_dom)
lemma in_codom_Frel_eq_Fpred_in_codom:
"in_codom (Frel R1 R2 R3) = Fpred (in_codom R1) (in_codom R2) (in_codom R3)"
apply (subst in_dom_rel_inv_eq_in_codom[symmetric])
apply (subst Frel_rel_inv_eq_rel_inv_Frel[symmetric])
apply (subst in_dom_Frel_eq_Fpred_in_dom)
apply (subst in_dom_rel_inv_eq_in_codom)+
apply (rule refl)
done
lemma in_field_Frel_eq_Fpred_in_in_field:
"in_field (Frel R1 R2 R3) =
Fpred (in_dom R1) (in_dom R2) (in_dom R3) \<squnion>
Fpred (in_codom R1) (in_codom R2) (in_codom R3)"
apply (subst in_field_eq_in_dom_sup_in_codom)
apply (subst in_dom_Frel_eq_Fpred_in_dom)
apply (subst in_codom_Frel_eq_Fpred_in_codom)
apply (rule refl)
done
lemma Frel_restrict_left_Fpred_eq_Frel_restrict_left:
fixes R1 :: "'a1 \<Rightarrow> 'a2 \<Rightarrow> bool"
and R2 :: "'b1 \<Rightarrow> 'b2 \<Rightarrow> bool"
and R3 :: "'c1 \<Rightarrow> 'c2 \<Rightarrow> bool"
and P1 :: "'a1 \<Rightarrow> bool"
and P2 :: "'b1 \<Rightarrow> bool"
and P3 :: "'c1 \<Rightarrow> bool"
shows "(Frel R1 R2 R3 :: ('d, 'a1, 'b1, 'c1) F \<Rightarrow> _)\<restriction>\<^bsub>Fpred P1 P2 P3 :: ('d, 'a1, 'b1, 'c1) F \<Rightarrow> _\<^esub> =
Frel (R1\<restriction>\<^bsub>P1\<^esub>) (R2\<restriction>\<^bsub>P2\<^esub>) (R3\<restriction>\<^bsub>P3\<^esub>)"
apply (intro ext)
apply (rule iffI)
apply (erule restrict_leftE)
apply (elim FpredE)
apply (rule Frel_mono_strong,
assumption;
rule restrict_leftI,
assumption+)
apply (rule restrict_leftI)
apply (rule Frel_mono_strong,
assumption;
erule restrict_leftE,
assumption)
apply (drule in_domI[of "Frel (R1\<restriction>\<^bsub>P1\<^esub>) (R2\<restriction>\<^bsub>P2\<^esub>) (R3\<restriction>\<^bsub>P3\<^esub>)"])
apply (drule Fpred_in_dom_if_in_dom_Frel)
apply (rule Fpred_mono_strong,
assumption;
unfold in_dom_restrict_left_eq inf_apply inf_bool_def;
rule conjunct2,
assumption)
done
lemma Frel_restrict_right_Fpred_eq_Frel_restrict_right:
fixes R1 :: "'a1 \<Rightarrow> 'a2 \<Rightarrow> bool"
and R2 :: "'b1 \<Rightarrow> 'b2 \<Rightarrow> bool"
and R3 :: "'c1 \<Rightarrow> 'c2 \<Rightarrow> bool"
and P1 :: "'a2 \<Rightarrow> bool"
and P2 :: "'b2 \<Rightarrow> bool"
and P3 :: "'c2 \<Rightarrow> bool"
shows "(Frel R1 R2 R3 :: _ \<Rightarrow> ('d, 'a2, 'b2, 'c2) F \<Rightarrow> _)\<upharpoonleft>\<^bsub>Fpred P1 P2 P3 :: ('d, 'a2, 'b2, 'c2) F \<Rightarrow> _\<^esub> =
Frel (R1\<upharpoonleft>\<^bsub>P1\<^esub>) (R2\<upharpoonleft>\<^bsub>P2\<^esub>) (R3\<upharpoonleft>\<^bsub>P3\<^esub>)"
apply (subst restrict_right_eq)
apply (subst Frel_rel_inv_eq_rel_inv_Frel[symmetric])
apply (subst Frel_restrict_left_Fpred_eq_Frel_restrict_left)
apply (subst Frel_rel_inv_eq_rel_inv_Frel[symmetric])
apply (fold restrict_right_eq)
apply (rule refl)
done
locale transport_natural_functor =
t1 : transport L1 R1 l1 r1 + t2 : transport L2 R2 l2 r2 +
t3 : transport L3 R3 l3 r3
for L1 :: "'a1 \<Rightarrow> 'a1 \<Rightarrow> bool"
and R1 :: "'b1 \<Rightarrow> 'b1 \<Rightarrow> bool"
and l1 :: "'a1 \<Rightarrow> 'b1"
and r1 :: "'b1 \<Rightarrow> 'a1"
and L2 :: "'a2 \<Rightarrow> 'a2 \<Rightarrow> bool"
and R2 :: "'b2 \<Rightarrow> 'b2 \<Rightarrow> bool"
and l2 :: "'a2 \<Rightarrow> 'b2"
and r2 :: "'b2 \<Rightarrow> 'a2"
and L3 :: "'a3 \<Rightarrow> 'a3 \<Rightarrow> bool"
and R3 :: "'b3 \<Rightarrow> 'b3 \<Rightarrow> bool"
and l3 :: "'a3 \<Rightarrow> 'b3"
and r3 :: "'b3 \<Rightarrow> 'a3"
begin
notation L1 (infix "\<le>\<^bsub>L1\<^esub>" 50)
notation R1 (infix "\<le>\<^bsub>R1\<^esub>" 50)
notation L2 (infix "\<le>\<^bsub>L2\<^esub>" 50)
notation R2 (infix "\<le>\<^bsub>R2\<^esub>" 50)
notation L3 (infix "\<le>\<^bsub>L3\<^esub>" 50)
notation R3 (infix "\<le>\<^bsub>R3\<^esub>" 50)
notation t1.ge_left (infix "\<ge>\<^bsub>L1\<^esub>" 50)
notation t1.ge_right (infix "\<ge>\<^bsub>R1\<^esub>" 50)
notation t2.ge_left (infix "\<ge>\<^bsub>L2\<^esub>" 50)
notation t2.ge_right (infix "\<ge>\<^bsub>R2\<^esub>" 50)
notation t3.ge_left (infix "\<ge>\<^bsub>L3\<^esub>" 50)
notation t3.ge_right (infix "\<ge>\<^bsub>R3\<^esub>" 50)
notation t1.Galois (infix "\<^bsub>L1\<^esub>\<lessapprox>" 50)
notation t1.flip_Galois (infix "\<^bsub>R1\<^esub>\<lessapprox>" 50)
notation t2.Galois (infix "\<^bsub>L2\<^esub>\<lessapprox>" 50)
notation t2.flip_Galois (infix "\<^bsub>R2\<^esub>\<lessapprox>" 50)
notation t3.Galois (infix "\<^bsub>L3\<^esub>\<lessapprox>" 50)
notation t3.flip_Galois (infix "\<^bsub>R3\<^esub>\<lessapprox>" 50)
notation t1.ge_Galois (infix "\<greaterapprox>\<^bsub>L1\<^esub>" 50)
notation t1.flip_ge_Galois (infix "\<greaterapprox>\<^bsub>R1\<^esub>" 50)
notation t2.ge_Galois (infix "\<greaterapprox>\<^bsub>L2\<^esub>" 50)
notation t2.flip_ge_Galois (infix "\<greaterapprox>\<^bsub>R2\<^esub>" 50)
notation t3.ge_Galois (infix "\<greaterapprox>\<^bsub>L3\<^esub>" 50)
notation t3.flip_ge_Galois (infix "\<greaterapprox>\<^bsub>R3\<^esub>" 50)
notation t1.flip_inv_Galois (infix "\<^bsub>R1\<^esub>\<greaterapprox>" 50)
notation t1.flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>R1\<^esub>" 50)
notation t2.flip_inv_Galois (infix "\<^bsub>R2\<^esub>\<greaterapprox>" 50)
notation t2.flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>R2\<^esub>" 50)
notation t3.flip_inv_Galois (infix "\<^bsub>R3\<^esub>\<greaterapprox>" 50)
notation t3.flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>R3\<^esub>" 50)
notation t1.flip_flip_inv_Galois (infix "\<^bsub>L1\<^esub>\<greaterapprox>" 50)
notation t1.flip_flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>L1\<^esub>" 50)
notation t2.flip_flip_inv_Galois (infix "\<^bsub>L2\<^esub>\<greaterapprox>" 50)
notation t2.flip_flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>L2\<^esub>" 50)
notation t3.flip_flip_inv_Galois (infix "\<^bsub>L3\<^esub>\<greaterapprox>" 50)
notation t3.flip_flip_inv_ge_Galois (infix "\<lessapprox>\<^bsub>L3\<^esub>" 50)
notation t1.unit ("\<eta>\<^sub>1")
notation t1.counit ("\<epsilon>\<^sub>1")
notation t2.unit ("\<eta>\<^sub>2")
notation t2.counit ("\<epsilon>\<^sub>2")
notation t3.unit ("\<eta>\<^sub>3")
notation t3.counit ("\<epsilon>\<^sub>3")
definition "L \<equiv> Frel (\<le>\<^bsub>L1\<^esub>) (\<le>\<^bsub>L2\<^esub>) (\<le>\<^bsub>L3\<^esub>)"
lemma left_rel_eq_Frel: "L = Frel (\<le>\<^bsub>L1\<^esub>) (\<le>\<^bsub>L2\<^esub>) (\<le>\<^bsub>L3\<^esub>)"
unfolding L_def ..
definition "l \<equiv> Fmap l1 l2 l3"
lemma left_eq_Fmap: "l = Fmap l1 l2 l3"
unfolding l_def ..
context
begin
interpretation flip :
transport_natural_functor R1 L1 r1 l1 R2 L2 r2 l2 R3 L3 r3 l3 .
abbreviation "R \<equiv> flip.L"
abbreviation "r \<equiv> flip.l"
lemma right_rel_eq_Frel: "R = Frel (\<le>\<^bsub>R1\<^esub>) (\<le>\<^bsub>R2\<^esub>) (\<le>\<^bsub>R3\<^esub>)"
unfolding flip.left_rel_eq_Frel ..
lemma right_eq_Fmap: "r = Fmap r1 r2 r3"
unfolding flip.left_eq_Fmap ..
lemmas transport_defs = left_rel_eq_Frel left_eq_Fmap
right_rel_eq_Frel right_eq_Fmap
end
sublocale transport L R l r .
(*FIXME: somehow the notation for the fixed parameters L and R, defined in
Order_Functions_Base.thy, is lost. We hence re-declare it here.*)
notation L (infix "\<le>\<^bsub>L\<^esub>" 50)
notation R (infix "\<le>\<^bsub>R\<^esub>" 50)
lemma unit_eq_Fmap: "\<eta> = Fmap \<eta>\<^sub>1 \<eta>\<^sub>2 \<eta>\<^sub>3"
unfolding unit_eq_comp by (simp only: right_eq_Fmap left_eq_Fmap
flip: Fmap_comp t1.unit_eq_comp t2.unit_eq_comp t3.unit_eq_comp)
interpretation flip_inv : transport_natural_functor "(\<ge>\<^bsub>R1\<^esub>)" "(\<ge>\<^bsub>L1\<^esub>)" r1 l1
"(\<ge>\<^bsub>R2\<^esub>)" "(\<ge>\<^bsub>L2\<^esub>)" r2 l2 "(\<ge>\<^bsub>R3\<^esub>)" "(\<ge>\<^bsub>L3\<^esub>)" r3 l3
rewrites "flip_inv.unit \<equiv> \<epsilon>" and "flip_inv.t1.unit \<equiv> \<epsilon>\<^sub>1"
and "flip_inv.t2.unit \<equiv> \<epsilon>\<^sub>2" and "flip_inv.t3.unit \<equiv> \<epsilon>\<^sub>3"
by (simp_all only: order_functors.flip_counit_eq_unit)
lemma counit_eq_Fmap: "\<epsilon> = Fmap \<epsilon>\<^sub>1 \<epsilon>\<^sub>2 \<epsilon>\<^sub>3"
by (fact flip_inv.unit_eq_Fmap)
lemma flip_inv_right_eq_ge_left: "flip_inv.R = (\<ge>\<^bsub>L\<^esub>)"
unfolding left_rel_eq_Frel flip_inv.right_rel_eq_Frel
by (fact Frel_rel_inv_eq_rel_inv_Frel)
interpretation flip :
transport_natural_functor R1 L1 r1 l1 R2 L2 r2 l2 R3 L3 r3 l3 .
lemma flip_inv_left_eq_ge_right: "flip_inv.L \<equiv> (\<ge>\<^bsub>R\<^esub>)"
unfolding flip.flip_inv_right_eq_ge_left .
lemma mono_wrt_rel_leftI:
assumes "((\<le>\<^bsub>L1\<^esub>) \<Rrightarrow>\<^sub>m (\<le>\<^bsub>R1\<^esub>)) l1"
and "((\<le>\<^bsub>L2\<^esub>) \<Rrightarrow>\<^sub>m (\<le>\<^bsub>R2\<^esub>)) l2"
and "((\<le>\<^bsub>L3\<^esub>) \<Rrightarrow>\<^sub>m (\<le>\<^bsub>R3\<^esub>)) l3"
shows "((\<le>\<^bsub>L\<^esub>) \<Rrightarrow>\<^sub>m (\<le>\<^bsub>R\<^esub>)) l"
apply (unfold left_rel_eq_Frel right_rel_eq_Frel left_eq_Fmap)
apply (rule dep_mono_wrt_relI)
apply (unfold Frel_Fmap_eqs)
apply (fold rel_map_eq)
apply (rule le_relD[OF Frel_mono])
apply (subst mono_wrt_rel_iff_le_rel_map[symmetric], rule assms)+
apply assumption
done
end
end
|
{"author": "kappelmann", "repo": "transport-isabelle", "sha": "b6d2cb56ea4abf6e496d1c258d5b3d2a816d75ff", "save_path": "github-repos/isabelle/kappelmann-transport-isabelle", "path": "github-repos/isabelle/kappelmann-transport-isabelle/transport-isabelle-b6d2cb56ea4abf6e496d1c258d5b3d2a816d75ff/Transport/Natural_Functors/Transport_Natural_Functors_Base.thy"}
|
import numpy as np
class RegionEverywhere:
    """Region containing every point: returns an all-True boolean mask."""
    def __call__(self, xyz):
        # xyz is an (N, 3) array of coordinates; every row is inside
        return np.ones(xyz.shape[0], dtype='bool')

class RegionFunction:
    """Region defined by a user-supplied predicate on xyz coordinates."""
    def __init__(self, function):
        self.function = function
    def __call__(self, xyz):
        return self.function(xyz)
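
if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # an all-inclusive region and a region keeping points with z < 0.
    pts = np.array([[0.0, 0.0, 1.0], [1.0, 2.0, -3.0]])
    print(RegionEverywhere()(pts))                          # [ True  True]
    print(RegionFunction(lambda xyz: xyz[:, 2] < 0)(pts))   # [False  True]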
|
{"hexsha": "45567e04a837654c17e3d455e9d0f3ceea5a02e7", "size": 272, "ext": "py", "lang": "Python", "max_stars_repo_path": "LoopStructural/utils/regions.py", "max_stars_repo_name": "wgorczyk/LoopStructural", "max_stars_repo_head_hexsha": "bedc7abd4c1868fdbd6ed659c8d72ef19f793875", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LoopStructural/utils/regions.py", "max_issues_repo_name": "wgorczyk/LoopStructural", "max_issues_repo_head_hexsha": "bedc7abd4c1868fdbd6ed659c8d72ef19f793875", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LoopStructural/utils/regions.py", "max_forks_repo_name": "wgorczyk/LoopStructural", "max_forks_repo_head_hexsha": "bedc7abd4c1868fdbd6ed659c8d72ef19f793875", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7272727273, "max_line_length": 49, "alphanum_fraction": 0.6801470588, "include": true, "reason": "import numpy", "num_tokens": 67}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import json
import cv2
import os
import math
import time
from pycocotools import coco
from .utils.image import color_aug, get_affine_transform, affine_transform, gaussian_radius, draw_umich_gaussian
class Dataset(torch.utils.data.Dataset):
def __init__(self, opt, split):
super(Dataset, self).__init__()
self.opt = opt
self.split = split
self.img_dir = os.path.join(self.opt.data_dir, split)
self.annot_path = os.path.join(
self.opt.data_dir, 'annotations', f'instances_{split}.json')
self.max_objs = 256
self.class_name = opt.dataset_info["class_name"]
self.class_nums = len(self.class_name)
self._valid_ids = opt.dataset_info["valid_ids"]
self.mean = opt.dataset_info["mean"]
self.std = opt.dataset_info["std"]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
# print(self.cat_ids) # {1: 0, 2: 1, 3: 2, 4: 3, 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9}
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
print(f'==> initializing visdrone {split} data.')
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
print(f'Loaded {split} {len(self.images)} samples')
def __len__(self):
return len(self.images)
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img = cv2.imread(os.path.join(self.img_dir, file_name))
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
# print('test1.png',img.shape,img.shape[1]/img.shape[0])
# cv2.imwrite('test1.png',img)
height, width = img.shape[0], img.shape[1]
center = np.array([width / 2., height / 2.], dtype=np.float32)
input_h = self.opt.input_h
input_w = self.opt.input_w
output_h = input_h // 2 # down ratio = 2
output_w = input_w // 2
scale = max(height, width) * 1.0
rot = 0
flipped = False
if self.split == 'train':
            # np.clip constrains the random shift to the range [min, max]
cf = self.opt.shift
center[0] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
center[1] += scale * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
sf = self.opt.scale
scale += scale * np.clip(np.random.randn()*sf, - sf, sf)
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, - rf, rf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
center[0] = width - center[0] - 1
trans_input = get_affine_transform(center=center,
scale=scale,
rot=rot,
output_size=[input_w, input_h])
trans_output = get_affine_transform(center=center,
scale=scale,
rot=rot,
output_size=[output_w, output_h])
inp = cv2.warpAffine(src=img,
M=trans_input,
dsize=(input_w, input_h),
flags=cv2.INTER_LINEAR)
# cv2.imwrite('gs_mask_ori.png', inp)
# time.sleep(5)
# x=inp
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train':
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1) # from [H, W, C] to [C, H, W]
hm = np.zeros((self.class_nums, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32) # width and height
reg = np.zeros((self.max_objs, 2), dtype=np.float32) # regression
        ind = np.zeros((self.max_objs), dtype=np.int64)  # flattened center indices
reg_mask = np.zeros((self.max_objs), dtype=np.uint8) # choose which index
for k in range(min(len(anns), self.max_objs)):
ann = anns[k]
box = ann['bbox']
            # drop annotations from the "ignored" and "others" categories
if ann['category_id'] not in self.cat_ids.keys():
continue
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32) # xyxy
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
            # prevent overflow: clip boxes to the output feature map bounds
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
# cv2.rectangle(img=x,
# pt1=(int(bbox[0]*4), int(bbox[1]*4)),
# pt2=(int(bbox[2]*4), int(bbox[3]*4)),
# color=[0, 255, 0],
# thickness=2)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
dtype=np.float32) # float
ct_int = ct.astype(np.int32) # int
cls_id = self.cat_ids[ann['category_id']]
                # minimal gaussian radius computed from the quadratic-equation bound
radius = gaussian_radius(
(math.ceil(h), math.ceil(w)), min_overlap = self.opt.min_overlap)
radius = max(0, int(radius))
draw_umich_gaussian(hm[cls_id], ct_int, radius)
                # object width and height
                wh[k] = 1. * w, 1. * h
                # flattened position of the k-th object in the feature map:
                # ind = fmap_w * cy + cx
                ind[k] = ct_int[1] * output_w + ct_int[0]
                # record the sub-pixel offset between float and integer centers
                reg[k] = ct - ct_int  # discretization error
                # mark this slot as containing a valid object
                reg_mask[k] = 1
# time.sleep(10)
ret = {'input': inp,
'hm': hm,
'wh': wh,
'reg': reg,
'reg_mask': reg_mask,
'ind': ind,
}
        # debug: merge all class heatmaps into a single mask for visualization
        gs_mask = np.zeros((output_h, output_w))
        for i in range(self.class_nums):
            np.maximum(gs_mask, hm[i], out=gs_mask)
        # cv2.imwrite("gs_mask.jpg", gs_mask * 255)
        # time.sleep(10)
return ret
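
def _demo_center_encoding():
    # Illustrative sketch with made-up numbers (not called by the pipeline):
    # how a float center is turned into the (ind, reg) targets built above.
    output_w = 4
    ct = np.array([2.7, 1.2], dtype=np.float32)
    ct_int = ct.astype(np.int32)             # integer center -> [2, 1]
    ind = ct_int[1] * output_w + ct_int[0]   # flattened index -> 1 * 4 + 2 = 6
    reg = ct - ct_int                        # discretization error -> [0.7, 0.2]
    return ind, reg
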
if __name__ == "__main__":
from opts import opt
train_loader = torch.utils.data.DataLoader(
Dataset(opt, 'train'),
batch_size=1,
shuffle=True,
num_workers=1,
pin_memory=True,
drop_last=True
)
for iter_id, batch in enumerate(train_loader):
if iter_id % 100 == 0:
print(iter_id)
|
{"hexsha": "28dcbed7116c62bc6a514f3ddc3c529696862b52", "size": 7445, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dataset.py", "max_stars_repo_name": "idantony/centernet-visdrone", "max_stars_repo_head_hexsha": "fde315fd52b5dc3357f093675c561f1c1ea47ea3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-05-14T21:04:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T05:42:36.000Z", "max_issues_repo_path": "src/dataset.py", "max_issues_repo_name": "idantony/centernet-visdrone", "max_issues_repo_head_hexsha": "fde315fd52b5dc3357f093675c561f1c1ea47ea3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-06T05:14:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-19T23:03:32.000Z", "max_forks_repo_path": "src/dataset.py", "max_forks_repo_name": "idantony/centernet-visdrone", "max_forks_repo_head_hexsha": "fde315fd52b5dc3357f093675c561f1c1ea47ea3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-05-15T09:17:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-27T15:58:16.000Z", "avg_line_length": 35.6220095694, "max_line_length": 112, "alphanum_fraction": 0.5011417058, "include": true, "reason": "import numpy", "num_tokens": 2009}
|
import math
from typing import *
import os
import cv2
import argparse
import numpy as np
from utils import import_open_pose
import env_vars
_op = import_open_pose()
_op_wrapper = _op.WrapperPython()
params = {
"model_folder": env_vars.MODEL_LOC,
"model_pose": "COCO",
"number_people_max": 1,
"net_resolution": "-1x64"
}
def _distance(x1, y1, x2, y2):
dist = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
return dist
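# e.g. _distance(0, 0, 3, 4) == 5.0 (a 3-4-5 right triangle)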
def _hands_rectangles(images: List, err_thresh: float, debug=False) -> List[List[_op.Rectangle]]:
_op_wrapper.configure(params)
_op_wrapper.start()
if debug:
cv2.namedWindow("body pose", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions
hands = []
for img in images:
datum = _op.Datum()
datum.cvInputData = img
_op_wrapper.emplaceAndPop([datum])
x_left, y_left, _ = datum.poseKeypoints[0][7] # left wrist
x_right, y_right, _ = datum.poseKeypoints[0][4] # right wrist
x_elbow_l, y_elbow_l, score_l = datum.poseKeypoints[0][6]
x_elbow_r, y_elbow_r, score_r = datum.poseKeypoints[0][3]
len_hand_left = _distance(x_left, y_left, x_elbow_l, y_elbow_l)
len_hand_right = _distance(x_right, y_right, x_elbow_r, y_elbow_r)
x_left_shifted = max(0, x_left - len_hand_left)
y_left_shifted = max(0, y_left - len_hand_left)
x_right_shifted = max(0, x_right - len_hand_right)
y_right_shifted = max(0, y_right - len_hand_right)
rect_len_left = 2 * max(x_left - x_left_shifted, y_left - y_left_shifted) \
if score_l > err_thresh else 0
rect_len_right = 2 * max(x_right - x_right_shifted, y_right - y_right_shifted) \
if score_r > err_thresh else 0
if debug:
print((rect_len_right, rect_len_left))
cv2.imshow("body pose", datum.cvOutputData)
cv2.waitKey(0)
hand_rectangles = [
[
_op.Rectangle(x_left_shifted, y_left_shifted, rect_len_left, rect_len_left), # left hand
_op.Rectangle(x_right_shifted, y_right_shifted, rect_len_right, rect_len_right), # right hand
]
]
hands.append(hand_rectangles)
cv2.destroyAllWindows()
return hands
def hand_keypoints(images: List, debug=False, err_thresh: float = 0.1) -> List[Tuple[np.ndarray, np.ndarray]]:
"""
Uses body pose estimation to estimate lwrist & rwrist positions,
from which we use `openpose` to obtain hand keypoints vectors.
:returns a pair of (left_hand_keypoints: ndarray, right_hand_keypoints: ndarray).
:param images: a list of images to process.
:param debug: whether or not to display results via `opencv` methods.
    :param err_thresh: confidence score below which a detection is treated as a false positive.
"""
hands = _hands_rectangles(images, err_thresh, debug)
hand_params = {
"model_folder": env_vars.MODEL_LOC,
"model_pose": "COCO",
"number_people_max": 1,
"hand": True,
"hand_detector": 2,
"body": 0,
}
_op_wrapper.configure(hand_params)
_op_wrapper.start()
keypoints: List[Tuple[np.ndarray, np.ndarray]] = []
if debug:
cv2.namedWindow("hand key points", cv2.WINDOW_NORMAL) # Create window with freedom of dimensions
for i, img in enumerate(images):
datum = _op.Datum()
datum.cvInputData = img
datum.handRectangles = hands[i]
_op_wrapper.emplaceAndPop([datum])
ls = datum.handKeypoints[0][0] # left
rs = datum.handKeypoints[1][0] # right
        # sanitization: average confidence over the 21 hand keypoints
        l_avg = sum(map(lambda y: y[2], ls)) / 21.0
        r_avg = sum(map(lambda y: y[2], rs)) / 21.0
        if l_avg < err_thresh:
            ls *= (0.0, 0.0, 1.0)  # zero out x, y but keep the confidence scores
        if r_avg < err_thresh:
            rs *= (0.0, 0.0, 1.0)
if debug:
print("left avg: ", l_avg)
print("right avg: ", r_avg)
print("Left hand keypoints:\n", ls)
print("Right hand keypoints:\n", rs)
cv2.imshow("hand key points", datum.cvOutputData)
cv2.waitKey(0)
keypoints.append((ls, rs))
cv2.destroyAllWindows()
return keypoints
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--image_path", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
args = parser.parse_known_args()
hand_keypoints([cv2.imread(args[0].image_path)], True)
|
{"hexsha": "ee0e01681ebfc4f24eb7e2bce73edc3d72541fde", "size": 4524, "ext": "py", "lang": "Python", "max_stars_repo_path": "gesture_detection.py", "max_stars_repo_name": "ihear-io/gestures-preprocessing", "max_stars_repo_head_hexsha": "e0dcbe5cde479d526efaea353c04fd1c22a13244", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gesture_detection.py", "max_issues_repo_name": "ihear-io/gestures-preprocessing", "max_issues_repo_head_hexsha": "e0dcbe5cde479d526efaea353c04fd1c22a13244", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-20T04:02:54.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-20T15:10:11.000Z", "max_forks_repo_path": "gesture_detection.py", "max_forks_repo_name": "ihear-io/gestures-preprocessing", "max_forks_repo_head_hexsha": "e0dcbe5cde479d526efaea353c04fd1c22a13244", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7826086957, "max_line_length": 114, "alphanum_fraction": 0.633510168, "include": true, "reason": "import numpy", "num_tokens": 1224}
|
import os
import shutil
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
def violin_plot_ax(
ax,
data,
time_point=0,
violin_axis_limit=None,
time_point_annotation=False,
arms_annotations=None,
vertical=True,
annotate_total_reward=False,
):
def adjacent_values(vals, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
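    # e.g. with q1 = 1 and q3 = 3, the whiskers extend at most 1.5 * IQR = 3.0
    # beyond each quartile, clipped to the observed data range (Tukey fences).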
def set_axis_style(vertical):
if vertical:
labels = [r"$K_{%d}$" % j for j in range(len(data))]
ax.get_xaxis().set_tick_params(direction="out")
ax.xaxis.set_ticks_position("bottom")
ax.set_xticks(np.arange(1, len(labels) + 1))
ax.set_xticklabels(labels)
ax.set_xlim(0.25, len(labels) + 0.75)
if violin_axis_limit:
ax.set_ylim(*violin_axis_limit)
# ax.set_xlabel('Sample name')
xlim = ax.get_xlim()
ax.plot((xlim[0], xlim[1]), (0, 0), color="r", lw=0.5, ls="--")
else:
labels = [r"$K_{%d}$" % j for j in range(len(data))]
ax.get_yaxis().set_tick_params(direction="out")
ax.yaxis.set_ticks_position("left")
ax.set_yticks(np.arange(1, len(labels) + 1))
ax.set_yticklabels(labels)
ax.set_ylim(0.25, len(labels) + 0.75)
if violin_axis_limit:
ax.set_xlim(*violin_axis_limit)
# ax.set_ylabel('Sample name')
ylim = ax.get_ylim()
ax.plot((0, 0), (ylim[0], ylim[1]), color="r", lw=0.5, ls="--")
ax.set_axisbelow(True)
ax.grid(True, lw=0.5)
if time_point_annotation is True:
ax.set_title(f"Probability distribution per arm, time-point {time_point}")
else:
ax.set_title("Probability distribution per arm")
parts = ax.violinplot(
data, showmeans=False, showmedians=False, showextrema=False, vert=vertical
)
for pc in parts["bodies"]:
pc.set_facecolor("#add8e6")
pc.set_edgecolor("black")
pc.set_alpha(1)
quartile1, medians, quartile3 = np.percentile(data, [25, 50, 75], axis=1)
whiskers = np.array(
[
adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)
]
)
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians) + 1)
if vertical:
ax.scatter(inds, medians, marker="o", color="white", s=30, zorder=3)
ax.vlines(inds, quartile1, quartile3, color="k", linestyle="-", lw=5)
ax.vlines(inds, whiskers_min, whiskers_max, color="k", linestyle="-", lw=1)
else:
ax.scatter(medians, inds, marker="o", color="white", s=30, zorder=3)
ax.hlines(inds, quartile1, quartile3, color="k", linestyle="-", lw=5)
ax.hlines(inds, whiskers_min, whiskers_max, color="k", linestyle="-", lw=1)
set_axis_style(vertical)
if arms_annotations is not None:
if vertical:
y_lims = ax.get_ylim()
            y_level_annotations = y_lims[1] - 0.1 * (
                y_lims[1] - y_lims[0]
            )  # place annotations 10% below the top of the axis
for aa_index, aa in enumerate(arms_annotations):
ax.annotate(
"{}".format(aa),
xy=(aa_index + 1, y_level_annotations),
xytext=(0, 0),
textcoords="offset points",
ha="center",
va="center",
)
else:
pass
return ax
def evolutionary_grid_ax(
ax,
data,
show_data_at_tp=50,
offset_before=12,
offset_after=5,
last_tp_off_grid=False,
aspect="equal",
):
if last_tp_off_grid:
delta = -1
else:
delta = 0
cmap = matplotlib.cm.inferno
cmap.set_bad(color="#DDDDDD")
data = np.clip(data, 0, np.inf).T # we want to visualise it horizontally
num_arms = data.shape[0]
window_data = np.nan * np.ones([num_arms, offset_before + offset_after])
window_data[:, :offset_before] = data[
:, show_data_at_tp - offset_before : show_data_at_tp
]
im = ax.imshow(
window_data,
interpolation="nearest",
cmap=cmap,
aspect=aspect,
vmin=0,
vmax=np.max(np.nan_to_num(data)),
origin="lower",
)
ax.set_xticks(np.arange(0, offset_before, 1))
ax.set_xticklabels(
np.arange(show_data_at_tp - offset_before + 1, show_data_at_tp + 1, 1)
)
ax.set_yticks(np.arange(0, num_arms, 1))
ax.set_yticklabels([r"$K_{%d}$" % j for j in range(num_arms)])
ax.set_xticks(np.arange(-0.5, offset_before + delta, 1), minor=True)
ax.set_yticks(np.arange(-0.5, num_arms, 1), minor=True)
ax.grid(which="minor", color="w", linestyle="-", linewidth=2)
ax.set_title("Rewards matrix")
return ax, im
def violin_plot(
game,
show=False,
save_path=None,
num_samples_per_violin=1000,
violin_axis_limit=None,
time_point_annotation=False,
arms_annotations=None,
vertical=True,
figsize=(9, 4),
):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
violin_plot_ax(
ax,
game.sample_all_arms(num_samples=num_samples_per_violin, time_point=game.tp),
game.tp,
violin_axis_limit,
time_point_annotation,
arms_annotations,
vertical,
)
fig.subplots_adjust(bottom=0.15, wspace=0.05)
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
def slideshow_violin_distributions(
game,
output_folder,
num_samples_per_violin=1000,
violin_axis_limit=None,
time_point_annotation=False,
arms_annotations=None,
vertical=True,
):
if os.path.exists(output_folder):
shutil.rmtree(output_folder, ignore_errors=True)
os.mkdir(output_folder)
frames_list = []
for t in tqdm(range(game.T)):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
violin_plot_ax(
ax,
game.sample_all_arms(num_samples=num_samples_per_violin, time_point=t),
t,
violin_axis_limit,
time_point_annotation,
arms_annotations,
vertical,
)
plt.ioff()
pfi_frame = os.path.abspath(
os.path.join(output_folder, "step_{}.jpg".format(t))
)
fig.subplots_adjust(bottom=0.15, wspace=0.05)
plt.savefig(pfi_frame)
frames_list.append("file '" + pfi_frame + "'")
plt.close()
pfi_frames_list = os.path.abspath(os.path.join(output_folder, "frames_list.txt"))
with open(pfi_frames_list, "w+") as outfile:
outfile.write("\n".join(frames_list))
pfi_output_gif = os.path.abspath(os.path.join(output_folder, "sequence.gif"))
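    # ffmpeg flags: `-r 3` sets a 3 fps frame rate, `-f concat -safe 0` reads
    # the frame list (absolute paths allowed), `-y` overwrites existing output.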
os.system(f"ffmpeg -r 3 -f concat -safe 0 -i {pfi_frames_list} -y {pfi_output_gif}")
print(f"gif created and stored in {pfi_output_gif}")
def get_evolving_grid(
game,
show_data_at_tp=54,
offset_before=12,
offset_after=5,
last_tp_off_grid=True,
save_path=None,
show=False,
):
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
evolutionary_grid_ax(
ax,
game.q,
show_data_at_tp=show_data_at_tp,
offset_before=offset_before,
offset_after=offset_after,
last_tp_off_grid=last_tp_off_grid,
)
fig.subplots_adjust(bottom=0.15, wspace=0.05)
if save_path is not None:
plt.savefig(save_path)
if show:
plt.show()
def get_grid_and_violins_dynamic(game, output_folder, violin_axis_limit=(-20, 20)):
if os.path.exists(output_folder):
shutil.rmtree(output_folder, ignore_errors=True)
os.mkdir(output_folder)
frames_list = []
offset_before = 12
offset_after = 5
total_offset = offset_before + offset_after
for t in tqdm(range(game.T)):
fig = plt.figure(figsize=(12, 5.7))
epsilon_x = 0
epsilon_y = 0.02
ax0 = fig.add_axes([0.05 + epsilon_x, 0.08 + epsilon_y, 0.6, 0.8])
ax1 = fig.add_axes([0.7 + epsilon_x, 0.05 + epsilon_y, 0.25, 0.85])
ax2 = fig.add_axes([0.61 + epsilon_x, 0.652 + epsilon_y, 0.01, 0.2])
ax0, im = evolutionary_grid_ax(
ax0,
game.q,
show_data_at_tp=t,
offset_before=np.min([offset_before, t]),
offset_after=np.max([offset_after, total_offset - t]),
last_tp_off_grid=True,
aspect="auto",
)
ax1 = violin_plot_ax(
ax1,
game.sample_all_arms(time_point=t),
time_point=t,
violin_axis_limit=violin_axis_limit,
vertical=False,
)
fig.colorbar(im, cax=ax2)
plt.ioff()
pfi_frame = os.path.abspath(
os.path.join(output_folder, "step_{}.jpg".format(t))
)
plt.savefig(pfi_frame)
frames_list.append("file '" + pfi_frame + "'")
plt.close()
pfi_frames_list = os.path.abspath(os.path.join(output_folder, "frames_list.txt"))
with open(pfi_frames_list, "w+") as outfile:
outfile.write("\n".join(frames_list))
pfi_output_gif = os.path.abspath(os.path.join(output_folder, "sequence.gif"))
os.system(f"ffmpeg -r 3 -f concat -safe 0 -i {pfi_frames_list} -y {pfi_output_gif}")
print(f"gif created and stored in {pfi_output_gif}")
|
{"hexsha": "4bf36397706d75bdbb4663d981933ebf1fb068e0", "size": 9764, "ext": "py", "lang": "Python", "max_stars_repo_path": "mab/visualize.py", "max_stars_repo_name": "SebastianoF/multi-armed-bandits-testbed", "max_stars_repo_head_hexsha": "36e35369676e1d73a3106745c24f81f7a777db22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-04T17:49:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-04T17:49:11.000Z", "max_issues_repo_path": "mab/visualize.py", "max_issues_repo_name": "SebastianoF/multi-armed-bandits-testbed", "max_issues_repo_head_hexsha": "36e35369676e1d73a3106745c24f81f7a777db22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-12T00:25:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:25:09.000Z", "max_forks_repo_path": "mab/visualize.py", "max_forks_repo_name": "SebastianoF/multi-armed-bandits-testbed", "max_forks_repo_head_hexsha": "36e35369676e1d73a3106745c24f81f7a777db22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.498489426, "max_line_length": 88, "alphanum_fraction": 0.6003687014, "include": true, "reason": "import numpy", "num_tokens": 2597}
|
import random
import gym
import numpy as np
mutation_chance = 0.01
env = gym.make("CartPole-v1")
goal_steps = 200
def create_individual():
# Create individual
# [OBS, MOVES]
training_data = []
#Score
score = 0
# moves specifically from this environment:
game_memory = []
# previous observation that we saw
prev_observation = env.reset()
# for each frame in 200
for _ in range(goal_steps):
# choose random action (0 or 1)
action = random.randrange(0, 2)
# do it!
observation, reward, done, info = env.step(action)
# notice that the observation is returned FROM the action
# so we'll store the previous observation here, pairing
# the prev observation to the action we'll take.
if len(prev_observation) > 0:
game_memory.append([prev_observation, action])
prev_observation = observation
score += reward
if done: break
for data in game_memory:
# convert to one-hot (this is the output layer for our neural network)
if data[1] == 1:
output = [0, 1]
elif data[1] == 0:
output = [1, 0]
# saving our training data
training_data.append([data[0], output])
individual=[training_data,score]
return individual
def create_population(count):
return [ create_individual() for _ in range(count) ]
def evaluateIndividual(individual):
#Evaluate individual by his score
return individual[1]
def evaluatePopulation(population):
summ=0
for individual in population:
summ += evaluateIndividual(individual)
return summ/len(population)
def mutatePopulation(population):
for individual in population:
if mutation_chance > random.random():
            # mutate the individual by shuffling its recorded
            # (observation, action) pairs; random.shuffle works in place
            random.shuffle(individual[0])
            print('Individual mutated')
return population
def evolve(population, target, retain=0.2, random_select=0.05, mutate=0.01):
graded = sorted(population,reverse=True,key= lambda x:x[1])
retain_length = int(len(graded) * retain)
parents = graded[:retain_length]
# randomly add other individuals to promote genetic diversity
for individual in graded[retain_length:]:
        if random_select > random.random():
parents.append(individual)
    # mutate some individuals (mutatePopulation uses the module-level mutation_chance)
    population = mutatePopulation(population)
# crossover parents to create children
parents_length = len(parents)
desired_length = len(population) - parents_length
children = []
while len(children) < desired_length:
male = random.randint(0, parents_length - 1)
female = random.randint(0, parents_length - 1)
if male != female:
male = parents[male]
female = parents[female]
            # crossover on the recorded training data; the child's score is
            # unknown until it is re-evaluated, so store 0 for now
            half = len(male[0]) // 2
            child = [male[0][:half] + female[0][half:], 0]
            children.append(child)
parents.extend(children)
return parents
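
if __name__ == "__main__":
    # Minimal usage sketch (hypothetical hyper-parameters; requires gym with
    # the CartPole-v1 environment): evolve a random population and report the
    # mean score per generation.
    population = create_population(20)
    for generation in range(5):
        print(generation, evaluatePopulation(population))
        population = evolve(population, target=200)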
|
{"hexsha": "d04d74fa96ec5583465184d7815110110dbbee0d", "size": 2997, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/CartPole-v0/geneticJaime.py", "max_stars_repo_name": "AlwaysLearningDeeper/OpenAI_Challenges", "max_stars_repo_head_hexsha": "576732fd3f1fd24afc4bdfb4920c1da8caae12ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-05-19T09:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2017-08-02T12:00:51.000Z", "max_issues_repo_path": "src/CartPole-v0/geneticJaime.py", "max_issues_repo_name": "AlwaysLearningDeeper/Project", "max_issues_repo_head_hexsha": "576732fd3f1fd24afc4bdfb4920c1da8caae12ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-01-28T21:45:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-09T08:28:30.000Z", "max_forks_repo_path": "src/CartPole-v0/geneticJaime.py", "max_forks_repo_name": "AlwaysLearningDeeper/Project", "max_forks_repo_head_hexsha": "576732fd3f1fd24afc4bdfb4920c1da8caae12ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-08-06T17:51:49.000Z", "max_forks_repo_forks_event_max_datetime": "2017-08-06T17:51:49.000Z", "avg_line_length": 28.8173076923, "max_line_length": 78, "alphanum_fraction": 0.6446446446, "include": true, "reason": "import numpy", "num_tokens": 680}
|
include("algorithms/matching.jl")
@enum State begin
used = 1
unused = 2
removed = 3
vital = 4
end
"""
AllDifferent(x::Array{<:AbstractIntVar}, trailer)
AllDifferent constraint, enforcing ∀ i ≠ j ∈ ⟦1, length(x)⟧, x[i] ≠ x[j].
The implementation of this contraint is inspired by:
https://www.researchgate.net/publication/200034395_A_Filtering_Algorithm_for_Constraints_of_Difference_in_CSPs
Many of the functions below relate to algorithms depicted in the paper, and their
documentation refer to parts of the overall algorithm.
"""
struct AllDifferent <: Constraint
x::Array{<:AbstractIntVar}
active::StateObject{Bool}
initialized::StateObject{Bool}
matching::Vector{StateObject{Tuple{Int, Int, Bool}}}
remainingEdges::RSparseBitSet{UInt64}
edgeToIndex::Dict{Edge{Int}, Int}
indexToEdge::Vector{Edge{Int}}
nodesMin::Int
numberOfVars::Int
numberOfVals::Int
numberOfEdges::Int
function AllDifferent(x::Array{<:AbstractIntVar}, trailer)::AllDifferent
max = Base.maximum(var -> maximum(var.domain), x)
min = Base.minimum(var -> minimum(var.domain), x)
range = max - min + 1
active = StateObject{Bool}(true, trailer)
initialized = StateObject{Bool}(false, trailer)
numberOfVars = length(x)
numberOfEdges = sum(var -> length(var.domain), x)
matching = Vector{StateObject{Tuple{Int, Int, Bool}}}(undef, numberOfVars)
for i = 1:numberOfVars
matching[i] = StateObject{Tuple{Int, Int, Bool}}((0, 0, false), trailer)
end
remainingEdges = RSparseBitSet{UInt64}(numberOfEdges, trailer)
edgeToIndex = Dict{Edge{Int}, Int}()
indexToEdge = Vector{Edge{Int}}(undef, numberOfEdges)
constraint = new(x,
active,
initialized,
matching,
remainingEdges,
edgeToIndex,
indexToEdge,
min,
numberOfVars,
range,
numberOfEdges
)
counter = 1
for (idx, var) in enumerate(x)
addOnDomainChange!(var, constraint)
for val in var.domain
dst = numberOfVars + val - min + 1
constraint.edgeToIndex[Edge(idx, dst)] = counter
constraint.indexToEdge[counter] = Edge(idx, dst)
counter += 1
end
end
return constraint
end
end
"""
valToNode(constraint, value)::Int
Return the node index of a value.
"""
function valToNode(con::AllDifferent, val::Int)
return con.numberOfVars + val - con.nodesMin + 1
end
"""
nodeToVal(constraint, node)::Int
Return the underlying value of a node.
"""
function nodeToVal(con::AllDifferent, node::Int)
return node - con.numberOfVars + con.nodesMin - 1
end
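# Worked example (hypothetical sizes): with numberOfVars = 3 and nodesMin = 10,
# valToNode maps value 12 to node 3 + 12 - 10 + 1 = 6, and nodeToVal maps
# node 6 back to 6 - 3 + 10 - 1 = 12.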
"""
orderEdge(edge)::Edge
Return the ordered version of an edge, i.e. with e.src ≤ e.dst.
"""
function orderEdge(e::Edge{Int})::Edge{Int}
src, dst = e.src < e.dst ? (e.src, e.dst) : (e.dst, e.src)
return Edge(src, dst)
end
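# e.g. orderEdge(Edge(5, 2)) returns Edge(2, 5); an already-ordered edge passes
# through unchanged.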
function updateremaining!(constraint::AllDifferent, removed::BitVector)
clearMask!(constraint.remainingEdges)
addToMask!(constraint.remainingEdges, bitVectorToUInt64Vector(removed))
reverseMask!(constraint.remainingEdges)
intersectWithMask!(constraint.remainingEdges)
end
function updatevital!(constraint::AllDifferent, vital::BitVector)
for match in constraint.matching
e = Edge(match.value[1], match.value[2])
idx = constraint.edgeToIndex[e]
setValue!(match, (e.src, e.dst, vital[idx]))
end
end
function getvital(constraint)
vital = BitVector(undef, constraint.numberOfEdges) .= false
for match in constraint.matching
if match.value[3]
vital[constraint.edgeToIndex[Edge(match.value[1], match.value[2])]] = true
end
end
return vital
end
"""
initializeGraphs!(constraint)
Return the graph and the empty directed graph of a variable-value problem.
"""
function initializeGraphs!(con::AllDifferent)::Pair{Graph{Int}, DiGraph{Int}}
numberOfNodes = con.numberOfVars + con.numberOfVals
edgeFilter = BitVector(con.remainingEdges)[1:con.numberOfEdges]
allEdges = con.indexToEdge[edgeFilter]
graph = Graph(allEdges)
digraph = DiGraph(nv(graph))
if nv(graph) < numberOfNodes
add_vertices!(graph, numberOfNodes - nv(graph))
end
return Pair(graph, digraph)
end
"""
getAllEdges(digraph, parents)::Set{Edge}
Return all the edges visited by a BFS on `digraph` encoded in `parents`.
"""
function getAllEdges(digraph::DiGraph{Int}, parents::Vector{Int})::Set{Edge{Int}}
edgeSet = Set{Edge{Int}}()
for i = 1:nv(digraph)
if parents[i] > 0 && parents[i] != i
validneighbors = filter(v -> parents[v] > 0, inneighbors(digraph, i))
validedges = map(v -> orderEdge(Edge(v, i)), validneighbors)
union!(edgeSet, validedges)
end
end
return edgeSet
end
remapedge(edge::Edge{Int}, component::Vector{Int}) = Edge(component[edge.src], component[edge.dst])
"""
removeEdges!(constraint, prunedValue, graph, digraph)
Remove all the unnecessary edges in graph and digraph as in the original paper.
Update `constraint.edgesState` with the new status of each edge, remove some
edges from `graph` and `digraph` and push the removed values in `prunedValue`.
Following exactly the procedure in the function with the same name in the original
paper.
"""
function removeEdges!(constraint::AllDifferent, prunedValues::Vector{Vector{Int}}, graph::Graph{Int}, digraph::DiGraph{Int})
unused = BitVector(constraint.remainingEdges)[1:constraint.numberOfEdges]
vital = BitVector(undef, constraint.numberOfEdges) .= false
removed = BitVector(undef, constraint.numberOfEdges) .= .~ unused
used = BitVector(undef, constraint.numberOfEdges) .= false
allValues = constraint.numberOfVars+1:nv(digraph)
freeValues = filter(v -> indegree(digraph,v) == 0, allValues)
seen = fill(false, constraint.numberOfVals)
components = filter(comp -> length(comp)>1, strongly_connected_components(digraph))
for component in components
edgeSet = orderEdge.(remapedge.(edges(digraph[component]), [component]))
edgeIndices = getindex.([constraint.edgeToIndex], edgeSet)
used[edgeIndices] .= true
unused[edgeIndices] .= false
end
for node in freeValues
if seen[node - constraint.numberOfVars]
continue
end
parents = bfs_parents(digraph, node; dir=:out)
edgeSet = getAllEdges(digraph, parents)
edgeIndices = getindex.([constraint.edgeToIndex], edgeSet)
used[edgeIndices] .= true
unused[edgeIndices] .= false
reached = filter(v -> parents[v] > 0, allValues)
seen[reached .- constraint.numberOfVars] .= true
end
edgeIndices = map(constraint.matching) do pair
var, val = pair.value
e = Edge(var, val)
return constraint.edgeToIndex[e]
end
vital[edgeIndices] .= true .& unused[edgeIndices]
unused[edgeIndices] .= false
rest = constraint.indexToEdge[unused]
reversedRest = map(e -> Edge(e.dst, e.src), rest)
rem_edge!.([graph], rest)
rem_edge!.([digraph], reversedRest)
removed[unused] .= true
foreach(rest) do e
var, val = e.src, e.dst
push!(prunedValues[var], nodeToVal(constraint, val))
end
updateremaining!(constraint, removed)
updatevital!(constraint, vital)
end
"""
updateEdgesState!(constraint)::Set{Edge}
Return all the pruned values not already encoded in the constraint state.
"""
function updateEdgesState!(constraint::AllDifferent, prunedDomains::CPModification)
modif = Set{Edge}()
for (idx, var) in enumerate(constraint.x)
if haskey(prunedDomains, var.id)
union!(modif, Edge.([idx], valToNode.([constraint], prunedDomains[var.id])))
end
end
return modif
end
"""
propagate!(constraint::AllDifferent, toPropagate::Set{Constraint}, prunedDomains::CPModification)
`AllDifferent` propagation function. Implements the full procedure of the paper.
"""
function propagate!(constraint::AllDifferent, toPropagate::Set{Constraint}, prunedDomains::CPModification)
if !constraint.active.value
return true
end
# Variables Initialization
graph, digraph = initializeGraphs!(constraint)
# Run only once, when constraint is first propagated
if !constraint.initialized.value
matching = maximumMatching!(graph, digraph, constraint.numberOfVars)
if matching.size < constraint.numberOfVars
return false
end
for (idx, match) in enumerate(matching.matches)
setValue!(constraint.matching[idx], (match..., false))
end
setValue!(constraint.initialized, true)
# Otherwise just read the stored values
else
matching = Matching{Int}(length(constraint.matching), map(match -> (match.value[1] => match.value[2]), constraint.matching))
buildDigraph!(digraph, graph, matching)
end
# TODO change this with the CPModification
modifications = updateEdgesState!(constraint, prunedDomains)
prunedValues = Vector{Vector{Int}}(undef, constraint.numberOfVars)
for i = 1:constraint.numberOfVars
prunedValues[i] = Int[]
end
removed = .~ BitVector(constraint.remainingEdges)[1:constraint.numberOfEdges]
vital = getvital(constraint)
needrematching = false
for e in modifications
rev_e = Edge(e.dst, e.src)
idx = constraint.edgeToIndex[e]
if e in edges(graph)
if vital[idx]
return false
elseif e in edges(digraph)
needrematching = true
rem_edge!(digraph, e)
else
rem_edge!(digraph, rev_e)
end
rem_edge!(graph, e)
# TODO get rid of edgesState
removed[idx] = true
end
end
updateremaining!(constraint, removed)
if needrematching
matching = maximizeMatching!(digraph, constraint.numberOfVars)
if matching.size < constraint.numberOfVars
return false
end
for (idx, match) in enumerate(matching.matches)
setValue!(constraint.matching[idx], (match..., false))
end
end
removeEdges!(constraint, prunedValues, graph, digraph)
for (prunedVar, var) in zip(prunedValues, constraint.x)
if !isempty(prunedVar)
for val in prunedVar
remove!(var.domain, val)
end
triggerDomainChange!(toPropagate, var)
addToPrunedDomains!(prunedDomains, var, prunedVar)
end
end
if constraint in toPropagate
pop!(toPropagate, constraint)
end
if all(var -> length(var.domain) <= 1, constraint.x)
setValue!(constraint.active, false)
end
for var in constraint.x
if isempty(var.domain)
return false
end
end
return true
end
variablesArray(constraint::AllDifferent) = constraint.x
function Base.show(io::IO, ::MIME"text/plain", con::AllDifferent)
print(io, string(typeof(con)), ": ", join([var.id for var in con.x], " != "), ", active = ", con.active)
for var in con.x
print(io, "\n ", var)
end
end
function Base.show(io::IO, con::AllDifferent)
print(io, string(typeof(con)), ": ", join([var.id for var in con.x], " != "))
end
|
{"hexsha": "4bba5ca5860c1a6563d72e551ded4b5319e57b17", "size": 11514, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/CP/constraints/alldifferent.jl", "max_stars_repo_name": "corail-research/SeaPearl.jl", "max_stars_repo_head_hexsha": "648b10873e1586dc4f416a31689df855e5395ac7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2021-04-20T16:29:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:17:03.000Z", "max_issues_repo_path": "src/CP/constraints/alldifferent.jl", "max_issues_repo_name": "corail-research/SeaPearl.jl", "max_issues_repo_head_hexsha": "648b10873e1586dc4f416a31689df855e5395ac7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2021-04-23T17:20:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:42:24.000Z", "max_forks_repo_path": "src/CP/constraints/alldifferent.jl", "max_forks_repo_name": "corail-research/SeaPearl.jl", "max_forks_repo_head_hexsha": "648b10873e1586dc4f416a31689df855e5395ac7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-05-10T23:32:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T02:44:34.000Z", "avg_line_length": 32.8971428571, "max_line_length": 132, "alphanum_fraction": 0.6538996005, "num_tokens": 2814}
|
import numpy as np
class IonMobilityPeak:
def __init__(self, mz, intensity, ion_mobility):
self.mz_array = [mz, ]
self.mass_array = [[mz, ], ]
self.intensity_array = [[intensity, ]]
self.ion_mobility_array = [[ion_mobility, ]]
self.intensity_max = [intensity, ]
self.ion_mobility_opt = [ion_mobility, ]
self.ion_mobility_max = [ion_mobility, ]
self.ion_mobility_min = [ion_mobility, ]
self.total = 1
    def get_nearest_values(self, value):
        # indices of self.mz_array ordered by distance to `value`
        return np.argsort(np.abs(np.asarray(self.mz_array) - value))
def extend(self, mz, intensity, ion_mobility):
self.mz_array.append(mz)
self.mass_array.append([mz, ])
self.intensity_array.append([intensity, ])
self.intensity_max.append(intensity)
self.ion_mobility_opt.append(ion_mobility)
self.ion_mobility_array.append([ion_mobility, ])
self.ion_mobility_max.append(ion_mobility)
self.ion_mobility_min.append(ion_mobility)
self.total += 1
def append_and_recalc(self, mz, intensity, ion_mobility, index):
self.mass_array[index].append(mz)
self.intensity_array[index].append(intensity)
self.ion_mobility_array[index].append(ion_mobility)
self.recalc(index)
def recalc(self, index):
self.mz_array[index] = np.mean(self.mass_array[index])
self.ion_mobility_max[index] = max(self.ion_mobility_array[index])
self.ion_mobility_min[index] = min(self.ion_mobility_array[index])
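        # recalc is only ever called right after append_and_recalc added a new
        # point, so at least two intensities exist and [-2] below is safe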
if self.intensity_array[index][-1] > self.intensity_array[index][-2]:
self.intensity_max[index] = self.intensity_array[index][-1]
self.ion_mobility_opt[index] = self.ion_mobility_array[index][-1]
def push_me_to_the_peak(self, mz, intensity, ion_mobility, diff):
# nearest_ids = self.get_nearest_values(mz)
flag = 0
nearest_id = self.total - 1
mass_accuracy = diff * 1e-6 * mz
while nearest_id >= 0:
tmp_diff = abs(self.mz_array[nearest_id] - mz)
# tmp_diff = abs(self.mz_array[nearest_id] - mz) / mz
# if tmp_diff <= diff * 1e-6:
if tmp_diff <= mass_accuracy:
if abs(
self.ion_mobility_max[nearest_id] -
ion_mobility) <= 0.1 or abs(
self.ion_mobility_min[nearest_id] -
ion_mobility) <= 0.1:
flag = 1
self.append_and_recalc(
mz, intensity, ion_mobility, nearest_id)
break
else:
break
nearest_id -= 1
if not flag:
self.extend(mz, intensity, ion_mobility)
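# A minimal usage sketch (hypothetical values, not part of the original module):
# push_me_to_the_peak either merges a (mz, intensity, ion_mobility) point into
# an existing m/z cluster (within `diff` ppm and 0.1 ion-mobility units of its
# recorded extremes) or starts a new cluster via extend().
if __name__ == '__main__':
    peak = IonMobilityPeak(500.0, 100.0, 1.0)
    peak.push_me_to_the_peak(500.001, 150.0, 1.02, 10)  # ~2 ppm away: merged
    peak.push_me_to_the_peak(600.0, 80.0, 1.5, 10)      # far away: new cluster
    print(peak.total, peak.mz_array)                    # 2 [500.0005, 600.0]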
|
{"hexsha": "0463fe6de77c22a82fddac409706267a5e2d2997", "size": 2764, "ext": "py", "lang": "Python", "max_stars_repo_path": "biosaur_src/Classes/IonMobilityPeak.py", "max_stars_repo_name": "abdrakhimov1/DIA_Biosaur", "max_stars_repo_head_hexsha": "90d4929f7e664dd8b47295ecbb8b8d4c323f88ab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "biosaur_src/Classes/IonMobilityPeak.py", "max_issues_repo_name": "abdrakhimov1/DIA_Biosaur", "max_issues_repo_head_hexsha": "90d4929f7e664dd8b47295ecbb8b8d4c323f88ab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "biosaur_src/Classes/IonMobilityPeak.py", "max_forks_repo_name": "abdrakhimov1/DIA_Biosaur", "max_forks_repo_head_hexsha": "90d4929f7e664dd8b47295ecbb8b8d4c323f88ab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9295774648, "max_line_length": 77, "alphanum_fraction": 0.6002170767, "include": true, "reason": "import numpy", "num_tokens": 683}
|
#include <boost/math/special_functions/ellint_rj.hpp>
|
{"hexsha": "55227ee43b163f35a0dd7ce1f4d386c928a80923", "size": 54, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_math_special_functions_ellint_rj.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_math_special_functions_ellint_rj.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_math_special_functions_ellint_rj.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.0, "max_line_length": 53, "alphanum_fraction": 0.8333333333, "num_tokens": 14}
|
#!/usr/local/bin/python
"""
Node objective functions
Algorithms implemented:
1. naive_greedy_parallel - In each iteration pick 1 node greedily.
2. naive_greedy_heuristic - Pick all k nodes at once greedily.
3. smart_greedy_parallel - In each iteration pick 1 node smart greedily.
"""
from __future__ import division
from markov_chain import MarkovChain
import copy
import random
import numpy as np
import networkx as nx
from multiprocessing import Pool
from multiprocessing import cpu_count
from itertools import combinations
import argparse
np.seterr(all="raise")
cores = cpu_count()
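# NOTE: `mc` is assumed to be a module-level MarkovChain instance that the
# calling script assigns before using these functions; every objective below
# reads the graph (and per-node item counts) from mc.G.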
# ------------------------------------------------------------
# Naive Greedy algorithm
# ------------------------------------------------------------
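# F(S) sums, over every node u that still has a successor outside S, the
# remaining item mass x'_u = num_items(u) * (1 - rho) times the dispersion
# term sum_v P'(u,v) * (1 - P'(u,v)), where rho is the probability mass
# flowing from u into S and P' renormalizes the transitions that avoid S.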
def calculate_F(S):
F = np.float128(0.0)
V_minus_S = set(mc.G.nodes()) - set(S)
predecessors = []
for i in V_minus_S:
predecessors += mc.G.predecessors(i)
predecessors = set(predecessors)
for u in predecessors:
F_u = np.float128(0.0)
successors = mc.G[u]
# Calculate rho
rho = np.float128(0.0)
for v in successors:
if v in S:
rho += successors[v]['weight']
# Calculate F_u
x_dash = mc.G.node[u]['num_items'] * ( 1 - rho)
for v in successors:
if v not in S:
P_dash = mc.G.edge[u][v]['weight'] / (1 - rho)
F_u += P_dash * (1 - P_dash)
F_u = x_dash * F_u
if np.abs(F_u) < 1e-5:
F += 0
else:
F += F_u
return (S, F)
def naive_greedy_parallel(k):
pool = Pool(cores)
picked_set = []
for i in xrange(k):
candidate_nodes = set(mc.G.nodes()) - set(picked_set)
candidate_sets = [picked_set + [v] for v in candidate_nodes]
objective_values = pool.imap(calculate_F, candidate_sets)
objective_values = sorted(objective_values, key=lambda x: x[1])
picked_set = objective_values[0][0]
pool.close()
pool.join()
return calculate_F(picked_set)
def naive_greedy_heuristic(k):
pool = Pool(cores)
picked_set = []
candidate_nodes = [[x] for x in set(mc.G.nodes()) - set(picked_set)]
objective_values = pool.imap(calculate_F, candidate_nodes)
objective_values = sorted(objective_values, key=lambda x: x[1])
picked_set = [x[0][0] for x in objective_values[:k]]
pool.close()
pool.join()
return calculate_F(picked_set)
# ------------------------------------------------------------
# Smart Greedy algorithm
# ------------------------------------------------------------
def calculate_smart_F(args):
v = args[0]
rho_dict = args[1]
B_dict = args[2]
picked_set = list(args[3]) + list([v])
F = np.float128(0.0)
rho_dash_dict = {}
B_dash_dict = {}
predecessors = mc.G.predecessors(v)
for u in mc.G.nodes():
x = mc.G.node[u]['num_items']
if u in predecessors:
P = mc.G.edge[u][v]['weight']
else:
P = 0
successors = mc.G.successors(u)
if set(successors) - set(picked_set) == set():
rho_dash_dict[u] = 1
B_dash_dict[u] = 0
continue
rho_dash_dict[u] = rho_dict[u] + P
B_dash_dict[u] = B_dict[u] - (2 * P * (1 - rho_dict[u] - P))
if rho_dash_dict[u] < 1:
F_u = x * B_dash_dict[u] / (1 - rho_dash_dict[u])
if np.abs(F_u) < 1e-5:
F += 0
else:
F += F_u
else:
F += 0
return (v, F, rho_dash_dict, B_dash_dict)
def smart_greedy_parallel(k):
pool = Pool(cores)
picked_set = []
rho_dict = {}
B_dict = {}
for u in mc.G.nodes():
rho_dict[u] = 0.0
B_dict[u] = 0.0
successors = mc.G[u]
for v in successors:
P = mc.G.edge[u][v]['weight']
B_dict[u] += P * (1 - P)
while len(picked_set) < k:
candidate_nodes = set(mc.G.nodes()) - set(picked_set)
args = [(candidate_node, rho_dict, B_dict, picked_set) for candidate_node in candidate_nodes]
objective_values = pool.imap(calculate_smart_F, args)
objective_values = sorted(objective_values, key=lambda x: x[1])
picked_node = objective_values[0][0]
rho_dict = objective_values[0][2]
B_dict = objective_values[0][3]
picked_set.append(picked_node)
pool.close()
pool.join()
return calculate_F(picked_set)
# Brute force
def brute_force_nodes(k):
pool = Pool(cores)
candidate_sets = combinations(mc.G.nodes(), k)
objective_values = pool.imap(calculate_F, candidate_sets)
objective_values = sorted(objective_values, key=lambda x: x[1])
pool.close()
pool.join()
return objective_values[0]
# Other baselines
# Top k nodes with highest betweenness centrality
def highest_betweenness_centrality_nodes(k, betweenness_centrality):
sorted_nodes = sorted(betweenness_centrality, key=betweenness_centrality.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Top k nodes with highest incoming probability
def highest_in_probability_nodes(k):
incoming_probability = mc.G.in_degree(weight='weight')
sorted_nodes = sorted(incoming_probability, key=incoming_probability.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Top k nodes with incoming edges from highest number of other nodes
def highest_in_degree_centrality_nodes(k, in_deg_centrality):
sorted_nodes = sorted(in_deg_centrality, key=in_deg_centrality.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Top k nodes with highest closeness centrality
def highest_closeness_centrality_nodes(k, closeness_centrality):
sorted_nodes = sorted(closeness_centrality, key=closeness_centrality.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Top k nodes with the highest number of items
def highest_item_nodes(k):
item_nodes = nx.get_node_attributes(mc.G, 'num_items')
sorted_nodes = sorted(item_nodes, key=item_nodes.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Top k pagerank nodes
def highest_pagerank_nodes(k, pagerank):
sorted_nodes = sorted(pagerank, key=pagerank.get)
sorted_nodes = [x for x in reversed(sorted_nodes)]
nodes_set = sorted_nodes[:k]
return calculate_F(nodes_set)
# Random k nodes
def random_nodes(k):
random_nodes = np.random.choice(mc.num_nodes, k, replace=False)
nodes_set = [x for x in random_nodes]
return calculate_F(nodes_set)
# ------------------------------------------------------------
# Evolution with k
# ------------------------------------------------------------
# Get evolution of objective with increasing k
def get_evolution(method, k):
dataframe = []
if method == smart_greedy_parallel or method == naive_greedy_heuristic:
nodes_set = method(k)[0]
for i in xrange(k):
row = {}
row['objective'] = "nodes"
row['k'] = i
row['objective_value'] = calculate_F(nodes_set[:i])[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
elif method == highest_closeness_centrality_nodes:
closeness_centrality = nx.closeness_centrality(mc.G)
for i in xrange(k):
row = {}
row['objective'] = "nodes"
row['k'] = i
row['objective_value'] = method(i, closeness_centrality)[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
elif method == highest_in_degree_centrality_nodes:
in_deg_centrality = nx.in_degree_centrality(mc.G)
for i in xrange(k):
row = {}
row['objective'] = "nodes"
row['k'] = i
row['objective_value'] = method(i, in_deg_centrality)[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
elif method == highest_pagerank_nodes:
pagerank = nx.pagerank(mc.G, tol=1e-02)
for i in xrange(k):
row = {}
row['objective'] = "nodes"
row['k'] = i
row['objective_value'] = method(i, pagerank)[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
elif method == highest_betweenness_centrality_nodes:
if mc.num_nodes > 1000:
pivots = 1000
else:
pivots = mc.num_nodes
betweenness_centrality = nx.betweenness_centrality(mc.G, k=pivots)
for i in xrange(k):
row = {}
row['objective'] = "nodes"
row['k'] = i
row['objective_value'] = method(i, betweenness_centrality)[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
else:
for i in xrange(k):
row = {}
row['objective'] = "nodes"
result = method(i)
row['k'] = i
row['objective_value'] = result[1]
row['method_name'] = method.func_name
row['item_distribution'] = mc.item_distribution
dataframe.append(row)
return dataframe
|
{"hexsha": "f6d3971a20685b1cfb04756cef2d802290a31b13", "size": 9782, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/MarkovChain/node_objectives.py", "max_stars_repo_name": "chdhr-harshal/MCMonitor", "max_stars_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-04T20:35:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T09:06:43.000Z", "max_issues_repo_path": "src/python/MarkovChain/node_objectives.py", "max_issues_repo_name": "chdhr-harshal/MCMonitor", "max_issues_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/MarkovChain/node_objectives.py", "max_forks_repo_name": "chdhr-harshal/MCMonitor", "max_forks_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-05T09:10:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-05T09:10:41.000Z", "avg_line_length": 31.6569579288, "max_line_length": 101, "alphanum_fraction": 0.5986505827, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2371}
|
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import numpy.testing as npt
from sklearn.datasets import load_iris
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_allclose
from Pipeline.DPA import DensityPeakAdvanced
@pytest.fixture
def data_Fig1():
# Read dataset used for Figure 1 in the paper.
data_F1 = pd.read_csv("./benchmarks/Fig1.dat", sep=" ", header=None)
return data_F1
@pytest.fixture
def output_Fig1_g():
# Read benchmark output of the DPA algorithm: right after the g calculation
out_F1 = pd.read_csv("./benchmarks/output_Fig1_g.csv", header=None)
out_F1.columns = ["i", "g"]
return out_F1
@pytest.fixture
def output_Fig1_borders():
# Read benchmark output of the DPA algorithm: right after border calculation, after merging
out_F1 = pd.read_csv("./benchmarks/output_Fig1_borders.csv", header=None)
out_F1.columns = ["i", "j", "rho_b", "err_rho_b"]
return out_F1
@pytest.fixture
def output_Fig1_labels():
# Read benchmark final output of the DPA algorithm
out_F1 = pd.read_csv("./benchmarks/output_Fig1_labels.csv", header=None)
out_F1.columns = ["clu"]
return out_F1
@pytest.fixture
def output_Fig1_labelsHalos():
# Read benchmark final output of the DPA algorithm
out_F1 = pd.read_csv("./benchmarks/output_Fig1_labelsHalos.csv", header=None)
out_F1.columns = ["clu"]
return out_F1
def is_almost_equal(x,y,mismatch, decimal):
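    # Count entries differing by more than 1.5 * 10**(-decimal); only when more
    # than `mismatch` percent of them differ do we run the strict numpy assert
    # (which then reports the failure), otherwise the arrays are accepted.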
d = 0
for i in range(len(x)):
if abs(x[i]-y[i]) > 1.5 * 10**(-decimal):
d += 1
print(d/len(x)*100)
if d/len(x)*100>mismatch:
npt.assert_almost_equal(x, y, decimal=decimal)
else:
assert True
def test_PointAdaptive_kNN(data_Fig1, output_Fig1_labels, output_Fig1_labelsHalos, output_Fig1_borders):
est = DensityPeakAdvanced(Z=1.5, n_jobs=-1)
    assert est.dim is None
assert est.k_max == 1000
assert est.D_thr == 23.92812698
assert est.metric == "euclidean"
assert est.dim_algo == "twoNN"
est.fit(data_Fig1)
assert hasattr(est, 'is_fitted_')
assert est.k_max_ == max(est.k_hat_)
print(len(data_Fig1), len(est.densities_))
assert len(data_Fig1) == len(est.densities_)
assert_array_equal(est.labels_, [c-1 for c in output_Fig1_labels["clu"]])
is_almost_equal(est.halos_, [c-1 for c in output_Fig1_labelsHalos["clu"]], 0.0, 0)
#assert_array_equal(est.halos_, output_Fig1_labelsHalos["clu"])
assert_array_equal([est.topography_[i][0]+1 for i in range(len(est.topography_))], output_Fig1_borders["i"])
assert_array_equal([est.topography_[i][1]+1 for i in range(len(est.topography_))], output_Fig1_borders["j"])
npt.assert_almost_equal([est.topography_[i][2] for i in range(len(est.topography_))], output_Fig1_borders["rho_b"], decimal=3)
npt.assert_almost_equal([est.topography_[i][3] for i in range(len(est.topography_))], output_Fig1_borders["err_rho_b"], decimal=3)
|
{"hexsha": "809e190333facb93571670dceacde1763c084a68", "size": 3036, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pipeline/tests/test_DPA.py", "max_stars_repo_name": "giovannidoni/DPA", "max_stars_repo_head_hexsha": "ccfca1d60cd068d748dd0103417d9769dfa25a99", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pipeline/tests/test_DPA.py", "max_issues_repo_name": "giovannidoni/DPA", "max_issues_repo_head_hexsha": "ccfca1d60cd068d748dd0103417d9769dfa25a99", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pipeline/tests/test_DPA.py", "max_forks_repo_name": "giovannidoni/DPA", "max_forks_repo_head_hexsha": "ccfca1d60cd068d748dd0103417d9769dfa25a99", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7176470588, "max_line_length": 134, "alphanum_fraction": 0.7124505929, "include": true, "reason": "import numpy", "num_tokens": 867}
|
"""
WMAP plotting with HEALPix
--------------------------
This example uses the :func:`astroML.datasets.fetch_wmap_temperatures`
functionality to download and plot the raw WMAP 7-year data. The
visualization requires the `healpy <https://github.com/healpy/healpy>`_
package to be installed.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
# warning: due to a bug in healpy, importing it before pylab can cause
# a segmentation fault in some circumstances.
import healpy as hp
from astroML.datasets import fetch_wmap_temperatures
#------------------------------------------------------------
# Fetch the wmap data
wmap_unmasked = fetch_wmap_temperatures(masked=False)
#------------------------------------------------------------
# plot the unmasked map
fig = plt.figure(1)
hp.mollview(wmap_unmasked, min=-1, max=1, title='Raw WMAP data',
fig=1, cmap=plt.cm.jet, unit=r'$\Delta$T (mK)')
plt.show()
|
{"hexsha": "dc73cd22d85e56a7bfb4de19b1ab82ca88ca9b7a", "size": 1075, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/datasets/plot_wmap_raw.py", "max_stars_repo_name": "aragilar/astroML", "max_stars_repo_head_hexsha": "d3f6279eb632957662338761cb559a1dcd541fb0", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 735, "max_stars_repo_stars_event_min_datetime": "2015-01-07T23:55:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:20:25.000Z", "max_issues_repo_path": "examples/datasets/plot_wmap_raw.py", "max_issues_repo_name": "aragilar/astroML", "max_issues_repo_head_hexsha": "d3f6279eb632957662338761cb559a1dcd541fb0", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 168, "max_issues_repo_issues_event_min_datetime": "2015-01-06T21:02:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T03:15:29.000Z", "max_forks_repo_path": "examples/datasets/plot_wmap_raw.py", "max_forks_repo_name": "aragilar/astroML", "max_forks_repo_head_hexsha": "d3f6279eb632957662338761cb559a1dcd541fb0", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 278, "max_forks_repo_forks_event_min_datetime": "2015-01-26T00:29:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T21:17:50.000Z", "avg_line_length": 34.6774193548, "max_line_length": 72, "alphanum_fraction": 0.6530232558, "include": true, "reason": "import numpy", "num_tokens": 262}
|
import numpy as np
class TablePreprocessor(object):
def __init__(self, ranges, N=20):
self.ranges = ranges
self.state_dimension = ranges.shape[0]
self.N = N
self.num_states = self.N**self.state_dimension
def preprocess(self, s):
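        # Linearly rescale each state component onto [0, N), bin it, and
        # flatten the per-dimension bin indices into one base-N integer.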
binned = (1.0*s-self.ranges[:,0])/(self.ranges[:,1]-self.ranges[:,0])
binned = binned*self.N
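        # ceil(v) - 1 sends v in (k, k+1] to bin k; the (binned > 0) term keeps
        # an exact 0 in bin 0 instead of bin -1.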
binned = np.ceil(binned).astype(int)-(binned>0).astype(int)
for i in range(self.state_dimension):
if binned[i] > self.N-1:
binned[i] = self.N-1
if binned[i] < 0:
binned[i] = 0
n = 0
for i in range(self.state_dimension):
n += binned[i]*( self.N**i )
return n
def deprocess(self, n):
s = np.zeros((self.state_dimension))
t = n
for i in range(self.state_dimension-1,-1,-1):
r = int(np.floor( (1.0*t)/(self.N**i) ))
s[i] = (1.0*r)/self.N*(self.ranges[i,1]-self.ranges[i,0])+self.ranges[i,0]
t = t-r*(self.N**i)
return s
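# A minimal usage sketch (hypothetical ranges, not part of the original module):
# discretize a 2-D state over [0, 1] x [-1, 1] into one of N**2 = 400 indices.
if __name__ == '__main__':
    ranges = np.array([[0.0, 1.0], [-1.0, 1.0]])
    pre = TablePreprocessor(ranges, N=20)
    idx = pre.preprocess(np.array([0.25, 0.5]))
    print(idx, pre.deprocess(idx))  # deprocess returns the bin's lower corner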
|
{"hexsha": "2e38d361c3847ccb9418056a24269fab5bf1f03b", "size": 1110, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocessor.py", "max_stars_repo_name": "gmaher/distributed_rl", "max_stars_repo_head_hexsha": "194238c523c65c149e64ac576ba92ef95764c20f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocessor.py", "max_issues_repo_name": "gmaher/distributed_rl", "max_issues_repo_head_hexsha": "194238c523c65c149e64ac576ba92ef95764c20f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocessor.py", "max_forks_repo_name": "gmaher/distributed_rl", "max_forks_repo_head_hexsha": "194238c523c65c149e64ac576ba92ef95764c20f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0, "max_line_length": 86, "alphanum_fraction": 0.5117117117, "include": true, "reason": "import numpy", "num_tokens": 315}
|
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[letterpaper, total={7.5in, 9in}]{geometry}
\usepackage{amsmath}
\usepackage{amsfonts}
\title{Applied Partial Differential Equations}
\author{Grant Smith}
\date{Spring 2022}
\begin{document}
\maketitle
\section{Classifying PDEs}
\begin{itemize}
\item 2) Classify the following operators as linear or nonlinear:
\begin{enumerate}
\item $\mathcal{L} u = u_x + xu_y$ -- linear
\item $\mathcal{L} u = u_x + uu_y$ -- nonlinear
\item $\mathcal{L} u = u_x + \left(u_y\right)^2$ -- nonlinear
\item $\mathcal{L} u = u_x + u_y + 1$ -- nonlinear
\item $\mathcal{L} u = \sqrt{1+x^2}\left(\cos(y)\right)u_x+u_{yxy}-\left[\arctan(x/y)\right]u$ -- linear
\end{enumerate}
\item 3) For each of the following equations, state the order and whether it is nonlinear, linear inhomogeneous, or linear homogeneous. Provide reasons.
\begin{tabular}{|| c| c| c| c|| }
\hline
Equation & Order & Classification & Reason \\
\hline \hline
$u_t - u_{xx} + 1 = 0$ & 2 & linear inhomogeneous & inspection \\ \hline
$u_t - u_{xx} + xu = 0$ & 2 & linear homogeneous & inspection \\ \hline
$u_t - u_{xxt} + uu_x = 0$ & 3 & nonlinear & the $uu_x$ term is quadratic in $u$ \\ \hline
$u_{tt} - u_{xx} + x^2 = 0$ & 2 & linear inhomogeneous & inspection \\ \hline
$iu_t - u_{xx} + u/x = 0$ & 2 & linear homogeneous & every term is linear in $u$, with no forcing term \\ \hline
$u_x(1+u_x^2)^{-1/2} + u_y(1+u_y^2)^{-1/2} = 0$ & 1 & nonlinear & the derivatives appear inside a nonlinear function \\ \hline
$u_x - e^yu_{y} = 0$ & 1 & linear homogeneous & inspection \\ \hline
$u_t + u_{xxxx} + \sqrt{1+u} = 0$ & 4 & nonlinear & u is under the square root \\
\hline
\end{tabular}
\item 4) Show that the difference of two solutions of an inhomogeneous linear equation $\mathcal{L} u = g$ with the same $g$ is a solution of the homogeneous equation $\mathcal{L} u = 0$.
\begin{enumerate}
\item let solution 1 be $u_1$, and let solution 2 be $u_2$
\item the question asks about the difference of the two solutions. Thus, $\mathcal{L} \left( u_1 - u_2 \right)$
\item because $\mathcal{L}$ is linear, $\mathcal{L} \left( u_1 - u_2 \right)$ = $\mathcal{L} u_1 - \mathcal{L} u_2 = g - g = 0$
\item thus, $u_1 - u_2$ is a solution to $\mathcal{L} u =0$
\end{enumerate}
\end{itemize}
\newpage
\section{First Order Linear PDEs}
\subsection{Deriving the Method of Characteristics}
The problem we will solve is of the form
$$ a(x,y) \frac{\partial u}{\partial x}(x,y) + b(x,y) \frac{\partial u}{\partial y}(x,y) = c(x,y)$$
Its solution reduces to the characteristic system:
$$\frac{\partial x}{\partial s}(s,t) = a^*(s,t)$$
$$\frac{\partial y}{\partial s}(s,t) = b^*(s,t)$$
$$ \frac{\partial u^*}{\partial s}(s,t) = c^*(s,t)$$
Which we will derive now. First, let's define some coordinate transforms. These will help us get x and y from s and t
$$x(s,t); y(s,t)$$
And we will define our transforms to be bijective (or nonzero Jacobians), so we can go the other way and get s and t from x and y
$$s(x,y); t(x,y)$$
This is enforced by:
$$ x(s(x,y),t(x,y)) = x $$
$$ y(s(x,y),t(x,y)) = y$$
$$s(x(s,t),y(s,t)) = s $$
$$ t(x(s,t),y(s,t)) = t$$
We also have a transformed u that we get by using these transform equations:
$$u^*(s,t):= u(x(s,t),y(s,t))$$
And for posterity and clarity's sake, we show that we can get u back from the transformed u:
$$u(x,y) = u^{**}(x,y) = u^*(x(s(x,y),t(x,y)),y(s(x,y),t(x,y)))$$
We can also transform the whole first equation:
$$a^*(s,t) \frac{\partial u}{\partial x}^*(s,t) + b^*(s,t) \frac{\partial u}{\partial y}^*(s,t) = c^*(s,t)$$
Here, we rewrite $u^*$ for convenience
$$u^*(s,t):= u(x(s,t),y(s,t))$$
We could differentiate $u^*$ with respect to either of its arguments. Let's just choose the first one for now, $s$.
$$\frac{\partial u^*}{\partial s}(s,t) = \frac{\partial u}{\partial x}(x(s,t),y(s,t)) \frac{\partial x}{\partial s}(s,t) + \frac{\partial u}{\partial y}(x(s,t),y(s,t)) \frac{\partial y}{\partial s}(s,t)$$
And we can do the coordinate transform on the partials of $u$ to get:
$$\frac{\partial u^*}{\partial s}(s,t) = \frac{\partial u}{\partial x}^*(s,t) \frac{\partial x}{\partial s}(s,t) + \frac{\partial u}{\partial y}^*(s,t) \frac{\partial y}{\partial s}(s,t)$$
Which we can pattern match against the transformed version of the original equation (rewritten here for convenience)
$$a^*(s,t) \frac{\partial u}{\partial x}^*(s,t) + b^*(s,t) \frac{\partial u}{\partial y}^*(s,t) = c^*(s,t)$$
or
$$c^*(s,t)= \frac{\partial u}{\partial x}^*(s,t)a^*(s,t) + \frac{\partial u}{\partial y}^*(s,t)b^*(s,t)$$
and get:
$$\frac{\partial x}{\partial s}(s,t) = a^*(s,t)$$
$$\frac{\partial y}{\partial s}(s,t) = b^*(s,t)$$
$$ \frac{\partial u^*}{\partial s}(s,t) = c^*(s,t)$$
\newpage
\subsection{Problems}
\begin{itemize}
\item 1) Solve: $2u_t + 3u_x = 0$
\begin{enumerate}
\item The form of this equation hints at a directional derivative. So if we change to coordinates, one of which runs along this direction, then we have a nice form of differential equation.
\item Change the coordinates:
$$u\left(x(c,k),t(c,k)\right) = u^*(c,k)$$
\item Now, pick one of the variables, $c$ or $k$, and partially differentiate the above equation with respect to that variable. For example, we'll pick $c$.
$$\frac{\partial u}{\partial x} \frac{\partial x}{\partial c} + \frac{\partial u}{\partial t} \frac{\partial t}{\partial c} = \frac{\partial u^*}{\partial c}$$
\item By pattern matching against the initial equation, we have the following three equations:
$$\frac{\partial x}{\partial c} = 3 ; \frac{\partial t}{\partial c} = 2; \frac{\partial u^*}{\partial c} = 0$$
\item By solving all three, we get the following:
$$x = 3c + f_1(k) ; t = 2c + f_2(k); u^* = f(k)$$
\item Given that the relationships between $x, t, c, k$ are just coordinate transforms, we can choose $f_1$ and $f_2$ to be suitable. We choose $f_1 = k$ and $f_2 = 0$, which gives us the following:
$$x = 3c + k ; t = 2c; u^* = f(k)$$
This works because we know that as $c$ and $k$ vary throughout the whole plane, the corresponding $x$ and $y$ will also trace the whole plane. Also, these are convenient choices because the initial condition has $t=0$, so having $t$ be a very simple function (i.e. not dependent upon $k$) will be convenient.
\item We can rearrange to get:
$$c = \frac{t}{2} ; k = x - \frac{3t}{2}$$
\item Looking back at step 2, we can now write:
$$u\left(x(c,k),t(c,k)\right) = u^*(c,k) = f(k)$$
And if we transform the coordinates back using our equation for $k$ in step 7, we have:
$$u(x,t) = f (x-\frac{3t}{2})$$
\item Now we use the initial condition that $$u(x,0) = \sin(x) = f(x)$$
\item Now we know that $$u(x,t) = \sin(x-\frac{3t}{2})$$
\item And this also works if we plug it into the original equation, as checked below.
\end{enumerate}
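As a quick check by direct substitution: with $u(x,t) = \sin(x-\frac{3t}{2})$ we have $u_t = -\frac{3}{2}\cos(x-\frac{3t}{2})$ and $u_x = \cos(x-\frac{3t}{2})$, so
$$2u_t + 3u_x = -3\cos(x-\tfrac{3t}{2}) + 3\cos(x-\tfrac{3t}{2}) = 0$$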
\newpage
\item 2) Solve $3u_y + u_{xy} = 0$
\begin{enumerate}
\item Assuming differentiability, we can change the order of differentiation, so $3u_y + u_{yx} = 0$
\item Let $v = u_y$. This gives $3v + v_x = 0$
\item Rearranging gives:
$$\frac{\partial v}{\partial x} = -3v$$
\item separating and integrating:
$$\frac{-1}{3}\frac{\partial v}{v} =\partial x$$
$$\frac{-1}{3}\ln\left| v\right| = x + f^{***}(y)$$
$$\ln\left| v\right| = -3x + f^{**}(y)$$
$$\left| v\right| = e^{-3x}f^*(y)$$
And assuming that $f^*$ can be positive or negative, we can drop the absolute value:
$$v = e^{-3x}f^*(y)$$
\item Remembering that $v$ is $u_y$ rather than $u$ itself,
$$\frac{\partial u}{\partial y} = u_y = v = e^{-3x}f^*(y)$$
$$\partial u = e^{-3x}f^*(y) \partial y$$
$$u = e^{-3x}f(y) + g(x)$$
\item Which satisfies the original PDE, which can be verified by substitution.
\item I suppose I haven't proven that this is the most general form of the solution, but I don't really know how to prove that. I know that the equation is linear, so any linear combination of solutions will also work, but all linear combinations are already captured in $f$ and $g$.
\end{enumerate}
\newpage
\item 7) Solve the equation
$$yu_x + xu_y = 0; u(0,y) = e^{-y^2}$$
And find the region of the x-y plane in which the solution is uniquely determined.
\begin{enumerate}
\item Note that this is the directional derivative of u. Thus, we change coordinates using the transform $T$:
$$T(u) = u^*(t,k) = u(x(t,k),y(t,k))$$
\item Now take the partial derivative of both sides with respect to $t$ or $k$. Either should work. We'll arbitrarily choose $t$.
$$\frac{\partial u}{\partial x}\frac{\partial x}{\partial t} + \frac{\partial u}{\partial y}\frac{\partial y}{\partial t} = \frac{\partial u^*}{\partial t}$$
\item Now pattern match against the original equation to get the following PDEs:
$$\frac{\partial x}{\partial t} = y;\frac{\partial y}{\partial t} = x;\frac{\partial u^*}{\partial t} = 0$$
\item Now we will focus on the other two PDEs. Please forgive this poor mathematics, but I will integrate them and get the following:
$$x = yt + f_1(k); y = xt + f_2(k); u^* = f(k)$$
Which, using the third equation, now extends the equality from step 1 to:
$$T(u) = u^*(t,k) = u(x(t,k),y(t,k)) = f(k)$$
\item Let's pause for a moment to discuss the region of the x-y plane in which the solution is uniquely determined. The equation gets information along the line $x=0$. This means that any characteristic curve that does not pass through this line will not be uniquely determined. Given the shape of the characteristic curves determined in step 4, there is an "X" shape in the x-y plane formed by $y=x$ and $y=-x$ that determines the areas of determination. The areas of the x-y plane that are between these two lines are not determined, but the area above and below both of these lines is determined. Thus, it makes a sort of bow-tie of non-determinedness, and an hourglass of determinedness.
\item Having discussed the determinedness, I also will mention that I'm not sure how to choose $f_1$ and $f_2$ such that they sweep the entire x-y plane. I seem to only be able to get half of it without making a non-injective function. Given this fact along with the fact that we only will be able to define a solution in the hourglass region anyway, I will choose $f_1$ and $f_2$ to sweep that region, and I will ignore the bow-tie region. Thus, we can choose:
$$ f_1(k) = 0; f_2(k) = k$$
So
$$x = yt; y = xt + k$$
And
$$t = \frac{x}{y}; k = y - \frac{x^2}{y}$$
\item Rearranging and rewriting the transforms gives:
$$T(u) = u^*(t,k) = u(x(t,k),y(t,k)) = u(yt, xt + k) = f(k)$$
$$T(u^*) = u(x,y) = u^*(t(x,y),k(x,y)) = u^*(\frac{x}{y},y - \frac{x^2}{y}) = f(y - \frac{x^2}{y})$$
\item And using the initial condition gives:
$$u(0,y) = e^{-y^2} = f(y)$$
So
$$u(x,y) = e^{-\left(y - \frac{x^2}{y}\right)^2}$$
Which satisfies the initial condition, but it does not satisfy the original equation, so I'm doing something wrong; the slip is traced in the note right after this list. After graphing it, it appears that this solution works in the hourglass region, and the only area in which it looks off is in the bow-tie.
\end{enumerate}
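The slip is in step 4: the equations $\frac{\partial x}{\partial t} = y$ and $\frac{\partial y}{\partial t} = x$ are coupled, so we cannot integrate one while holding the other fixed. Differentiating again gives $x_{tt} = x$, so along each characteristic
$$x = c_1(k)e^t + c_2(k)e^{-t}; \quad y = c_1(k)e^t - c_2(k)e^{-t}$$
and therefore $y^2 - x^2 = -4c_1(k)c_2(k)$ is constant along characteristics. Hence the general solution is $u(x,y) = f(y^2 - x^2)$, and the condition $u(0,y) = e^{-y^2}$ forces $f(w) = e^{-w}$, so
$$u(x,y) = e^{x^2 - y^2}$$
which satisfies both the equation and the data, and is determined exactly in the hourglass region $\left|y\right| \geq \left|x\right|$.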
\newpage
\item 8)
Solve the following:
$$au_x + bu_y + cu = 0$$
\begin{enumerate}
\item First, move $cu$ to the other side:
$$au_x + bu_y = - cu$$
\item Now note that this is the directional derivative of u. Thus, we change coordinates using the transform $T$:
$$T(u) = u^*(t,k) = u(x(t,k),y(t,k))$$
\item Now take the partial derivative of both sides with respect to $t$ or $k$. Either should work. We'll arbitrarily choose $t$.
$$\frac{\partial u}{\partial x}\frac{\partial x}{\partial t} + \frac{\partial u}{\partial y}\frac{\partial y}{\partial t} = \frac{\partial u^*}{\partial t}$$
\item Now pattern match against the original equation to get the following PDEs:
$$\frac{\partial x}{\partial t} = a;\frac{\partial y}{\partial t} = b;\frac{\partial u^*}{\partial t} = -cu^*$$
\item Which, when integrated, give:
$$x = at + f_1(k);y = bt + f_2(k);\frac{\partial u^*}{\partial t} = -cu^*$$
In which we have not yet solved equation 3
\item We can simplify the $f_1$ and $f_2$ because we don't need that much generality. Simply letting $f_1 = 0$ and $f_2 = k$ will suffice. Thus:
$$x = at ;y = bt + k;\frac{\partial u^*}{\partial t} = -cu^*$$
\item and we can solve for $t$ and $k$ in terms of $y$ and $x$ as a way to invert transform $T$:
$$t = \frac{x}{a}; k = y - \frac{bx}{a}$$
Which gives us:
$$T(u) = u^*(t,k) = u(at,bt + k)$$
$$T^{-1}(u^*) = u(x,y) = u^*(\frac{x}{a},y - \frac{bx}{a})$$
\item Now we need to solve the PDE we left above (one way to finish is sketched after this list):
$$\frac{\partial u^*}{\partial t} = -cu^*$$
\end{enumerate}
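For each fixed $k$, the remaining equation is a linear ODE in $t$, so one way to finish is:
$$\frac{\partial u^*}{\partial t} = -cu^* \rightarrow u^*(t,k) = f(k)e^{-ct}$$
Undoing the transform with $t = \frac{x}{a}$ and $k = y - \frac{bx}{a}$ gives
$$u(x,y) = f\left(y - \frac{bx}{a}\right)e^{-cx/a}$$
which can be checked by substitution: $au_x + bu_y = -cf\left(y - \frac{bx}{a}\right)e^{-cx/a} = -cu$.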
\newpage
\item 13) Use the coordinate method to solve the following equation:
$$u_x + 2u_y + (2x-y)u = 2x^2 + 3xy -2y^2$$
\begin{enumerate}
\item Using the methods from the previous problem, we can get:
$$x = t ;y = 2t + k;\frac{\partial u^*}{\partial t} = 2x^2 + 3xy - 2y^2 - (2x-y)u^*$$
$$t = x; k = y - 2x$$
$$T(u) = u^*(t,k) = u(t,2t + k)$$
$$T^{-1}(u^*) = u(x,y) = u^*(x,y - 2x)$$
\item and I am stuck again; one way to finish is sketched after this list
\end{enumerate}
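One way to finish: in the new coordinates $2x - y = -k$, and the right-hand side factors as $2x^2 + 3xy - 2y^2 = (2x-y)(x+2y) = -k(5t + 2k)$, so the transformed equation is the linear ODE
$$\frac{\partial u^*}{\partial t} - k u^* = -k(5t + 2k)$$
A particular solution of the form $At + B$ gives $A = 5$ and $B = 2k + \frac{5}{k}$, and the homogeneous solutions are $f(k)e^{kt}$, so
$$u^*(t,k) = 5t + 2k + \frac{5}{k} + f(k)e^{kt}$$
Transforming back with $t = x$ and $k = y - 2x$ (so that $5t + 2k = x + 2y$):
$$u(x,y) = x + 2y + \frac{5}{y-2x} + f(y-2x)e^{x(y-2x)}$$
which satisfies the original equation away from the characteristic $y = 2x$, as direct substitution confirms.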
\newpage
\item 1.5-5: Solve the following PDE and discuss behavior at boundary conditions given below.
$$\forall x,y; u_x(x,y) + yu_y(x,y) = 0$$
Transform coordinates:
$$\forall s,t; u_x^*(s,t) + y(s,t)u_y^*(s,t) = 0$$
which gives the following equations:
$$\frac{\partial x}{\partial s} = 1 \rightarrow x(s,t) = s + f_1(t)$$
$$\frac{\partial y}{\partial s} = y(s,t) \rightarrow \frac{1}{y(s,t)}\partial y = \partial s \rightarrow y(s,t) = f_2(t)e^s$$
$$\frac{\partial u^*}{\partial s} = 0 \rightarrow u^*(s,t) = f(t)$$
And we know that $x(s,t)$ can trace out the whole line without $f_1(t)$, so we will drop $f_1(t)$. Also, we know that $y(s,t)$ traces out the whole plane if $f_2(t) = t$, so these three equations simplify to:
$$x(s,t) = s; y(s,t) = te^s; u^*(s,t)=f(t)$$
We can also invert the coordinate transforms and get:
$$s(x,y) = x; t(x,y) = \frac{y}{e^x}$$
And so now we can invert the transform to get $u(x,y)$ from $u^*(s,t)$:
$$u(x,y) = f(\frac{y}{e^x})$$
Now we consider the boundary condition $u(x,0) = x$
$$u(x,0) = f(\frac{0}{e^x}) = f(0) \neq x$$
We have found a contradiction because $f(0)$ is constant, and $x$ is not.
Now we consider the boundary condition $u(x,0) = 1$
$$u(x,0) = f(\frac{0}{e^x}) = f(0) = 1$$
Thus, we have found a requirement that $f(0) = 1$, but there are no other requirements on $f$, thus, there are still infinitely many possibilities for the function. The moral of this problem is that we're specifying our boundary along our characteristics instead of against them.
\newpage
\item 1.5-6: Solve the following PDE
$$\forall x,y; u_x(x,y) + 2xy^2u_y(x,y) = 0$$
First, divide the whole equation by $x$. This highlights a problem at $x = 0$
$$\forall x,y; \frac{1}{x}u_x(x,y) + 2y^2u_y(x,y) = 0$$
Now we transform coordinates:
$$\forall s,t; \frac{1}{x(s,t)}u_x^*(s,t) + 2y(s,t)^2u_y^*(s,t) = 0$$
And we have three smaller PDEs and their solutions:
$$\frac{\partial x}{\partial s} = \frac{1}{x(s,t)} \rightarrow x(s,t)^2 = 2s + f_1(t) $$
$$\frac{\partial y}{\partial s} = 2y(s,t)^2 \rightarrow y(s,t) = \frac{-1}{2s + f_2(t)}$$
$$\frac{\partial u^*}{\partial s} = 0 \rightarrow u^*(s,t) = F(t)$$
The next issue is how to choose $f_1$ and $f_2$. If we isolate the $s$ in the $x$ equation, we can substitute it in for the $y$ equation to get:
$$y(s,t) = \frac{-1}{x(s,t)^2 + f(t)}$$
Which if we simply let $f$ be the negative identity function the characteristics still trace out the whole plane, and we get:
$$y(s,t) = \frac{-1}{x(s,t)^2 - t} \rightarrow t = \frac{1}{y}+x^2$$
Which means if we invert the transform on $u^*$, we get:
$$u(x,y) = F(\frac{1}{y}+x^2)$$
Which works if we substitute it back into the original equation.
\end{itemize}
\newpage
\section{The Wave Equation}
The solution of the following wave equation:
$$u_{tt} = c^2u_{xx}$$
with the initial conditions:
$$u(x,0) = \phi(x)$$
$$u_t(x,0) = \psi(x)$$
is given by d'Alembert's formula:
$$u(x,t) = \frac{1}{2}\left(\phi(x + ct) + \phi(x-ct)\right) + \frac{1}{2c} \int_{x-ct}^{x+ct} \psi(s) \,ds $$
\subsection{Example Problems}
\begin{itemize}
\item 2.1-1:
Solve $$u_{tt} = c^2u_{xx} ; u(x,0) = e^x ; u_t(x,0) = \sin{x}$$
$$u(x,t) = \frac{1}{2}\left(e^{x + ct} + e^{x - ct}\right) + \frac{1}{2c}\int_{x-ct}^{x+ct} \sin{s} \,ds$$
$$u(x,t) = \frac{1}{2}\left(e^{x + ct} + e^{x - ct}\right) + \frac{1}{2c} [\cos{(x-ct)}-\cos{(x+ct)}] $$
\item 2.1-2:
Solve $$u_{tt} = c^2u_{xx} ; u(x,0) = \log\left(1 + x^2\right) ; u_t(x,0) = 4 + x$$
$$u(x,t) = \frac{1}{2}\left[\log{\left(1 + (x + ct)^2\right)}+\log{\left(1 + (x - ct)^2\right)}\right] + \frac{1}{2c}\int_{x-ct}^{x+ct} (4+s) \,ds$$
$$u(x,t) = \frac{1}{2}\left[\log{\left(1 + (x + ct)^2\right)}+\log{\left(1 + (x - ct)^2\right)}\right] + 4t + xt$$
\end{itemize}
\newpage
\section{Fourier Series and Boundary Conditions}
I think one of the most important things you can know that will help you when doing Fourier series is that a Fourier series converges, at a jump, to the average of the left- and right-hand limits. That might seem innocuous, but it will help. Here's how. We sort of learned two different types or uses of Fourier series:
\begin{itemize}
\item We learned that the boundary values determine whether you want to use sines or cosines in your expansion.
\item And we also separately learned that when we're computing Fourier series to approximate a function, we often extend the function, find its period, and so on, in order to find the Fourier coefficients.
\end{itemize}
But what I noticed with the help of the above fact is how to unify these two views of the Fourier series. What we can really do is use the boundary conditions to choose how to extend the function, and then that extension is the function we want to approximate. This is helpful because it unifies two seemingly separate issues: using a Fourier series to approximate a function versus choosing a certain type of Fourier series to meet the requirements of a boundary condition. Just extend your function to meet your boundary condition, and the correct choice of sines and cosines will fall out, as in the example below.
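For example (standard boundary conditions on $[0,l]$): Dirichlet conditions $u(0,t) = u(l,t) = 0$ call for the odd extension and a sine series $\sum_n b_n \sin\left(\frac{n\pi x}{l}\right)$, while Neumann conditions $u_x(0,t) = u_x(l,t) = 0$ call for the even extension and a cosine series $\frac{a_0}{2} + \sum_n a_n \cos\left(\frac{n\pi x}{l}\right)$; in both cases the extension is exactly what makes the boundary condition automatic.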
\end{document}
|
{"hexsha": "9a4fcd06b8660cdb25abe81101cd12b70d2831ad", "size": 18943, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "main.tex", "max_stars_repo_name": "GSmithApps/PDEs", "max_stars_repo_head_hexsha": "0bdae742c082cbf7ac383523aef4379984ec801f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.tex", "max_issues_repo_name": "GSmithApps/PDEs", "max_issues_repo_head_hexsha": "0bdae742c082cbf7ac383523aef4379984ec801f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.tex", "max_forks_repo_name": "GSmithApps/PDEs", "max_forks_repo_head_hexsha": "0bdae742c082cbf7ac383523aef4379984ec801f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 57.403030303, "max_line_length": 700, "alphanum_fraction": 0.6131024653, "num_tokens": 6355}
|
import cv2
import numpy as np
from chainer import cuda
import chainer
try:
import cupy
except ImportError:
pass
import os
def copy_to_cpu(imgs):
if type(imgs) == chainer.variable.Variable :
imgs = imgs.data
try:
if type(imgs) == cupy.core.core.ndarray:
imgs = cuda.to_cpu(imgs)
except:
pass
return imgs
def postprocessing_tanh(imgs):
imgs = (imgs + 1) * 127.5
imgs = np.clip(imgs, 0, 255)
imgs = imgs.astype(np.uint8)
return imgs
def save_single_image(img, path, post_processing=postprocessing_tanh):
img = copy_to_cpu(img)
if post_processing is not None:
img = post_processing(img)
#ch, w, h = img.shape
img = img.transpose((1, 2, 0))
cv2.imwrite(path, img)
def save_images_grid(imgs, path, grid_w=4, grid_h=4, post_processing=postprocessing_tanh, transposed=False):
imgs = copy_to_cpu(imgs)
if post_processing is not None:
imgs = post_processing(imgs)
b, ch, w, h = imgs.shape
assert b == grid_w*grid_h
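    # (b, ch, w, h) -> (grid_w * w, grid_h * h, ch): rearrange the batch into a
    # grid_w x grid_h mosaic in HWC layout for cv2.imwrite.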
imgs = imgs.reshape((grid_w, grid_h, ch, w, h))
imgs = imgs.transpose(0, 1, 3, 4, 2)
if transposed:
imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(1, 2, 0, 3, 4).reshape((grid_h*w, grid_w*h, ch))
else:
imgs = imgs.reshape((grid_w, grid_h, w, h, ch)).transpose(0, 2, 1, 3, 4).reshape((grid_w*w, grid_h*h, ch))
if ch==1:
imgs = imgs.reshape((grid_w*w, grid_h*h))
cv2.imwrite(path, imgs)
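# A minimal usage sketch (hypothetical shapes, not part of the original module):
# tile 16 random single-channel 32x32 images in tanh range into a 4x4 mosaic.
if __name__ == '__main__':
    fake = np.random.uniform(-1, 1, size=(16, 1, 32, 32)).astype(np.float32)
    save_images_grid(fake, 'grid.png', grid_w=4, grid_h=4)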
|
{"hexsha": "9da2eb81932f8818b7a2f0e0cb1f4c5f24dfd5b2", "size": 1470, "ext": "py", "lang": "Python", "max_stars_repo_path": "common/utils/save_images.py", "max_stars_repo_name": "Aixile/chainer-gan-experiments", "max_stars_repo_head_hexsha": "4371e8369d2805e8ace6d7aacc397aa6e62680a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2017-06-24T10:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T22:52:37.000Z", "max_issues_repo_path": "common/utils/save_images.py", "max_issues_repo_name": "Aixile/chainer-gan-experiments", "max_issues_repo_head_hexsha": "4371e8369d2805e8ace6d7aacc397aa6e62680a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-08-21T06:19:31.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-21T07:54:28.000Z", "max_forks_repo_path": "common/utils/save_images.py", "max_forks_repo_name": "Aixile/chainer-gan-experiments", "max_forks_repo_head_hexsha": "4371e8369d2805e8ace6d7aacc397aa6e62680a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-08-22T07:00:16.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-18T16:15:21.000Z", "avg_line_length": 28.8235294118, "max_line_length": 114, "alphanum_fraction": 0.6326530612, "include": true, "reason": "import numpy,import cupy", "num_tokens": 458}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import sys
import os
import logging
import logging.config
import yaml
import numpy as np
from scipy import sparse
import inspect
from .hparams import HParams
import re
logger = logging.getLogger(__name__)
def safe_mkdirs(path):
    ''' Safe makedirs
    The directory is created as with `mkdir -p`.
    Returns:
        `path` if the directory already exists or was created
    Exception:
        OSError if something is wrong
    '''
try:
os.makedirs(path)
except OSError, e:
if e.errno != 17: # 17 = file exists
raise
return path
def get_from_module(module, name, params=None, regex=False):
""" Get a class or method from a module given its name
"""
members = inspect_module(module, regex=regex)
if name is None or name.lower() == 'none':
return None
members = {k.lower().strip(): v for k, v in members.items()}
try:
member = members[name.lower().strip()]
# is a class and must be instantiate if params is not none
if (member and params is not None) and inspect.isclass(member):
return member(**HParams().parse(params).values())
return member
except KeyError, e:
raise KeyError("%s not found in %s.\n Valid values are: %s" %
(name, module, ', '.join(members.keys())))
def inspect_module(module, to_dict=True, regex=False):
modules = {}
if regex:
pattern = re.compile(module)
for key, value in sys.modules.items():
if pattern.match(key):
modules[key] = value
else:
modules = {module: sys.modules[module]}
members = []
for key, value in modules.items():
members.extend(inspect.getmembers(value, lambda member:
hasattr(member, '__module__') and
member.__module__ == key))
if to_dict:
return dict(members)
return members
def ld2dl(ld):
'''Transform a list of dictionaries in a dictionaries with lists
# Note
All dictionaries have the same keys
'''
return dict(zip(ld[0], zip(*[d.values() for d in ld])))
def check_ext(fname, ext):
# Adding dot
ext = ext if ext[0] == '.' else '.' + ext
fname, f_ext = os.path.splitext(fname)
if f_ext == ext:
return True
return False
def parse_nondefault_args(args, default_args):
# removing default arguments
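    # i.e. keep only the options explicitly passed on the command line (or
    # whose value differs from the default), by first masking out every
    # default whose name never appears in sys.argv.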
args_default = {k: v for k, v in vars(default_args).items()
if k not in [arg.split('-')[-1] for arg in sys.argv
if arg.startswith('-')]}
args_nondefault = {k: v for k, v in vars(args).items()
if k not in args_default or args_default[k] != v}
args_nondefault = HParams().parse(args_nondefault)
return args_nondefault
def setup_logging(default_path='logging.yaml', default_level=logging.INFO,
env_key='LOG_CFG'):
"""Setup logging configuration
"""
path = default_path
value = os.getenv(env_key, None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level)
|
{"hexsha": "23356223b091ab7c7afb7bd4637653ee2c7fdf12", "size": 3469, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/generic_utils.py", "max_stars_repo_name": "igormq/asr-study", "max_stars_repo_head_hexsha": "302fa3087cc71aec4853360638dbe2f4a59b5726", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 155, "max_stars_repo_stars_event_min_datetime": "2017-03-12T22:56:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T09:03:57.000Z", "max_issues_repo_path": "utils/generic_utils.py", "max_issues_repo_name": "mvalverd/sbrt2017", "max_issues_repo_head_hexsha": "a66c33a55970fb56d91d31a9509cac7ae7ccf4c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-06-08T08:27:06.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-17T05:21:07.000Z", "max_forks_repo_path": "utils/generic_utils.py", "max_forks_repo_name": "mvalverd/sbrt2017", "max_forks_repo_head_hexsha": "a66c33a55970fb56d91d31a9509cac7ae7ccf4c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 72, "max_forks_repo_forks_event_min_datetime": "2017-03-16T12:10:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-16T10:34:50.000Z", "avg_line_length": 26.0827067669, "max_line_length": 75, "alphanum_fraction": 0.6073796483, "include": true, "reason": "import numpy,from scipy", "num_tokens": 788}
|
/*
* The MIT License (MIT)
*
* IziEditor
* Copyright (c) 2015 Martin Newhouse
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "track_loader.hpp"
#include "track.hpp"
#include "terrain_definition.hpp"
#include "tile_definition.hpp"
#include "control_point.hpp"
#include "start_point.hpp"
#include "include_path.hpp"
#include "component_readers.hpp"
#include "core/config.hpp"
#include "core/directive_reader.hpp"
#include <unordered_set>
#include <sstream>
#include <fstream>
#include <iostream>
#include <boost/filesystem.hpp>
namespace components
{
using namespace readers;
using core::Vector2i;
struct TrackLoader::Impl
{
void load_from_file(const std::string& file_name);
void load_from_stream(std::istream& stream, std::string working_directory);
void include(const std::string& file_name, std::size_t num_levels = 0);
void include(std::istream& stream, std::size_t num_levels = 0);
enum class AssetType
{
Contained,
Included
};
void process_tile_group_definition(std::istream& stream, TileId group_id, std::size_t group_size,
AssetType asset_type, bool rotatable);
void process_tile_definition(std::istream& stream, const std::string& pattern_name, const std::string& image_name, AssetType asset_type);
void process_terrain_definition(std::istream& stream, std::string terrain_name, AssetType asset_type);
void process_control_points(std::istream& stream, std::size_t num_points);
void process_start_points(std::istream& stream, std::size_t num_points);
std::string resolve_asset_path(const std::string& file_name);
void add_asset(std::string file_path);
void place_tile(const LevelTile& tile);
std::unordered_set<std::string> included_files_;
std::vector<std::string> assets_;
std::string working_directory_;
Track track_;
LayerHandle current_layer_;
std::istringstream line_stream_;
std::string directive_;
std::string line_;
};
BrokenTrackException::BrokenTrackException(std::string missing_file)
: std::runtime_error("broken track (missing file '" + missing_file + "')"),
missing_file_(std::move(missing_file))
{
}
std::string TrackLoader::Impl::resolve_asset_path(const std::string& file_name)
{
auto include_directory = find_include_directory(file_name, { working_directory_, config::data_directory });
boost::filesystem::path path = include_directory;
path /= file_name;
return path.string();
}
void TrackLoader::Impl::add_asset(std::string file_path)
{
if (std::find(assets_.begin(), assets_.end(), file_path) == assets_.end())
{
assets_.push_back(std::move(file_path));
}
}
void TrackLoader::Impl::load_from_file(const std::string& file_name)
{
track_ = Track();
auto track_name = boost::filesystem::basename(file_name);
if (!track_name.empty())
{
track_name.front() = std::toupper(track_name.front(), std::locale());
}
track_.set_path(file_name);
track_.set_name(track_name);
working_directory_ = boost::filesystem::path(file_name).parent_path().string();
include(file_name);
}
void TrackLoader::Impl::load_from_stream(std::istream& stream, std::string working_directory)
{
working_directory_ = std::move(working_directory);
include(stream);
}
void TrackLoader::Impl::include(const std::string& file_name, std::size_t num_levels)
{
// Test if the file has not been included before
if (included_files_.find(file_name) == included_files_.end())
{
auto include_path = resolve_asset_path(file_name);
std::ifstream stream(include_path, std::istream::in);
if (!stream)
{
throw BrokenTrackException(file_name);
}
add_asset(include_path);
included_files_.insert(std::move(include_path));
include(stream, num_levels);
}
}
void TrackLoader::Impl::include(std::istream& stream, std::size_t num_levels)
{
std::istringstream line_stream;
AssetType asset_type = (num_levels == 0 ? AssetType::Contained : AssetType::Included);
std::string params[2];
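    // Dispatch on the directive that starts each line; parsing stops at an
    // explicit "end" directive or at the end of the stream.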
for (std::string line, directive; directive != "end" && std::getline(stream, line);)
{
line_stream.clear();
line_stream.str(line);
core::read_directive(line_stream, directive);
if (directive == "a")
{
Tile tile;
if (line_stream >> tile)
{
place_tile(tile);
}
}
else if (directive == "tiledefinition" && line_stream >> params[0] >> params[1])
{
process_tile_definition(stream, params[0], params[1], asset_type);
}
else if (directive == "terrain" && std::getline(line_stream, params[0]))
{
boost::trim(params[0]);
process_terrain_definition(stream, params[0], asset_type);
}
else if (directive == "subterrain")
{
SubTerrain sub_terrain;
if (line_stream >> sub_terrain)
{
if (asset_type == AssetType::Contained)
{
track_.define_contained_sub_terrain(sub_terrain);
}
else
{
track_.define_sub_terrain(sub_terrain);
}
}
}
else if (directive == "tilegroup" || directive == "norottilegroup")
{
std::size_t group_size;
TileId group_id;
if (line_stream >> group_id >> group_size)
{
bool rotatable = (directive == "tilegroup");
process_tile_group_definition(stream, group_id, group_size, asset_type, rotatable);
}
}
else if (directive == "leveltile")
{
LevelTile level_tile;
if (line_stream >> level_tile)
{
place_tile(level_tile);
}
}
else if (directive == "layer")
{
std::size_t level;
int visible;
auto& layer_name = params[0];
if (line_stream >> level >> visible >> std::ws && std::getline(line_stream, layer_name))
{
current_layer_ = track_.create_layer(layer_name, level);
current_layer_->visible = (visible != 0);
}
}
else if (directive == "include")
{
auto& include_path = params[0];
if (std::getline(line_stream, include_path))
{
boost::trim(include_path);
include(include_path, num_levels + 1);
if (num_levels == 0)
{
track_.add_asset(include_path);
}
}
}
else if (directive == "size")
{
Vector2u size;
auto line_pos = line_stream.tellg();
if (line_stream >> params[0] && params[0] == "td")
{
                    std::size_t level_count;
                    if (line_stream >> level_count >> size.x >> size.y)
                    {
                        track_.set_size(size);
                        track_.set_num_levels(level_count);
}
}
else
{
line_stream.seekg(line_pos);
if (line_stream >> size.x >> size.y)
{
track_.set_size(size);
track_.set_num_levels(1);
}
}
}
else if (directive == "controlpoints")
{
std::size_t num_points;
if (line_stream >> num_points)
{
process_control_points(stream, num_points);
}
}
else if (directive == "startpoints")
{
std::size_t num_points;
if (line_stream >> num_points)
{
process_start_points(stream, num_points);
}
}
else if (directive == "pattern")
{
auto& pattern_file = params[0];
if (std::getline(line_stream, pattern_file))
{
boost::trim(pattern_file);
auto pattern_path = resolve_asset_path(pattern_file);
add_asset(std::move(pattern_path));
track_.set_pattern(pattern_file);
}
}
else if (directive == "maker")
{
auto& author = params[0];
if (std::getline(line_stream, author))
{
boost::trim(author);
track_.set_author(author);
}
}
else if (directive == "pit")
{
core::IntRect pit;
if (line_stream >> pit.left >> pit.top >> pit.width >> pit.height)
{
track_.define_pit(pit);
}
}
else if (directive == "killterrain")
{
std::int32_t kill_terrain;
if (line_stream >> kill_terrain)
{
auto terrain_id = static_cast<TerrainId>(kill_terrain);
if (asset_type == AssetType::Contained)
{
track_.define_contained_kill_terrain(terrain_id);
}
else
{
track_.define_kill_terrain(terrain_id);
}
}
}
else if (directive == "gravity")
{
std::int32_t gravity_strength;
if (line_stream >> gravity_strength)
{
track_.set_gravity_strength(gravity_strength);
}
}
else if (directive == "gravitydirection")
{
std::int32_t gravity_direction;
if (line_stream >> gravity_direction)
{
track_.set_gravity_direction(gravity_direction);
}
}
else if (directive == "punaballtrack")
{
track_.set_track_type(TrackType::PunaBall);
}
else if (directive == "battletrack")
{
TrackType track_type = TrackType::Battle;
if (line_stream >> params[0])
{
boost::to_lower(params[0]);
if (params[0] == "bumpz")
{
track_type = TrackType::XBumpz;
}
}
            track_.set_track_type(track_type);
}
else if (directive == "singlelaptrack")
{
track_.set_track_type(TrackType::SingleLap);
}
}
}
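// For reference, a hypothetical snippet (not taken from any real asset file)
// of the line-oriented directive format consumed by the loop above. Each line
// starts with a directive keyword followed by whitespace-separated arguments:
//
//     size td 2 600 400       <- "td" variant: num levels, width, height
//     maker Some Author       <- stored via set_author()
//     gravity 150
//     layer 0 1 Background    <- level, visibility flag, then the layer name
//     include extra.trk       <- recursively parses another asset file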
void TrackLoader::Impl::place_tile(const LevelTile& tile)
{
if (!current_layer_ || current_layer_->level != tile.level)
{
current_layer_ = track_.create_layer("Level " + std::to_string(tile.level), tile.level);
}
current_layer_->tiles.push_back(tile);
}
void TrackLoader::Impl::process_tile_definition(std::istream& stream, const std::string& pattern_file,
const std::string& image_file, AssetType asset_type)
{
auto pattern_path = resolve_asset_path(pattern_file);
auto image_path = resolve_asset_path(image_file);
if (!pattern_path.empty() && !image_path.empty())
{
TileDefinition tile_def(pattern_path, image_path);
add_asset(std::move(pattern_path));
add_asset(std::move(image_path));
for (directive_.clear(); directive_ != "end" && std::getline(stream, line_); )
{
line_stream_.clear();
line_stream_.str(line_);
core::read_directive(line_stream_, directive_);
if ((directive_ == "tile" || directive_ == "norottile") && line_stream_ >> tile_def)
{
tile_def.rotatable = (directive_ == "tile");
if (asset_type == AssetType::Contained)
{
track_.define_contained_tile(tile_def, pattern_file, image_file);
}
else
{
track_.define_tile(tile_def);
}
}
}
}
}
void TrackLoader::Impl::process_tile_group_definition(std::istream& stream, TileId group_id,
std::size_t group_size, AssetType asset_type, bool rotatable)
{
TileGroupDefinition tile_group(group_id, group_size, rotatable);
for (directive_.clear(); directive_ != "end" && std::getline(stream, line_);)
{
line_stream_.clear();
line_stream_.str(line_);
core::read_directive(line_stream_, directive_);
if (directive_ == "a")
{
Tile tile;
if (line_stream_ >> tile)
{
tile_group.add_sub_tile(tile);
}
}
else if (directive_ == "leveltile")
{
LevelTile tile;
if (line_stream_ >> tile)
{
tile_group.add_sub_tile(tile);
}
}
}
if (asset_type == AssetType::Contained)
{
track_.define_contained_tile_group(tile_group);
}
else
{
track_.define_tile_group(tile_group);
}
}
void TrackLoader::Impl::process_terrain_definition(std::istream& stream, std::string terrain_name, AssetType asset_type)
{
TerrainDefinition terrain_def;
terrain_def.name = std::move(terrain_name);
if (stream >> terrain_def)
{
if (asset_type == AssetType::Contained)
{
track_.define_contained_terrain(terrain_def);
}
else
{
track_.define_terrain(terrain_def);
}
}
}
void TrackLoader::Impl::process_control_points(std::istream& stream, std::size_t num_points)
{
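    // Note: num_points is currently informational only; points are read until
    // an "end" directive (or the end of the stream) is reached.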
for (directive_.clear(); directive_ != "end" && std::getline(stream, line_);)
{
line_stream_.clear();
line_stream_.str(line_);
core::read_directive(line_stream_, directive_);
if (directive_ == "point")
{
Vector2i point;
std::int32_t length;
std::int32_t direction;
if (line_stream_ >> point.x >> point.y >> length >> direction)
{
ControlPoint control_point;
control_point.start = point;
control_point.length = length;
control_point.direction = direction != 0 ? ControlPoint::Horizontal : ControlPoint::Vertical;
track_.append_control_point(control_point);
}
}
}
}
void TrackLoader::Impl::process_start_points(std::istream& stream, std::size_t num_points)
{
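    // As in process_control_points, num_points is informational; entries are
    // read until an "end" directive.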
StartPoint start_point;
for (directive_.clear(); directive_ != "end" && std::getline(stream, line_);)
{
line_stream_.clear();
line_stream_.str(line_);
core::read_directive(line_stream_, directive_);
if (line_stream_ >> start_point.position.x >> start_point.position.y >>
start_point.rotation >> start_point.level)
{
track_.append_start_point(start_point);
}
}
}
TrackLoader::TrackLoader()
: impl_(std::make_unique<Impl>())
{
}
TrackLoader::~TrackLoader()
{
}
void TrackLoader::load_from_file(const std::string& file_name)
{
impl_->load_from_file(file_name);
}
void TrackLoader::include(const std::string& file_name)
{
impl_->include(file_name, 1);
}
void TrackLoader::load_from_stream(std::istream& stream, std::string working_directory)
{
impl_->load_from_stream(stream, std::move(working_directory));
}
Track TrackLoader::get_result()
{
return std::move(impl_->track_);
}
const std::vector<std::string>& TrackLoader::assets() const
{
return impl_->assets_;
}
}
|
{"hexsha": "c7bab77fc29614bc8ebb83140f91a37fb55e49db", "size": 18574, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/components/track_loader.cpp", "max_stars_repo_name": "mnewhouse/izieditor", "max_stars_repo_head_hexsha": "0a7f300737de9ab5a2a9a02c1a8c786083e71054", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/components/track_loader.cpp", "max_issues_repo_name": "mnewhouse/izieditor", "max_issues_repo_head_hexsha": "0a7f300737de9ab5a2a9a02c1a8c786083e71054", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/components/track_loader.cpp", "max_forks_repo_name": "mnewhouse/izieditor", "max_forks_repo_head_hexsha": "0a7f300737de9ab5a2a9a02c1a8c786083e71054", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1644295302, "max_line_length": 145, "alphanum_fraction": 0.5218585119, "num_tokens": 3623}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest # noqa: F401
import torch
import theseus as th
from theseus.constants import EPS
from theseus.utils import numeric_jacobian
from .common import (
check_adjoint,
check_compose,
check_exp_map,
check_inverse,
check_jacobian_for_local,
check_projection_for_compose,
check_projection_for_exp_map,
check_projection_for_inverse,
check_projection_for_log_map,
check_projection_for_rotate_and_transform,
)
def check_SE3_log_map(tangent_vector, atol=EPS):
g = th.SE3.exp_map(tangent_vector)
assert torch.allclose(th.SE3.exp_map(g.log_map()).data, g.data, atol=atol)
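# Note: a single exp(log(g)) round trip exercises both maps at once; if it
# reproduces g for the sampled tangent vectors, log_map inverts exp_map there.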
def test_exp_map():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= (
torch.rand(batch_size, 1, generator=rng).double() * 2 * np.pi - np.pi
)
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_exp_map(tangent_vector, th.SE3)
# SE3.exp_map uses approximations for small theta
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 1e-5
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_exp_map(tangent_vector, th.SE3)
check_projection_for_exp_map(tangent_vector, th.SE3)
# SE3.exp_map uses the exact exponential map for small theta
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 3e-3
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_exp_map(tangent_vector, th.SE3)
check_projection_for_exp_map(tangent_vector, th.SE3, atol=1e-6)
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 2 * np.pi - 1e-11
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_exp_map(tangent_vector, th.SE3)
check_projection_for_exp_map(tangent_vector, th.SE3)
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= np.pi - 1e-11
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_exp_map(tangent_vector, th.SE3)
check_projection_for_exp_map(tangent_vector, th.SE3)
def test_log_map():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
        tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= (
torch.rand(batch_size, 1, generator=rng).double() * 2 * np.pi - np.pi
)
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_SE3_log_map(tangent_vector)
check_projection_for_log_map(tangent_vector, th.SE3)
# SE3.log_map uses approximations for small theta
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 1e-5
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_SE3_log_map(tangent_vector)
check_projection_for_log_map(tangent_vector, th.SE3)
# SE3.log_map uses the exact logarithm map for small theta
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 3e-3
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_SE3_log_map(tangent_vector)
check_projection_for_log_map(tangent_vector, th.SE3)
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= 2 * np.pi - 1e-11
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_SE3_log_map(tangent_vector)
check_projection_for_log_map(tangent_vector, th.SE3)
for batch_size in [1, 20, 100]:
tangent_vector_ang = torch.rand(batch_size, 3, generator=rng).double() - 0.5
tangent_vector_ang /= tangent_vector_ang.norm(dim=1, keepdim=True)
tangent_vector_ang *= np.pi - 1e-11
tangent_vector_lin = torch.randn(batch_size, 3, generator=rng).double()
tangent_vector = torch.cat([tangent_vector_lin, tangent_vector_ang], dim=1)
check_SE3_log_map(tangent_vector)
check_projection_for_log_map(tangent_vector, th.SE3)
def test_compose():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se3_1 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
se3_2 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
check_compose(se3_1, se3_2)
def test_inverse():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se3 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
check_inverse(se3)
def test_adjoint():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
se3 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
tangent = torch.randn(batch_size, 6).double()
check_adjoint(se3, tangent)
def test_transform_from_and_to():
rng = torch.Generator()
rng.manual_seed(0)
for _ in range(10): # repeat a few times
for batch_size_group in [1, 20]:
for batch_size_pnt in [1, 20]:
if (
batch_size_group != 1
and batch_size_pnt != 1
and batch_size_pnt != batch_size_group
):
continue
se3 = th.SE3.rand(batch_size_group, generator=rng, dtype=torch.float64)
point_tensor = torch.randn(batch_size_pnt, 3).double()
point_tensor_ext = torch.cat(
(point_tensor, torch.ones(batch_size_pnt, 1).double()), dim=1
)
jacobians_to = []
point_to = se3.transform_to(point_tensor, jacobians=jacobians_to)
expected_to = (
se3.inverse().to_matrix() @ point_tensor_ext.unsqueeze(2)
)[:, :3]
jacobians_from = []
point_from = se3.transform_from(point_to, jacobians_from)
# Check the operation result
assert torch.allclose(expected_to.squeeze(2), point_to.data, atol=EPS)
assert torch.allclose(point_tensor, point_from.data, atol=EPS)
# Check the jacobians
expected_jac = numeric_jacobian(
lambda groups: groups[0].transform_to(groups[1]),
[se3, th.Point3(point_tensor)],
function_dim=3,
)
assert torch.allclose(jacobians_to[0], expected_jac[0])
assert torch.allclose(jacobians_to[1], expected_jac[1])
expected_jac = numeric_jacobian(
lambda groups: groups[0].transform_from(groups[1]),
[se3, point_to],
delta_mag=1e-5,
function_dim=3,
)
assert torch.allclose(jacobians_from[0], expected_jac[0])
assert torch.allclose(jacobians_from[1], expected_jac[1])
def test_projection():
rng = torch.Generator()
rng.manual_seed(0)
for _ in range(10): # repeat a few times
for batch_size in [1, 20]:
# Test SE3.transform_to
check_projection_for_rotate_and_transform(
th.SE3, th.Point3, th.SE3.transform_to, batch_size, rng
)
# Test SE3.transform_from
check_projection_for_rotate_and_transform(
th.SE3, th.Point3, th.SE3.transform_from, batch_size, rng
)
# Test SE3.compose
check_projection_for_compose(th.SE3, batch_size, rng)
# Test SE3.inverse
check_projection_for_inverse(th.SE3, batch_size, rng)
def test_local_map():
rng = torch.Generator()
rng.manual_seed(0)
for batch_size in [1, 20, 100]:
        group0 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
        group1 = th.SE3.rand(batch_size, generator=rng, dtype=torch.float64)
check_jacobian_for_local(group0, group1, Group=th.SE3, is_projected=True)
|
{"hexsha": "76c6b667be710aebd27c7f9bcefe6731ef585cef", "size": 10312, "ext": "py", "lang": "Python", "max_stars_repo_path": "theseus/geometry/tests/test_se3.py", "max_stars_repo_name": "jeffin07/theseus", "max_stars_repo_head_hexsha": "3498bbddf9cca740c2703d0c1aa3a78a7264cb15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "theseus/geometry/tests/test_se3.py", "max_issues_repo_name": "jeffin07/theseus", "max_issues_repo_head_hexsha": "3498bbddf9cca740c2703d0c1aa3a78a7264cb15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theseus/geometry/tests/test_se3.py", "max_forks_repo_name": "jeffin07/theseus", "max_forks_repo_head_hexsha": "3498bbddf9cca740c2703d0c1aa3a78a7264cb15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4392156863, "max_line_length": 87, "alphanum_fraction": 0.655159038, "include": true, "reason": "import numpy", "num_tokens": 2583}
|
# coding=utf-8
import numpy as np
import pickle
from json import loads
import csv
def get_config():
"""
Get info in the config.json file
and turn all data to SI units
"""
with open('./config.json', 'r', encoding='utf-8') as f:
config = loads(f.read())
assert config["unit"] == "mm" or config["unit"] == "SI", "Please enter the accurate unit."
if config["unit"] == "mm":
config["electrode_centers"] = list(np.array(config["electrode_centers"]) / 1000)
config["electrode_radius"] = config["electrode_radius"] / 1000
config["detection_bound"] = config["detection_bound"] / 1000
return config
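# A minimal illustration of the expected config.json shape (the values here are
# made-up assumptions, not the project's actual configuration):
#
#   {
#       "unit": "mm",
#       "electrode_centers": [[10, 0], [0, 10]],
#       "electrode_radius": 2.5,
#       "detection_bound": 40
#   }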
def save_parameter(param, filename, path_name="."):
"""
    Save a parameter to a .pkl file.
Args:
param: parameter to save
filename: filename of the destination without suffix ".pkl" example: "mesh_cache"
path_name: path name of the destination example: "./MESH"
"""
with open(path_name + "/" + 'cache_' + filename + '.pkl', "wb") as file:
pickle.dump(param, file)
def read_parameter(filename, path_name="."):
"""
    Read from a .pkl file. IMPORTANT: use this only with the save_parameter() utility.
    Args:
        filename: filename of the destination without suffix ".pkl" example: "mesh_cache"
path_name: path name of the destination example: "./MESH"
Returns:
data: data in the file
"""
with open(path_name + "/" + 'cache_' + filename + '.pkl', 'rb') as file:
param = pickle.load(file)
return param
def save_to_csv_file(data, filename, path_name="."):
"""
    Save a parameter to a .csv file.
    Args:
        data: parameter to save; must be 1D or 2D
filename: filename of the destination with suffix example: "aaa.csv"
path_name: path name of the destination example: "./MESH"
"""
    assert filename.endswith(".csv"), "The filename does not end with .csv."
data = np.array(data)
if len(data.shape) > 2:
raise ValueError("This function can only store two dimension data.")
with open(path_name + "/" + filename, mode='w', newline='') as file:
data_writer = csv.writer(file, delimiter=',')
if len(data.shape) == 1:
data_writer.writerow(data)
else:
for line in data:
data_writer.writerow(line)
def read_csv_from_file(filename, path_name="."):
"""
Read from .csv file
Args:
filename: filename of the destination with suffix example: "aaa.csv"
path_name: path name of the destination example: "./MESH"
Returns:
data: data in the file
"""
data = []
    assert filename.endswith(".csv"), "The filename does not end with .csv."
with open(path_name + "/" + filename, newline='') as file:
csv_reader = csv.reader(file, delimiter=',')
for line in csv_reader:
if line:
line = [float(x) for x in line]
data.append(line)
return data
def read_csv_one_line_from_file(filename, path_name=".", idx=0):
"""
Read from .csv file one line
Args:
filename: filename of the destination with suffix example: "aaa.csv"
path_name: path name of the destination example: "./MESH"
idx: line of the data, default to 0
Returns:
data: data in the file
"""
data = []
    assert filename.endswith(".csv"), "The filename does not end with .csv."
with open(path_name + "/" + filename, newline='') as file:
csv_reader = csv.reader(file, delimiter=',')
count = 0
for line in csv_reader:
if line:
if count == idx:
data = [float(x) for x in line]
break
else:
count += 1
return data
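# Hypothetical round trip showing the "cache_" prefix convention
# (the names are illustrative only):
#
#   save_parameter(mesh, "mesh", "./MESH")    # writes ./MESH/cache_mesh.pkl
#   mesh = read_parameter("mesh", "./MESH")   # reads it back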
|
{"hexsha": "01efc3d36e8f316e3c765a5b7abec8577f3a55a6", "size": 3808, "ext": "py", "lang": "Python", "max_stars_repo_path": "MyEIT/utilities.py", "max_stars_repo_name": "zehao99/CEIT-segmentation", "max_stars_repo_head_hexsha": "19f48b126e2ea82cea68d9fd10ec609344fd7247", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MyEIT/utilities.py", "max_issues_repo_name": "zehao99/CEIT-segmentation", "max_issues_repo_head_hexsha": "19f48b126e2ea82cea68d9fd10ec609344fd7247", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MyEIT/utilities.py", "max_forks_repo_name": "zehao99/CEIT-segmentation", "max_forks_repo_head_hexsha": "19f48b126e2ea82cea68d9fd10ec609344fd7247", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0, "max_line_length": 94, "alphanum_fraction": 0.5971638655, "include": true, "reason": "import numpy", "num_tokens": 898}
|
%\documentclass[twocolumn,superscriptaddress,aps,prb,floatfix]{revtex4-1}
\documentclass[11pt]{article}
\usepackage{amsmath,graphicx}
%\usepackage[cmex10]{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{epstopdf}
\usepackage{hyperref}
% \usepackage[francais]{babel}
\usepackage[applemac]{inputenc}
\usepackage{color}
%\usepackage{graphicx}% Include figure files
%\usepackage{dcolumn}% Align table columns on decimal point
%\usepackage{bm}% bold math
%\usepackage{color}
%\usepackage[caption=false]{subfig}
\usepackage{listings}
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstset{frame=tb,
language=python,
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,
columns=flexible,
basicstyle={\small\ttfamily},
numbers=none,
numberstyle=\tiny\color{gray},
keywordstyle=\color{blue},
commentstyle=\color{dkgreen},
stringstyle=\color{mauve},
breaklines=true,
breakatwhitespace=true,
tabsize=3
}
\newcommand {\lb} {{\langle}}
\newcommand {\rb} {{\rangle}}
\newcommand {\R} {{\mathbb{R}}}
\newcommand {\X} {{\tilde X}}
\newcommand {\w} {{\tilde w}}
\newcommand {\hh} {{k}}
\newcommand {\p} {{\tilde p}}
\newcommand {\RN} {{{\mathbb{R}}^N}}
\newcommand {\Z} {{\mathbb{Z}}}
\newcommand {\E} {{\mathbb{E}}}
\newcommand {\W} {{\cal{W}}}
\newcommand {\Fem} {F_{e}}
\newcommand {\For} {F_{o}}
\newcommand{\Exp}[1]{\mathbb{E}\left(#1 \right)}
\usepackage{amsthm}
\usepackage{amsmath}
\usepackage{amssymb}
\newcommand{\figref}[1]{Fig. \ref{#1}}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
%\newenvironment{proof}[1][Proof]{\begin{trivlist}
%\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}
%\newenvironment{definition}[1][Definition]{\begin{trivlist}
%\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}
%\newenvironment{example}[1][Example]{\begin{trivlist}
%\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}
%\newenvironment{remark}[1][Remark]{\begin{trivlist}
%\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}
%\newcommand{\qed}{\nobreak \ifvmode \relax \else
% \ifdim\lastskip<1.5em \hskip-\lastskip
% \hskip1.5em plus0em minus0.5em \fi \nobreak
% \vrule height0.75em width0.5em depth0.25em\fi}
\begin{document}
%\allowdisplaybreaks
\title{Topology and Geometry of Deep Rectified Network Optimization Landscapes}
%\author{C. Daniel Freeman \\
%daniel.freeman@berkeley.edu, Department of Physics, University of California, Berkeley, CA 94720, USA}
\author{C. Daniel Freeman and Joan Bruna \\
Department of Statistics, University of California, Berkeley, CA 94720, USA \\
Courant Institute of Mathematical Sciences, New York University, New York USA
}
\date{\today}
\maketitle
\begin{abstract}
The loss surface of deep neural networks has recently attracted interest
in the optimization and machine learning communities as a prime example of
high-dimensional non-convex problem. Some insights were recently gained using spin glass
models, but at the expense of strongly simplifying the nonlinear nature of the model.
In this work, we do not make any such assumption and study conditions
on the data distribution and model architecture that prevent the existence
of bad local minima. Together with
recent results that rigorously establish that gradient descent does not
get stuck at saddle points, we conclude that gradient descent converges
to a global optimum in deep rectified networks.
The conditioning of gradient descent is the next challenge we address.
We study this question by estimating the geometry of level sets, and we introduce
an algorithm to estimate the regularity of such sets on large-scale networks.
Our empirical results suggest that these sets become exponentially more curved
as the energy level decays, in accordance with what is observed in practice.
\end{abstract}
\tableofcontents
%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
\label{sec:Intro}
%%%%%%%%%%%%%%%%%%%%%%
\begin{itemize}
\item Context of the problem
\item Related work: Spin glass, recent result.
\item Gradient Descent converges to minimizers (Jordan Recht et al).
\item Main result on connectedness of level sets.
\item Geometry of the level sets. Algorithm to estimate the geodesics along level sets. Measure of curvature of these sets.
\end{itemize}
\section{Topology of Level Sets}
Let $P$ be a probability measure on a product space $\mathcal{X} \times \mathcal{Y}$,
where we assume $\mathcal{X}$ and $\mathcal{Y}$ are Euclidean vector spaces for simplicity.
Let $\{ (x_i, y_i)\}_i$ be an iid sample of size $L$ drawn from $P$ defining the training set.
We consider the classic empirical risk minimization of the form
\begin{equation}
\label{emp_risk_min}
\Fem(\theta) = \frac{1}{L} \sum_{l=1}^L \| \Phi(x_i;\theta) - y_i \|^2~,
\end{equation}
where $\Phi(x ; \theta)$ encapsulates the feature representation
that uses parameters $\theta \in \R^S$. In a deep neural network, this parameter
contains the weights and biases used in all layers.
For convenience, in our analysis we will also use the oracle risk minimization:
\begin{equation}
\label{risk_min}
\For(\theta) = \E_{(X,Y) \sim P} \| \Phi(X;\theta) - Y \|^2~.
\end{equation}
We define the level set of $F(\theta)$ as
\begin{equation}
\Omega_F(\lambda) = \{ \theta \in \R^S~;~F(\theta) \leq \lambda \}~.
\end{equation}
The first question we study is the structure of critical points of $\Fem(\theta)$ and $\For(\theta)$
when $\Phi$ is a multilayer neural network. In particular, we are interested in knowing whether
$\Fem$ has local minima which are not global minima. This question is answered by
knowing whether $\Omega_F(\lambda)$ is connected at each energy level $\lambda$:
\begin{proposition}
If $\Omega_F(\lambda)$ is connected for all $\lambda$ then every local minimum of $F(\theta)$ is a global minimum.
\end{proposition}
{\it Proof:} Suppose that $\theta_1$ is a local minimum and $\theta_2$ is a global minimum,
but $F(\theta_1) > F(\theta_2)$. If $\lambda = F(\theta_1)$, then clearly
$\theta_1$ and $\theta_2$ both belong to $\Omega_F(\lambda)$. Suppose
now that $\Omega_F(\lambda)$
is connected. Then we could find a smooth (i.e. continuous and differentiable) path $\gamma(t)$
with $\gamma(0) = \theta_1$, $\gamma(1)= \theta_2$ and $F(\gamma(t)) \leq \lambda = F(\theta_1)$.
In particular, as $t \to 0$, we have
\begin{eqnarray*}
F(\gamma(t)) &=& F(\theta_1) + t \langle \nabla F(\theta_1) , \dot{\gamma}(0) \rangle + \frac{t^2}{2} \left(\dot{\gamma}(0)^T H F(\theta_1) \dot{\gamma}(0) + \langle \nabla F(\theta_1), \ddot{\gamma}(0) \rangle \right) + o(t^2) \\
&=& F(\theta_1) + \frac{t^2}{2} \dot{\gamma}(0)^T H F(\theta_1) \dot{\gamma}(0) + o(t^2) ~,
\end{eqnarray*}
which shows that $F(\gamma(t)) \leq F(\theta_1)$ for all $t$ is incompatible with $H F(\theta_1) \succeq 0$. $\square$
\subsection{The Linear Case}
A particularly simple but insightful case is
when $F$ is a multilayer network defined by
\begin{equation}
\label{linearcase}
\Phi(x;\theta) = W_K \dots W_1 x~,~\theta = (W_1, \dots, W_K)~.
\end{equation}
This model defines a non-convex (and non-concave) loss $\Fem(\theta)$
which has been recently studied in \cite{linearcase} concurrently with our work.
We provide here an alternative proof that in that case, there are no poor local
minima.
%For that purpose, let $W_1, W_2, \dots, W_K$ be weight matrices of sizes
%$n_k \times n_{k+1}$, $k < K$. Assume first that $n_j \geq \min(n_1, n_K)$ for $j=2 \dots K-1$.
%and let us define the following multilinear regression problem:
%\begin{equation}
%\label{multilinloss}
%L_0(W_1, \dots, W_K) = \sum_i \| W_K, \dots W_1 x_i - y_i \|^2~,
%\end{equation}
%where $\{ (x_i, y_i)\,; x_i \in \mathbb{R}^{n_1}, y_i \in \mathbb{R}^{n_K} \}_i$ is a given
%training set.
We have the following result.
\begin{proposition}
\label{proplinear}
Let $W_1, W_2, \dots, W_K$ be weight matrices of sizes
$n_k \times n_{k+1}$, $k < K$, and let $\Fem(\theta)$, $\For(\theta)$
denote the risk minimizations using $\Phi$ as in (\ref{linearcase}).
Assume that $n_j \geq \min(n_1, n_K)$ for $j=2 \dots K-1$ [TODO I think this is not necessary].
Then $\Omega_{\Fem}(\lambda)$ is connected for all $\lambda$, as well as $\For$.
\end{proposition}
{\it Proof:} We proceed by induction over the number of layers $K$.
For $K=1$, the loss $F(\theta)$ is convex. Let $\theta_1$, $\theta_2$ be two arbitrary points
in a level set $\Omega_F(\lambda)$. Thus $F(\theta_1) \leq \lambda$ and $F(\theta_2) \leq \lambda$. We have
$$F( t \theta_1 + (1-t) \theta_2) \leq t F(\theta_1) + (1-t) F(\theta_2) \leq \lambda~,$$
and thus a linear path is sufficient in that case to connect $\theta_1$ and $\theta_2$.
Suppose the result is true for $K-1$. Let $\theta_1 = (W_1^1, \dots, W_K^1)$ and
$\theta_2 = (W_1^2, \dots, W_K^2)$ with $F(\theta_1) \leq \lambda$, $F(\theta_2) \leq \lambda$.
For each $W_1, \dots, W_K$, we denote $\tilde{W}_j = W_j$ for $j < K-1$ and
$\tilde{W}_{K-1} = W_K W_{K-1}$. By the induction hypothesis, $\tilde{\theta}_1$ and
$\tilde{\theta}_2$ can be connected within the level set of the loss expressed in terms of
$\tilde{\theta} = (\tilde{W}_1, \dots, \tilde{W}_{K-1})$. Let $\tilde{W}_{K-1}(t)$ be the corresponding
path projected onto the last layer. We just need to produce a path in the variables $W_{K-1}(t)$, $W_K(t)$
such that (i) $W_{K-1}(0) = W_{K-1}^1$, $W_{K-1}(1) = W_{K-1}^2$,
(ii) $W_{K}(0) = W_{K}^1$, $W_{K}(1) = W_{K}^2$, and
(iii) $W_{K}(t) W_{K-1}(t) = \tilde{W}_{K-1}(t)$ for $t \in (0,1)$.
We construct it as follows. Let
$$W_{K}(t) = t W_{K}^2 + (1-t) W_{K}^1 + t (1-t) V~,$$
$$W_{K-1}(t) = W_{K}(t)^\dagger \tilde{W}_{K-1}(t) ~,$$
where $W_{K}(t)^\dagger = W_{K}(t)^T ( W_{K}(t) W_{K}(t)^T)^{-1}$ denotes the (right) pseudoinverse
and $V$ is a $n_{K-1} \times n_{K}$ matrix drawn from an iid distribution.
Conditions (i) and (ii) are immediate from the definition, and condition (iii) results from the fact that
$$W_{K}(t) W_{K}(t)^\dagger = {\bf I}_{n_K}~,$$
since $W_K(t)$ has full row rank for all $t \in (0,1)$, almost surely over the choice of $V$.
$\square$.
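For intuition, this path construction is easy to check numerically. The sketch below
(a minimal illustration with arbitrary sizes, not part of the proof; it assumes the
output dimension does not exceed the inner dimension, so that $W_K(t)$ generically has
full row rank) verifies condition (iii) at a few values of $t$:
\begin{lstlisting}
import numpy as np

rng = np.random.default_rng(0)
n_K, n_in = 3, 5  # output dim <= inner dim: full row rank generically
W_K1 = rng.standard_normal((n_K, n_in))  # endpoint W_K^1
W_K2 = rng.standard_normal((n_K, n_in))  # endpoint W_K^2
V = rng.standard_normal((n_K, n_in))
W_tilde = rng.standard_normal((n_K, 4))  # stand-in for tilde{W}_{K-1}(t)

for t in np.linspace(0.1, 0.9, 5):
    W_K = t * W_K2 + (1 - t) * W_K1 + t * (1 - t) * V
    W_Km1 = np.linalg.pinv(W_K) @ W_tilde  # W_{K-1}(t) = W_K(t)^+ tilde{W}
    assert np.allclose(W_K @ W_Km1, W_tilde)  # condition (iii) holds
\end{lstlisting}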
\subsection{Half-Rectified Nonlinear Case}
We now study the setting given by
\begin{equation}
\label{relucase}
\Phi(x;\theta) = W_K \rho W_{K-1} \rho \dots \rho W_1 x~,~\theta = (W_1, \dots, W_K)~,
\end{equation}
where $\rho(z) = \max(0 ,z)$.
The biases can be implemented by replacing the input vector $x$
with $\overline{x}=(x, 1)$ and by redefining each parameter matrix as
$$\overline{W}_i = \left(
\begin{array}{c|c}
W_i & b_i \\
\hline
0 & 1
\end{array}
\right)~,$$
where $b_i$ contains the biases for each layer.
For simplicity, we continue to use $W_i$ and $x$ in the following.
We start with a characterization of the oracle loss.
\begin{theorem}
Let $W_1, W_2, \dots, W_K$ be weight matrices of sizes
$n_k \times n_{k+1}$, $k < K$, and let $\Fem(\theta)$, $\For(\theta)$
denote the risk minimizations using $\Phi$ as in (\ref{relucase}).
Assume that $n_j \geq \min(n_1, n_K)$ for $j=2 \dots K-1$ [TODO I think this is not necessary].
Then $\Omega_{\For}(\lambda)$ is connected for all $\lambda$.
\end{theorem}
{\it Proof:}
We will again prove the result by induction over the depth $K$.
Suppose first that $K=2$. The oracle risk is
$$\For( W_1, W_2) = \Exp{ \| W_2 \rho W_1 X - Y \|^2}~.$$
If we denote $X_{W_1} = \rho W_1 X$, let us verify that $\For(W_1, W_2)$
only depends upon the correlation operator of $X_{W_1}$
and its cross-correlation to $Y$. Indeed, we have
\begin{eqnarray*}
\Exp{ \| W_2 \rho W_1 X - Y \|^2} &=& \Exp{ \| W_2 X_{W_1} - Y \|^2} \\
&=& Tr\left( W_2 \Sigma_{W_1} W_2^T \right) + Tr\left( \Sigma_Y \right) - 2 Tr\left( W_2 \Sigma_{W_1,Y} \right)~,
\end{eqnarray*}
where $\Sigma_{W_1} = \Exp{ X_{W_1} X_{W_1}^T}$ and $\Sigma_{W_1,Y} = \Exp{ X_{W_1} Y^T}$.
Let us see that when $\rho(z)$ is the half-rectification the covariance structure
of $X_{W_1}$ can be easily related to the original distribution. Indeed, we have
the following
\begin{lemma}
Let $Z = \rho W X$ with $\rho(z) = \max(0,z)$. Then
\begin{equation}
\Sigma_Z = \tilde{W}^T \Sigma_X \tilde{W}
\end{equation}
\end{lemma}
$\square$
[ TODO $\Fem$ case].
%%%%%%%%%%%%%%%%%%%%%%
\section{Geometry of Level Sets}
%\section{Quantifying Nonconvexity}
\label{sec:QuanNoncon}
\subsection{Definitions}
\label{sec:Defs}
%%%%%%%%%%%%%%%%%%%%%%
For a model with network parameters $\theta_i$, and a learning problem with sample space $X$, the fundamental object of study is the loss function, $L(X, \theta_i)$. In practice, one only has access to an estimate of the loss function over some restricted subset, $\chi_i$, of the sample space: $E( L(X, \theta_i), \chi_i )$. Unless otherwise stated, the loss functions computed throughout are assumed to be on a restricted test set not used during training.
A key ingredient of the algorithm is the use of an \emph{interpolated model}. For two given models, $\theta_1$ and $\theta_2$, we defined the interpolated model with parameter $t$ as follows:
\begin{equation}
\Theta (\theta_1 ,\theta_2, t) := \theta_1 (1-t) + \theta_2 t
\end{equation}
Thus, the interpolated model parameters---i.e., weights and biases---are simply linearly interpolated between two given models.
Additionally, the algorithm requires an estimate of the interpolated loss curve:
\begin{equation}
\gamma(\theta_1, \theta_2, t) := L (X ,\Theta (\theta_1, \theta_2, t))~, \quad t \in [0,1]
\end{equation}
that is, an estimate of the loss on those models which are linear interpolations sitting between $\theta_1$ and $\theta_2$. More specifically, we seek efficient estimates of the location of the maximum, i.e. the $t^*$ satisfying $\frac{d \gamma(\theta_1, \theta_2, t)}{dt} \bigg|_{t^*} = 0$ and $\frac{d^2 \gamma(\theta_1, \theta_2, t)}{dt^2} \bigg|_{t^*} < 0$. While in principle the interpolated loss curve could have rich structure, in practice it is generally fairly smooth, so straightforward hill-climbing algorithms can efficiently locate these points.
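In practice we approximate $t^*$ by evaluating $\gamma$ on a uniform grid and taking
the arg-max, as in the following sketch (a minimal illustration; \texttt{loss} stands
for an estimate of $L(X, \cdot)$ on held-out data, and the grid resolution is an
arbitrary choice):
\begin{lstlisting}
import numpy as np

def t_star(loss, theta_1, theta_2, num=101):
    """Grid estimate of the maximum of the interpolated loss curve."""
    ts = np.linspace(0.0, 1.0, num)
    gammas = np.array([loss((1 - t) * theta_1 + t * theta_2) for t in ts])
    return ts[np.argmax(gammas)]
\end{lstlisting}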
Finally, for a pair of models $(\theta_1, \theta_2)$, it will be convenient to define the maximum interpolated error:
\begin{equation}
\Gamma(\theta_1, \theta_2) := \min_{\Theta^*(\theta_1, \theta_2)}\ \max_{\theta_i \in \Theta^*} L (X, \theta_i) \label{eq:minmaxerror}
\end{equation}
where $\Theta^* (\theta_1, \theta_2)$ is \emph{any} continuous path in the space of weights connecting $\theta_1$ and $\theta_2$. Thus, $\Gamma(\theta_1, \theta_2)$ represents the minimum possible maximum loss achieved by those paths in the space of weights connecting $\theta_1$ and $\theta_2$. More intuitively, if $\Gamma (\theta_1, \theta_2) \leq \max (L(X,\theta_1), L(X,\theta_2))$, then the models are ``connected''---there exists a continuous path in the space of models with total loss never exceeding the maximum loss achieved by $\theta_1$ or $\theta_2$.
\subsection{The Greedy Algorithm}
\label{sec:GreedyAlg}
\begin{enumerate}
\item Train two models $\theta_i$ and $\theta_j$ to a threshold loss value, $L_0$.
\item Determine the location of the global maximum, $t^*$, on the interpolated loss curve $\gamma(\theta_i, \theta_j, t)$.
\item Perform gradient descent on the interpolated model $\Theta (\theta_i, \theta_j, t^*) := \theta_{i,j}$ until its loss is below $\alpha L_0$ for some $\alpha \in [0,1]$.
\item Calculate the maxima of the interpolated losses $\gamma(\theta_i, \theta_{i,j}, t)$ and $\gamma(\theta_{i,j}, \theta_j, t)$. If both maxima are below $L_0$, stop recursing on this branch and proceed to the remaining branches (see step 5). If not, proceed to step 5.
\item For those pairs $(\theta_a, \theta_b)$ from step 4 for which the maximum exceeds $L_0$, start a new branch by returning to step 2 with the replacements $i \to a$ and $j \to b$. If the depth exceeds $d$, stop (see below).
\end{enumerate}
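In code, the recursion above can be sketched as follows (schematic only; the four
trailing callables are problem-specific stand-ins for hill climbing on the
interpolated loss curve, SGD training to a target loss, parameter interpolation,
and the maximum interpolated loss):
\begin{lstlisting}
def connect(theta_i, theta_j, L0, alpha, depth,
            find_tstar, train_to, interp, max_gamma):
    """Return the interior models of a string connecting theta_i to
    theta_j, ordered along the path (schematic sketch)."""
    if max_gamma(theta_i, theta_j) <= L0:
        return []  # already connected below the threshold
    if depth == 0:
        raise RuntimeError("max depth reached without certifying a path")
    t = find_tstar(theta_i, theta_j)                              # step 2
    theta_ij = train_to(interp(theta_i, theta_j, t), alpha * L0)  # step 3
    return (connect(theta_i, theta_ij, L0, alpha, depth - 1,      # steps 4-5
                    find_tstar, train_to, interp, max_gamma)
            + [theta_ij]
            + connect(theta_ij, theta_j, L0, alpha, depth - 1,
                      find_tstar, train_to, interp, max_gamma))
\end{lstlisting}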
\begin{figure}
\begin{center}
\scalebox{1}{\includegraphics[width=1.0\columnwidth]{AlgorithmFigure}}
\end{center}
\caption{A cartoon of the algorithm. $a):$ The initial two models with approximately the same loss, $L_0$. $b):$ The interpolated loss curve, in red, and its global maximum, occurring at $t=t^*$. $c):$ The interpolated model $\Theta(\theta_i, \theta_j, t^*)$ is added and labeled $\theta_{i,j}$. $d):$ Stochastic gradient descent is performed on the interpolated model until its loss is below $\alpha L_0$. $e):$ New interpolated loss curves are calculated between the models, pairwise on a chain. $f):$ As in step $c)$, a new model is inserted at the maximum of the interpolated loss curve between $\theta_i$ and $\theta_{i,j}$. $g):$ As in step $d)$, gradient descent is performed until the model has low enough loss.}
\label{fig:AlgorithmFigure}
\end{figure}
We provide a cartoon of the algorithm in \figref{fig:AlgorithmFigure}. If the algorithm succeeds, then the output of the algorithm is a sequence of models, $\theta_i$ such that the pairwise interpolated loss curve between each in a sequence will be less than the threshold $L_0$. Thus, the algorithm outputs a continuous path in parameter space connecting the original two models such that everywhere along the path, the total loss is less than or equal to the loss of the original models.
As written, if a path does \emph{not} exist, then the algorithm will clearly not converge. Thus, on top of the parameter $\alpha$, discussed below, the algorithm has an additional free parameter in the \emph{depth} chosen to explore. For convenience, we define the string of models produced by the algorithm at depth $d$ with parameter $\alpha$ to be the \emph{interpolated string}, $S(\theta_1, \theta_2, \alpha, d)$. These are precisely those models recursively generated by the algorithm in step 3. Further, these models are naturally ordered along a path, starting from $\theta_1$ and terminating on $\theta_2$, as indicated in \figref{fig:AlgorithmFigure}.
Finally, to use this as a tool to diagnose convexity, we define the \emph{maximum interpolated error at depth $d$ and tolerance $\alpha$}:
\begin{align}
\tilde{\Gamma}( \theta_1, \theta_2, d, \alpha ) &:= \max_{\substack{t \in [0,1] \\ i,\ j\ \text{neighbors in}\ S(\theta_1, \theta_2, \alpha, d)}} \gamma(\theta_i, \theta_j, t)
\end{align}
where by ``neighbors in $S(\theta_1, \theta_2, \alpha, d)$'', we only mean that the models are immediately adjacent on the interpolating string. This quantity upper bounds the true maximum interpolated error, i.e. \eqref{eq:minmaxerror}.
In summary: the algorithm recursively produces and trains new models lying on a continuous path in the space of model parameters, i.e. a string. Training via gradient descent biases the path towards valleys on the loss surface, thus encouraging the loss along this path to be low. In practice, the parameter $\alpha$ is chosen to be less than 1 to aid convergence. We provide numerical and theoretical evidence for this choice in section SECTIONGOHERE.
\subsection{Constrained Dynamic String Sampling}
\label{sec:ConstrainedAlg}
While the algorithm presented in Sec.~\ref{sec:GreedyAlg} is fast for sufficiently smooth families of loss surfaces with few saddle points, here we present a slightly modified version which, while slower, provides more control over the convergence of the string. Instead of training intermediate models via full SGD to a desired accuracy, intermediate models are subject to a constraint that ensures they remain ``close'' to their neighboring models on the string. Specifically, each intermediate model is constrained to the unique hyperplane in weight space equidistant from its two neighbors. This is similar to a sort of ``$L_1$ regularization'' where the loss function for a given model on the string, $\theta_i$, has an additional term $\tilde{L}(\theta_i) = L(\theta_i)+\zeta(\|\theta_{i-1} - \theta_i\|+\|\theta_{i+1} - \theta_i\|)$. The strength of the $\zeta$ regularization term controls the ``springiness'' of the weight string. (Note: this should be made more precise; the hyperplane constraint is stronger than the $L_1$ constraint, which only keeps the model in a ball close to the midpoint between the two models.)
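For concreteness, the ``springy'' objective above can be written as follows (a minimal
sketch assuming flattened parameter vectors and a fixed $\zeta$):
\begin{lstlisting}
import numpy as np

def spring_loss(loss, theta_prev, theta, theta_next, zeta):
    """Loss plus the penalty tying theta to its string neighbors."""
    return (loss(theta)
            + zeta * (np.linalg.norm(theta_prev - theta)
                      + np.linalg.norm(theta_next - theta)))
\end{lstlisting}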
Because adapting DSS to use this constraint is straightforward, here we will describe an alternative ``breadth-first'' approach wherein models are trained in parallel until convergence. This alternative approach has the advantage that it will indicate a disconnection between two models ``sooner'' insofar as it will be clear two models cannot be connected once the loss on either of the two initial models, $\theta_1$ or $\theta_2$, is less than $\Gamma(\theta_1, \theta_2)$. The precise geometry of the loss surface will dictate which approach to use in practice.
Given two random models $\sigma_i$ and $\sigma_j$ where $|\sigma_i - \sigma_j| < \kappa$, we aim to follow the evolution of the family of models connecting $\sigma_i$ to $\sigma_j$. Intuitively, almost every continuous path in the space of random models connecting $\sigma_i$ to $\sigma_j$ has, on average, the same (high) loss. For simplicity, we choose to initialize the string to the linear segment interpolating between these two models. If this entire segment is evolved via gradient descent, the segment will either evolve into a string which is entirely contained in a basin of the loss surface, or some number of points will become fixed at a higher loss. These fixed points are difficult to detect directly, but will be indirectly detected by the persistence of a large interpolated loss between two adjacent models on the string.
The algorithm proceeds as follows:
\begin{enumerate}
\setcounter{enumi}{-1}
\item Initialize the model string to have two models, $\sigma_i$ and $\sigma_j$.
\item Begin training all models to the desired loss, keeping the instantaneous loss of all models being trained approximately constant.
\item If the pairwise interpolated loss $\gamma(\sigma_n,\sigma_{n+1}, t)$ exceeds a tolerance $\alpha_1$, insert a new model at the maximum of the interpolated loss between these two models. For simplicity, this tolerance is chosen to be $(1 + \alpha_1^*)$ times the instantaneous loss of all other models on the string.
\item Repeat steps (1) and (2) until all models (and interpolated errors) are below a threshold loss $L_0$, or until a chosen failure condition is met (see Sec.~\ref{sec:Fail}).
\end{enumerate}
\subsection{Failure Conditions}
\label{sec:Fail}
While the algorithms presented will faithfully certify two models are connected if the algorithm converges, it is worth reemphasizing that they do not guarantee that two models are disconnected if the algorithm fails to converge. In general, the problem of determining if two models are connected can be made arbitrarily difficult by choice of a particularly pathological geometry for the loss function, so we are constrained to heuristic arguments for determining when to stop running the algorithm.
Thankfully, in practice, loss function geometries for problems of interest are not intractably difficult to explore.
%%%%%%%%%%%%%%%%%%%%%%
\section{Numerical Experiments}
\label{sec:NumExp}
For our numerical experiments, we aimed to extract qualitative features of both small, toy networks, as well as of larger workhorse networks suitable for use on real world tasks (e.g. MNIST). At its core, the maximum interpolated error (i.e., \eqref{eq:minmaxerror}) is a measure of problem nonconvexity---or, more precisely, of the nonconvexity of the loss surface of a given architecture on a particular learning problem.
\subsection{Polynomial Regression}
\label{sec:PolyFuncs}
%%%%%%%%%%%%%%%%%%%%%%
Polynomial function regression is a task for which small neural networks can achieve extremely high accuracy. For our numerical experiments, we studied a 1-4-4-1 fully connected multilayer perceptron style architecture with ReLU activation and RMSProp optimization. For ease of analysis, we restricted the family of polynomials to those with $x\in[0,1]$ and $f(x)\in[0,1]$.
Discussion of different Loss functions
etc.
%%%%%%%%%%%%%%%%%%%%%%
\subsection{Convolutional Neural Networks}
\label{sec:CNN}
%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%
\section{Discussion}
\label{sec:Discussion}
%%%%%%%%%%%%%%%%%%%%%%
\begin{itemize}
\item Future: Generalization Error Question.
\end{itemize}
\bibliography{nonconvex}
\end{document}
%
% {**}{**}{**} End of file apssamp.tex {**}{**}{**}
|
{"hexsha": "2bb73681426d9388a83621775fae7d0ebf1e3edb", "size": 24481, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Writeup/nonconvex2.tex", "max_stars_repo_name": "danielfreeman11/convex-nets", "max_stars_repo_head_hexsha": "252a8230845fb2076221113ac8cabfade5152bfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-08-09T00:48:46.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-03T09:04:59.000Z", "max_issues_repo_path": "Writeup/nonconvex2.tex", "max_issues_repo_name": "danielfreeman11/convex-nets", "max_issues_repo_head_hexsha": "252a8230845fb2076221113ac8cabfade5152bfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Writeup/nonconvex2.tex", "max_forks_repo_name": "danielfreeman11/convex-nets", "max_forks_repo_head_hexsha": "252a8230845fb2076221113ac8cabfade5152bfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.4022222222, "max_line_length": 1115, "alphanum_fraction": 0.7127976798, "num_tokens": 7292}
|
import cv2
import numpy as np
class Imageprocessor:
def __init__(self):
print ("Initializing image processor ..")
print ("Done ...")
def abs_sobel_threshold(self, image, orient='x', sobel_kernel=3, sobel_threshold=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=sobel_kernel))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply inclusive (>=, <=) thresholds
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= sobel_threshold[0]) & (scaled_sobel <= sobel_threshold[1])] = 1
# Return the result
return binary_output
def magnitude_threshold(self, image, sobel_kernel=3, magnitude_threshold=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Calculate Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
magnitude = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(magnitude)/255
magnitude = (magnitude/scale_factor).astype(np.uint8)
# Create a copy and apply inclusive (>=, <=) thresholds
binary_output = np.zeros_like(magnitude)
binary_output[(magnitude >= magnitude_threshold[0]) & (magnitude <= magnitude_threshold[1])] = 1
# Return the binary image
return binary_output
def direction_threshold(self, image, sobel_kernel=3, direction_threshold=(0, 255)):
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Calculate Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate direction
abs_gradient_direction = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
# Create a copy and apply inclusive (>=, <=) thresholds
binary_output = np.zeros_like(abs_gradient_direction)
binary_output[(abs_gradient_direction>=direction_threshold[0])&(abs_gradient_direction<=direction_threshold[1])] = 1
# Return the binary image
return binary_output
def hls_color_threshold(self, image, h_threshold=(256, 256), l_threshold=(256, 256), s_threshold=(256, 256)):
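        # A pixel is set to 1 if ANY of the three channel ranges matches.
        # The (256, 256) defaults can never match 8-bit HLS values, which
        # effectively disables a channel unless a threshold is supplied.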
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
binary_output = np.zeros_like(image[:,:,0])
binary_output[(hls[:,:,0] >= h_threshold[0]) & (hls[:,:,0] <= h_threshold[1])] = 1
binary_output[(hls[:,:,1] >= l_threshold[0]) & (hls[:,:,1] <= l_threshold[1])] = 1
binary_output[(hls[:,:,2] >= s_threshold[0]) & (hls[:,:,2] <= s_threshold[1])] = 1
return binary_output
def hls_color_threshold_h_and_s_or_l(self, image, h_threshold=(256, 256), l_threshold=(256, 256), s_threshold=(256, 256)):
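        # A pixel is set to 1 if L is in range, or if S and H are both in range.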
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
binary_output = np.zeros_like(image[:,:,0])
binary_output[(hls[:,:,1] >= l_threshold[0]) & (hls[:,:,1] <= l_threshold[1])] = 1
binary_output[(hls[:,:,2] >= s_threshold[0]) & (hls[:,:,2] <= s_threshold[1])&\
(hls[:,:,0] >= h_threshold[0]) & (hls[:,:,0] <= h_threshold[1])] = 1
return binary_output
def region_of_interest(self, image, vertices):
#defining a blank mask to start with
binary_output = np.zeros_like(image[:,:,0])
#filling pixels inside the polygon defined by "vertices" with the 1
cv2.fillPoly(binary_output, vertices, 1)
return binary_output
def combined_threshold(self, gradx, grady, mag_binary, dir_binary):
combined = np.zeros_like(dir_binary)
combined[(((gradx == 1)&(grady == 1)) | ((mag_binary == 1)&(dir_binary == 1)))] = 1
return combined
def combined_threshold_roi(self, gradx, grady, mag_binary, dir_binary, hls_binary,roi_binary):
combined = np.zeros_like(dir_binary)
combined[(((gradx == 1)&(grady == 1)) | ((mag_binary == 1)&(dir_binary == 1)) | (hls_binary==1)) & (roi_binary==1)] = 1
return combined
def set_perspective_transform(self, src,dst):
M = cv2.getPerspectiveTransform(src, dst)
MR = cv2.getPerspectiveTransform(dst, src)
self.M = M
self.MR = MR
return M, MR
def perspective_transform(self, image):
warped = cv2.warpPerspective(image, self.M, (image.shape[1],image.shape[0]), flags=cv2.INTER_LINEAR)
return warped
def perspective_reverse_transform(self, image):
warped = cv2.warpPerspective(image, self.MR, (image.shape[1],image.shape[0]), flags=cv2.INTER_LINEAR)
return warped
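# Hypothetical usage sketch (the file name and threshold values are illustrative):
#
#   import matplotlib.image as mpimg
#   processor = Imageprocessor()
#   image = mpimg.imread("test_images/example.jpg")
#   gradx = processor.abs_sobel_threshold(image, 'x', 3, (20, 100))
#   grady = processor.abs_sobel_threshold(image, 'y', 3, (20, 100))
#   mag = processor.magnitude_threshold(image, 3, (30, 100))
#   direction = processor.direction_threshold(image, 15, (0.7, 1.3))
#   combined = processor.combined_threshold(gradx, grady, mag, direction)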
|
{"hexsha": "f50cb78bbb39c3b575376b50152ce2725fe998c4", "size": 5254, "ext": "py", "lang": "Python", "max_stars_repo_path": "imageprocessor/imageprocessor.py", "max_stars_repo_name": "nguyenbuiUCSD/Advanced-Lane-Finding", "max_stars_repo_head_hexsha": "f54c75100fa4d06cb4bbe439180056ca9928ff55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-04-06T09:47:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-06T09:47:24.000Z", "max_issues_repo_path": "imageprocessor/imageprocessor.py", "max_issues_repo_name": "nguyenbuiUCSD/Advanced-Lane-Finding", "max_issues_repo_head_hexsha": "f54c75100fa4d06cb4bbe439180056ca9928ff55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imageprocessor/imageprocessor.py", "max_forks_repo_name": "nguyenbuiUCSD/Advanced-Lane-Finding", "max_forks_repo_head_hexsha": "f54c75100fa4d06cb4bbe439180056ca9928ff55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.7636363636, "max_line_length": 127, "alphanum_fraction": 0.629615531, "include": true, "reason": "import numpy", "num_tokens": 1419}
|
"""
Run this file to setup the tree system used for initial scoring.
Look at parameters.py before running!
What does this script do?
Before the program starts, an estimate of duration and disk space will be calculated.
This is fairly accurate except for the duration of the tree fase which is a guesstimate.
Then the 'real' program starts.
First, every db image will be normalised.
To be more precise every db image is grayscaled and then rescaled so that both width and height don't exceed the parameter MAX_IMAGE_SIZE.
Afterwards, precalculated data of every image is stored in the calc/ folder.
This data will be used in the following step.
If the parameter PRE_CALC_DES is True, then this data will remain on disk, otherwise it will be removed when the program exits.
This takes up a lot of disk space but will speed up the online phase. Note that this speedup is only significant when the data is quickly retrievable.
Therefore, using an SSD as the storage device is strongly advised.
A hard disk might even slow down the online phase; it's best you test this yourself.
Finally, the data structure for retrieval will be generated.
During this phase it's possible that there'll be temporary data stored in the tmp/ folder.
This folder will be systematically emptied when the program doesn't need the information anymore.
This phase requires a *lot* of RAM.
The program estimates how much RAM it needs.
Don't forget to specify how much RAM you have available for the program in the parameter MAX_MEMORY_USAGE_GB.
If the program doesn't have enough RAM, an estimation will be used for the data structure.
More on this in the section "How it works > Scaling up".
If the parameter SAVE_DISK_SPACE_DURING_RUNNING is True, the calc/ folder will be emptied during this phase.
At the end of the program, if SAVE_DISK_SPACE_DURING_RUNNING and PRE_CALC_DES are both True, the data in the calc/ folder needs to be recalculated.
frequently used abbreviations:
kp - keypoint
des - descriptor
db - database
File structure:
(input) data/ : all db images. assumption: all images have extension '.jpg'.
(temp) tmp/ : temporary folder for storing checkpoints
(output) calc/ : contains all stored kp and des of db images
(output) tree.p : the resulting tree
"""
import os
import time
import db_image
import parallelize
from parameters import *
import numpy as np
import datetime
import kmeans_tree
import utils
import numpy_indexed as npi
def create_missing_output_folders():
"""
Helper function to make sure all folders used by program exist.
"""
if not os.path.isdir("data/"): # raise exception if no db images are provided
raise Exception("Folder data/ is empty. No db images provided.")
if not os.path.isdir("calc/"):
os.mkdir("calc/")
if not os.path.isdir("tmp/"):
os.mkdir("tmp/")
def eta(all_ids):
"""
Helper function: estimate runtime and disk space.
    Try up to 100 random items to estimate the duration of the whole program.
Returns estimations of:
- Total runtime
- Runtime of resize and grayscale operations
- Runtime of calculating all kp & des of db images
- Runtime of tree
- Size of all kp and des
- Extra disk space used during runtime
- The fraction of the data that should be used in the first level of the tree
Note: tree runtime is a guesstimate
"""
eta_resize, eta_calc, eta_tree, tmp_size_gb = 0, 0, 0, 0
    # define which ids to test (sample at most 100 images)
    sample_size = min(100, len(all_ids))
    multiply_by = len(all_ids) / sample_size
    ids_to_test = all_ids[np.random.choice(all_ids.shape[0], sample_size, replace=False)]
# eta for resizing
if RESIZE_IMAGES:
start = time.time()
paths_to_test = ["data/" + str(i) + ".jpg" for i in ids_to_test]
parallelize.parallelize_resize(paths_to_test)
eta_resize = (time.time() - start) * multiply_by
# eta for calculating des
start = time.time()
test_data = parallelize.parallelize_calc(ids_to_test)
eta_calc = (time.time() - start) * multiply_by
# est. size of des
calc_size_gb = 0
for i, d in test_data:
calc_size_gb += i.nbytes + d.nbytes
calc_size_gb *= multiply_by / 10**9
# weight
weight = min(1, MAX_MEMORY_USAGE_GB / (2.1 * calc_size_gb))
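    # Interpretation (an assumption based on how weight is used below): the
    # fraction of descriptors that fits in memory at once, with 2.1x the raw
    # descriptor size as a safety margin for intermediate copies.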
# eta for tree, hard to guesstimate
eta_tree = 7 * calc_size_gb * np.log(K * L + 1) * np.log(ATTEMPTS_KMEANS * CRITERIA[1] + 1)
if weight == 1:
eta_tree /= 2
# total eta
eta_tot = eta_resize + eta_calc + eta_tree
if PRE_CALC_DES and SAVE_DISK_SPACE_DURING_RUNNING:
eta_tot += eta_calc
    # raise an error if too little memory is provided, based on the size of the des
    if 1/weight > K:
        raise MemoryError("Too little memory allocated. Max memory defined by user: {} GB. "
                          "Min memory needed: {:.2f} GB.".format(MAX_MEMORY_USAGE_GB, 2.1 * calc_size_gb / K))
# est. extra disk space used during runtime
if weight < 1 and not SAVE_DISK_SPACE_DURING_RUNNING:
tmp_size_gb = calc_size_gb
return eta_tot, eta_resize, eta_calc, eta_tree, calc_size_gb, tmp_size_gb, weight
def format_ids_des(data):
"""
Helper function to format chunked data.
"""
ids, des = list(), list()
# check if useful data is given
if len(data) == 0:
        return np.array(ids, np.uint32), np.array(des, np.float32)
# process data
for i, d in data:
if d is not None and 0 != np.size(d, axis=0):
ids.extend([i] * np.size(d, axis=0))
des.append(d)
return np.array(ids, np.uint32), np.concatenate(des, axis=0, dtype=np.float32)
def main():
create_missing_output_folders() # data/, calc/ and temp/
all_ids = db_image.get_ids() # get ids of all db images
# estimate runtime and used disk space
eta_tot, eta_resize, eta_calc, eta_tree, calc_size_gb, tmp_size_gb, weight = eta(all_ids)
print("(PERMANENT) Est. size of calc/ folder: {:.2f}GB".format(calc_size_gb))
print("(TEMPORARY) Est. extra disk space used during offline fase: {:.2f}".format(tmp_size_gb))
print("Total ETA: " + str(datetime.timedelta(seconds=eta_tot)))
    print('Percentage of data used on the first level: {:.2f}%'.format(weight * 100))
# Resizing and grayscale images
print("1) Start resizing and grayscaling images, ETA: " + str(datetime.timedelta(seconds=eta_resize)))
start = time.time()
if RESIZE_IMAGES:
parallelize.parallelize_resize(["data/" + str(i) + ".jpg" for i in all_ids])
print("Resizing and grayscaling finished! Runtime: " + str(datetime.timedelta(seconds=time.time() - start)))
# Calculate and store all kp & des of images
print("2) Start calculating des, ETA: " + str(datetime.timedelta(seconds=eta_calc)))
start = time.time()
ids, des = format_ids_des(parallelize.parallelize_calc(all_ids, weight))
print("Calculating des finished! Runtime: " + str(datetime.timedelta(seconds=time.time() - start)))
# Build data structure for retrieval
print("3) Start building tree, ETA: " + str(datetime.timedelta(seconds=eta_tree)))
start = time.time()
tree = kmeans_tree.KMeansTree("tree.p", len(db_image.get_ids()), K, L, CRITERIA, ATTEMPTS_KMEANS)
if weight == 1:
for attempt in range(ATTEMPTS_TREE_BRANCH):
try:
tree.build_branch(ids, des, attempts_level=ATTEMPTS_TREE_LEVEL)
except Exception as e:
print("Failed attempt:", e)
else:
break
else:
raise Exception('Failed building tree.')
else:
# build first node
clusters = tree.build_node_from_given_data(des)
if clusters is not None:
# cluster descriptors in their respective nodes
for image_id in all_ids:
des = db_image.DbImage(image_id).get_kp_des(delete=SAVE_DISK_SPACE_DURING_RUNNING)[1]
if des is not None and len(des) != 0: # only use valid images
# get closest cluster center for every des
indices = utils.get_closest_indexes(clusters, des)
# sort des in groups with the same cluster center
indices, des = npi.group_by(indices, des)
# append grouped des into their respective files
for i, d in zip(indices, des):
utils.pickle_data((image_id, d), "tmp/" + str(i), mode="ab")
# build all branches on level 1
for node_index in range(K):
# load data of branch
ids, des = format_ids_des(utils.get_pickle_data_chunks("tmp/" + str(node_index)))
# try a few times to build the branch
for attempt in range(ATTEMPTS_TREE_BRANCH):
try:
tree.build_branch(ids, des, level=1, index=node_index, attempts_level=ATTEMPTS_TREE_LEVEL)
except Exception as e:
print("Failed attempt:", e)
else:
break
else:
raise Exception('Failed building branch ' + str(node_index) + ".")
try: # remove file if exists
os.remove("tmp/" + str(node_index))
except OSError:
pass
tree.finalise()
print("Building tree finished! Runtime: " + str(datetime.timedelta(seconds=time.time() - start)))
if __name__ == "__main__":
main()
|
{"hexsha": "0df5104ce3fe523b96060f3608de90276d225d43", "size": 9372, "ext": "py", "lang": "Python", "max_stars_repo_path": "offline.py", "max_stars_repo_name": "DaanS8/ScalableRecognition", "max_stars_repo_head_hexsha": "f642f78d5e809b339ba0adad8aeb2e4e9031a5ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "offline.py", "max_issues_repo_name": "DaanS8/ScalableRecognition", "max_issues_repo_head_hexsha": "f642f78d5e809b339ba0adad8aeb2e4e9031a5ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "offline.py", "max_forks_repo_name": "DaanS8/ScalableRecognition", "max_forks_repo_head_hexsha": "f642f78d5e809b339ba0adad8aeb2e4e9031a5ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0512820513, "max_line_length": 149, "alphanum_fraction": 0.6670934699, "include": true, "reason": "import numpy", "num_tokens": 2215}
|
import sys
try:
    # drop the ROS Python 2 path so the proper cv2 is imported (no-op elsewhere)
    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except ValueError:
    pass
import cv2
import numpy as np
# define the window name
winName='Colors of the rainbow'
# define the trackbar callback function; pass is a placeholder statement here to keep the program structure complete
def nothing(x):
pass
img_original=cv2.imread('lane.png')
# convert the color space from BGR to HSV
img_hsv=cv2.cvtColor(img_original,cv2.COLOR_BGR2HSV)
# create a named window
cv2.namedWindow(winName)
# create 6 trackbars for the lower/upper bounds of the color range; the initial positions correspond to the yellow color range
cv2.createTrackbar('LowerbH',winName,27,255,nothing)
cv2.createTrackbar('LowerbS',winName,160,255,nothing)
cv2.createTrackbar('LowerbV',winName,215,255,nothing)
cv2.createTrackbar('UpperbH',winName,83,255,nothing)
cv2.createTrackbar('UpperbS',winName,255,255,nothing)
cv2.createTrackbar('UpperbV',winName,255,255,nothing)
while(1):
    # cv2.getTrackbarPos() returns the current value of the corresponding slider
lowerbH=cv2.getTrackbarPos('LowerbH',winName)
lowerbS=cv2.getTrackbarPos('LowerbS',winName)
lowerbV=cv2.getTrackbarPos('LowerbV',winName)
upperbH=cv2.getTrackbarPos('UpperbH',winName)
upperbS=cv2.getTrackbarPos('UpperbS',winName)
upperbV=cv2.getTrackbarPos('UpperbV',winName)
    # binary image of the target color, used as the mask for cv2.bitwise_and()
img_target=cv2.inRange(img_hsv,(lowerbH,lowerbS,lowerbV),(upperbH,upperbS,upperbV))
    # bitwise-AND the input image with itself under the mask, keeping only the masked region of the original image
img_specifiedColor=cv2.bitwise_and(img_original,img_original,mask=img_target)
cv2.imshow(winName,img_specifiedColor)
if cv2.waitKey(1)==ord('q'):
break
cv2.destroyAllWindows()
|
{"hexsha": "35111b9e034b54352d024e5bf8dcb7ec672d33c6", "size": 1404, "ext": "py", "lang": "Python", "max_stars_repo_path": "sensing/lane_detection/hsv_adjust.py", "max_stars_repo_name": "lnexenl/XTDrone", "max_stars_repo_head_hexsha": "f0402d44ac3b9a8435cfc67aea769ef659892010", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 457, "max_stars_repo_stars_event_min_datetime": "2020-03-21T05:27:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:05:52.000Z", "max_issues_repo_path": "sensing/lane_detection/hsv_adjust.py", "max_issues_repo_name": "lnexenl/XTDrone", "max_issues_repo_head_hexsha": "f0402d44ac3b9a8435cfc67aea769ef659892010", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2020-05-18T16:48:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T05:43:24.000Z", "max_forks_repo_path": "sensing/lane_detection/hsv_adjust.py", "max_forks_repo_name": "lnexenl/XTDrone", "max_forks_repo_head_hexsha": "f0402d44ac3b9a8435cfc67aea769ef659892010", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 121, "max_forks_repo_forks_event_min_datetime": "2020-03-21T06:43:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:27:29.000Z", "avg_line_length": 37.9459459459, "max_line_length": 87, "alphanum_fraction": 0.7849002849, "include": true, "reason": "import numpy", "num_tokens": 574}
|
import sys
import glob
import os
import math
import numpy as np
import cv2
from cv2 import aruco
class GeometricCalibration:
def __init__(self, cam, projector, checker_pattern=(23, 7), checker_size=0.01, checker_pixels=60,
width_mirror=0.3556, height_mirror=0.254, marker_offset=0.005, dict_aruco=aruco.DICT_6X6_250,
aruco_size=0.04, imgPattern='*.PNG'):
        # For the extrinsic/geometric calibration you are required to print 8 Aruco markers
        # (marker sheet: CalibrationImages/6x6_40mm.pdf)
        # Glue or mount them on the edges of your mirror as well as halfway between the edges
        # The Aruco markers determine the position of the object relative to the camera
        # To determine the position of the screen, we display a checkerboard pattern on the screen
        # Make sure that the screen is visible on the mirror from the position of the camera
# Camera:
self.cam = cam
# Projector:
self.proj = projector
# Checkerboard:
# Tuple of checkers in width and height
self.checker_pattern = checker_pattern
# Checker file:
self.checker_file = '/CalibrationNumpyData/8_24_checker.npz'
# Display size of checker
self.checker_size = checker_size
# Pixel size
self.checker_pixels = checker_pixels
# Per pixel Size
self.checker_pixel_size = checker_size/checker_pixels
# Display dimensions:
self.display_width, self.display_height = self.proj.getResolution()
# Aruco marker dimension:
self.aruco_size = aruco_size
self.aruco_dict = aruco.Dictionary_get(dict_aruco)
# Mirror/Object dimensions: height and length of mirror
# Subtract the white marker offset on a marker patch
self.board_width = width_mirror
self.board_height = height_mirror
self.half_width = (width_mirror/2) - marker_offset
self.half_height = (height_mirror/2) - marker_offset
# TODO: paths
# File pattern of images
self.imgPattern = imgPattern
def readFileList(self, imgFolder):
imgFileList = glob.glob(os.path.join(imgFolder, self.imgPattern))
imgFileList.sort()
return imgFileList
def detectChecker(self, img, debug=True):
if len(img.shape) == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
elif len(img.shape) == 2:
gray = img
        ret, corners = cv2.findChessboardCorners(gray, self.checker_pattern,
                                                 cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_EXHAUSTIVE)
corners_refine = corners
if ret:
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
corners_refine = cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1), criteria)
if debug:
            cv2.drawChessboardCorners(img, self.checker_pattern, corners_refine, ret)
cv2.namedWindow('Checker', cv2.WINDOW_NORMAL)
cv2.imshow('Checker', img)
cv2.waitKey(0) # any key
cv2.destroyWindow('Checker')
return ret, corners_refine
@staticmethod
def readCheckerObjPoint(fname):
data = np.load(fname)
objp = data["objp"]
return objp
def arucoBoard(self):
# Aruco marker length, board height, board width
m = self.aruco_size
h = self.board_height
w = self.board_width
# create objPoints for calibration target
h0 = (0 - h / 2)
hm = (m - h / 2)
h1 = (((h - m) / 2) - h / 2)
h2 = (((h + m) / 2) - h / 2)
h3 = ((h - m) - h / 2)
h4 = (h - h / 2)
w0 = (0 - w / 2)
wm = (m - w / 2)
w1 = (((w - m) / 2) - w / 2)
w2 = (((w + m) / 2) - w / 2)
w3 = ((w - m) - w / 2)
w4 = (w - w / 2)
objPoints = []
objPoints.append(np.array([[w0, h0, 0], [wm, h0, 0], [wm, hm, 0], [w0, hm, 0]], dtype=np.float32)) # 0
objPoints.append(np.array([[w0, h1, 0], [wm, h1, 0], [wm, h2, 0], [w0, h2, 0]], dtype=np.float32)) # 1
objPoints.append(np.array([[w0, h3, 0], [wm, h3, 0], [wm, h4, 0], [w0, h4, 0]], dtype=np.float32)) # 2
objPoints.append(np.array([[w1, h3, 0], [w2, h3, 0], [w2, h4, 0], [w1, h4, 0]], dtype=np.float32)) # 3
objPoints.append(np.array([[w3, h3, 0], [w4, h3, 0], [w4, h4, 0], [w3, h4, 0]], dtype=np.float32)) # 4
objPoints.append(np.array([[w3, h1, 0], [w4, h1, 0], [w4, h2, 0], [w3, h2, 0]], dtype=np.float32)) # 5
objPoints.append(np.array([[w3, h0, 0], [w4, h0, 0], [w4, hm, 0], [w3, hm, 0]], dtype=np.float32)) # 6
objPoints.append(np.array([[w1, h0, 0], [w2, h0, 0], [w2, hm, 0], [w1, hm, 0]], dtype=np.float32)) # 7
ids = np.linspace(0, 7, 8).astype(np.int32)[:, None]
arucoCornerBoard = aruco.Board_create(objPoints, self.aruco_dict, ids)
return arucoCornerBoard, objPoints
def detectAruco(self, img, debug=True):
if len(img.shape) == 3:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
elif len(img.shape) == 2:
gray = img
parameters = aruco.DetectorParameters_create()
# corners, ids, rejectedImgPoints = aruco.detectMarkers(img, aruco_dict, parameters=parameters)
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, self.aruco_dict, parameters=parameters)
if debug:
frame_markers = aruco.drawDetectedMarkers(img.copy(), corners, ids)
cv2.namedWindow('Aruco', cv2.WINDOW_NORMAL)
cv2.imshow('Aruco', frame_markers)
cv2.waitKey(0) # any key
cv2.destroyWindow('Aruco')
return corners, ids
def postEst(self, corners, ids, camMat, distCoeffs):
arucoCornerBoard, _ = self.arucoBoard()
retval, rvec, tvec = aruco.estimatePoseBoard(corners, ids, arucoCornerBoard, camMat, distCoeffs, None, None)
return rvec, tvec
def reProjAruco(self, img, camMat, distCoeffs, rvec, tvec, cornersAruco):
print("reProjAruco")
_, objPoints = self.arucoBoard() # yunhao
ids = np.linspace(0, 7, 8).astype(np.int32)[:, None]
corners_reproj = []
for i in range(len(objPoints)):
imgPoints, _ = cv2.projectPoints(np.array(objPoints[i]), rvec, tvec, camMat, distCoeffs)
corners_reproj.append(imgPoints)
frame_markers = aruco.drawDetectedMarkers(img.copy(), corners_reproj, ids)
# TODO: change path
        cv2.imwrite("./reproject_markers.png", frame_markers)
cv2.namedWindow('Reproject', cv2.WINDOW_NORMAL)
cv2.imshow('Reproject', frame_markers)
cv2.waitKey(0) # any key
cv2.destroyWindow('Reproject')
@staticmethod
def householderTransform(n, d):
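        # Householder reflection across the mirror plane with unit normal n at
        # distance d from the origin: a point x maps to p1 @ x + p3, since
        # x' = (I - 2 n n^T) x + 2 d n; p2 is the analogous reflection for the
        # canonical plane with normal e = [0, 0, 1].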
I3 = np.identity(3, dtype=np.float32)
e = np.array([0, 0, 1])
p1 = I3 - 2 * np.outer(n, n)
p2 = I3 - 2 * np.outer(e, e)
p3 = 2 * d * n
return p1, p2, p3
@staticmethod
def invTransformation(R, t):
Rinv = R.T
Tinv = -(Rinv @ t)
return Rinv, Tinv
def calib(self, imgPath, cameraCalibPath):
# imgFileList = readFileList(imgFolder)
data = np.load(cameraCalibPath)
camMtx = data["mtx"]
dist = data["dist"]
objP_pixel = np.ceil(self.readCheckerObjPoint(self.checker_file))
objP_pixel[:, 2] = 0
objP = np.array(objP_pixel)
for i in range(self.checker_pattern[1]):
for j in range(math.floor(self.checker_pattern[0] / 2)):
tmp = objP[self.checker_pattern[0] * i + j, 0]
objP[self.checker_pattern[0] * i + j, 0] = objP[
self.checker_pattern[0] * i + self.checker_pattern[0] - j - 1, 0]
objP[self.checker_pattern[0] * i + self.checker_pattern[0] - j - 1, 0] = tmp
objP[:, 0] -= (self.display_width / 2 - 1)
objP[:, 1] -= (self.display_height / 2 - 1)
objP *= self.checker_size
rtA = []
rB = []
tB = []
rC2Ss = []
tC2Ss = []
# define valid image
validImg = -1
# for i in trange(len(imgFileList), desc="Images"):
for i in range(1):
img = cv2.imread(imgPath, cv2.IMREAD_UNCHANGED)
# Yunhao
# img = (img/65535*255).astype(np.uint8)
# Aruco marker for Mirror position
cornersAruco, ids = self.detectAruco(img, debug=False)
            if cornersAruco is None or ids is None or len(cornersAruco) <= 3:
continue
# Checker for Display
ret, cornersChecker = self.detectChecker(img, debug=False)
if not ret:
print("no Checker!!!")
continue
# for a valid image, aruco and checker must be both detected
validImg += 1
# Calibrate Mirror Pose with Aruco
rvecMirror, tvecMirror = self.postEst(cornersAruco, ids, camMtx, dist)
img_axis = aruco.drawAxis(img, camMtx, dist, rvecMirror, tvecMirror, self.aruco_size)
cv2.namedWindow('Img_axis', cv2.WINDOW_NORMAL)
cv2.imshow('Img_axis', img_axis)
cv2.waitKey(0) # any key
cv2.destroyWindow('Img_axis')
            ## Reproject Camera Extrinsics
self.reProjAruco(img, camMtx, dist, rvecMirror, tvecMirror, cornersAruco)
rMatMirror, _ = cv2.Rodrigues(rvecMirror) # rotation vector to rotation matrix
normalMirror = rMatMirror[:, 2]
rC2W, tC2W = self.invTransformation(rMatMirror, tvecMirror)
dW2C = abs(np.dot(normalMirror, tvecMirror))
# Householder transformation
p1, p2, p3 = self.householderTransform(normalMirror, dW2C)
# Calibrate virtual to Camera with Checker
rpe, rvecVirtual, tvecVirtual = cv2.solvePnP(objP, cornersChecker, camMtx, dist,
flags=cv2.SOLVEPNP_IPPE) # cv2.SOLVEPNP_IPPE for 4 point solution #cv2.SOLVEPNP_ITERATIVE
# iterationsCount=200, reprojectionError=8.0,
rvecVirtual, tvecVirtual = cv2.solvePnPRefineLM(objP, cornersChecker, camMtx, dist, rvecVirtual,
tvecVirtual)
proj, jac = cv2.projectPoints(objP, rvecVirtual, tvecVirtual, camMtx, dist)
img_rep = img
            cv2.drawChessboardCorners(img_rep, self.checker_pattern, proj, True)
width = 960
height = int(img_rep.shape[0] * 960 / img_rep.shape[1])
smallimg = cv2.resize(img_rep, (width, height))
cv2.imshow("img_rep", smallimg)
cv2.waitKey(0) # any key
cv2.destroyWindow("img_rep")
rMatVirtual, _ = cv2.Rodrigues(rvecVirtual) # rotation vector to rotation matrix
print(tvecVirtual)
if validImg == 0:
rtA = p1
rB = np.matmul(rMatVirtual, p2)
tB = np.squeeze(tvecVirtual) + p3
else:
rtA = np.concatenate((rtA, p1))
rB = np.concatenate((rB, np.matmul(rMatVirtual, p2)))
tB = np.concatenate((tB, np.squeeze(tvecVirtual) + p3))
rS2C = p1 @ rMatVirtual
tS2C = p1 @ np.squeeze(tvecVirtual) + p3
rC2S, tC2S = self.invTransformation(rS2C, tS2C)
print("rC2S:", rC2S)
print("tC2S:", tC2S)
rC2Ss.append(rC2S)
tC2Ss.append(tC2S)
# rC2Ss = np.array(rC2Ss)
# tC2Ss = np.array(tC2Ss)
# fout = os.path.join(imgFolder, "Cam2Screen.npz")
# np.savez(fout, rC2S=rC2Ss, tC2S=tC2Ss)
return rC2Ss, tC2Ss
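
# Hedged usage sketch (illustrative only): `cam`, `projector`, and both paths
# below are hypothetical placeholders, not part of this module; any camera and
# projector wrappers exposing the methods used above (e.g. getResolution())
# should work, and the .npz file is expected to contain "mtx" and "dist".
#
#   calib = GeometricCalibration(cam, projector)
#   rC2Ss, tC2Ss = calib.calib("capture_0.PNG", "CalibrationNumpyData/camera_calib.npz")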
|
{"hexsha": "290dedde904ca1dd8263fc6149c619cee0e9bf97", "size": 11888, "ext": "py", "lang": "Python", "max_stars_repo_path": "Calibrations/.ipynb_checkpoints/GeometricCalibration-checkpoint.py", "max_stars_repo_name": "merlzbert/SkinScan", "max_stars_repo_head_hexsha": "684129c20b671db0a338ab832fa512c095f0cb60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-07-23T07:57:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T17:43:24.000Z", "max_issues_repo_path": "Calibrations/.ipynb_checkpoints/GeometricCalibration-checkpoint.py", "max_issues_repo_name": "merlzbert/SkinScan", "max_issues_repo_head_hexsha": "684129c20b671db0a338ab832fa512c095f0cb60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Calibrations/.ipynb_checkpoints/GeometricCalibration-checkpoint.py", "max_forks_repo_name": "merlzbert/SkinScan", "max_forks_repo_head_hexsha": "684129c20b671db0a338ab832fa512c095f0cb60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-04-08T14:40:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T17:41:49.000Z", "avg_line_length": 40.7123287671, "max_line_length": 147, "alphanum_fraction": 0.578230148, "include": true, "reason": "import numpy", "num_tokens": 3454}
|
################ Define Asymmetric Symbolic Error-based Intervals ##############
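# An AsymESIP stores a symbolic affine map `equation` over the input variables
# (last column = constant term) together with accumulated relaxation `errors`
# weighted by `factors`; splitting the factors by sign yields the lower/upper
# symbolic bounds computed in sym_bounds_matrix below.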
struct AsymESIP{F<:AbstractPolytope}
equation
factors
errors
domain::F
end
function init_asym_esip(input::AbstractHyperrectangle)
n = dim(input)
equation = [I zeros(n)]
factors = zeros(n, 0)
errors = zeros(0, n + 1) # extra dimension for constants
return AsymESIP(equation, factors, errors, input)
end
function sym_bounds_matrix(a::AsymESIP)
eq = a.equation
sym_lo = eq + min.(0, a.factors) * a.errors
sym_up = eq + max.(0, a.factors) * a.errors
return sym_lo, sym_up
end
function bounds_matrix(a::AsymESIP)
sym_lo, sym_up = sym_bounds_matrix(a)
los = low(a.domain)
his = high(a.domain)
lb = max.(0, sym_lo[:, 1:end-1]) * los + min.(0, sym_lo[:, 1:end-1]) * his + sym_lo[:, end]
ub = max.(0, sym_up[:, 1:end-1]) * his + min.(0, sym_up[:, 1:end-1]) * los + sym_up[:, end]
return lb, ub
end
|
{"hexsha": "3b4197a496a267fd4bbead0abe07879e994fb176", "size": 958, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/reachability/AsymESIP/asym_esip.jl", "max_stars_repo_name": "phK3/NeuralVerification.jl", "max_stars_repo_head_hexsha": "6c71231279c9474908f6db08a573c4b2b8cf2f01", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/reachability/AsymESIP/asym_esip.jl", "max_issues_repo_name": "phK3/NeuralVerification.jl", "max_issues_repo_head_hexsha": "6c71231279c9474908f6db08a573c4b2b8cf2f01", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/reachability/AsymESIP/asym_esip.jl", "max_forks_repo_name": "phK3/NeuralVerification.jl", "max_forks_repo_head_hexsha": "6c71231279c9474908f6db08a573c4b2b8cf2f01", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.95, "max_line_length": 95, "alphanum_fraction": 0.614822547, "num_tokens": 303}
|
import numpy as np
from .context import alma
def test_emit():
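    # smoke test: constructing an Emit object should not raise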
e = alma.image.Emit()
def test_list_models():
e = alma.image.Emit()
assert(isinstance(e.select(list_models=True),list))
def test_blackbody():
e = alma.image.Emit(model='blackbody')
assert(np.allclose(e.emit(1,[1,100],1000),
e.emit(1,[1,100],2000)*4))
assert(np.allclose(e.emit(1,[1,100],1000),
e.emit(1,[1,200],1000)/2))
assert(np.allclose(e.emit(1,[1,100],1000),
e.emit(0.5,[1,100],1000)/np.sqrt(2)))
def test_rj_tail():
r = 1 + np.arange(10)
t = 1 / r**0.5
e = alma.image.Emit(model='rj_tail')
assert(np.all(np.equal(t,e.emit(r,np.inf))))
def test_other_emit_function():
r = 1 + np.arange(10)
f = lambda x,p: 1 / x**0.4
t = f(r,np.inf)
e = alma.image.Emit()
e.select(func=f, params=[])
assert(np.all(np.equal(t,e.emit(r,np.inf))))
def test_other_emit_function_with_params():
r = 1 + np.arange(10)
f = lambda x,p: p / x**0.4
t = f(r,5)
e = alma.image.Emit()
e.select(func=f, params=['exp','norm'])
assert(np.all(np.equal(t,e.emit(r,5))))
|
{"hexsha": "aa74fbc23fd788c87e3036a940f2a1ebc347069d", "size": 1169, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_emit.py", "max_stars_repo_name": "drgmk/alma", "max_stars_repo_head_hexsha": "85e0a1d4bca179af4889c1687edf9b066cdc07ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-06-28T09:33:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-25T15:13:33.000Z", "max_issues_repo_path": "tests/test_emit.py", "max_issues_repo_name": "drgmk/alma", "max_issues_repo_head_hexsha": "85e0a1d4bca179af4889c1687edf9b066cdc07ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_emit.py", "max_forks_repo_name": "drgmk/alma", "max_forks_repo_head_hexsha": "85e0a1d4bca179af4889c1687edf9b066cdc07ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8333333333, "max_line_length": 60, "alphanum_fraction": 0.5705731394, "include": true, "reason": "import numpy", "num_tokens": 360}
|
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Array-Creation-Function" data-toc-modified-id="Array-Creation-Function-1"><span class="toc-item-num">1 </span>Array Creation Function</a></span><ul class="toc-item"><li><span><a href="#Generate-arrays-using-zeros()" data-toc-modified-id="Generate-arrays-using-zeros()-1.1"><span class="toc-item-num">1.1 </span>Generate arrays using <code>zeros()</code></a></span></li><li><span><a href="#Generate-arrays-using-ones()" data-toc-modified-id="Generate-arrays-using-ones()-1.2"><span class="toc-item-num">1.2 </span>Generate arrays using <code>ones()</code></a></span></li><li><span><a href="#Generate-arrays-using-arange()" data-toc-modified-id="Generate-arrays-using-arange()-1.3"><span class="toc-item-num">1.3 </span>Generate arrays using <code>arange()</code></a></span></li><li><span><a href="#Generate-arrays-using-linspace()" data-toc-modified-id="Generate-arrays-using-linspace()-1.4"><span class="toc-item-num">1.4 </span>Generate arrays using <code>linspace()</code></a></span></li><li><span><a href="#Generate-arrays-using-logspace()" data-toc-modified-id="Generate-arrays-using-logspace()-1.5"><span class="toc-item-num">1.5 </span>Generate arrays using <code>logspace()</code></a></span></li><li><span><a href="#Generate-constant-arrays-using-full()" data-toc-modified-id="Generate-constant-arrays-using-full()-1.6"><span class="toc-item-num">1.6 </span>Generate constant arrays using <code>full()</code></a></span></li><li><span><a href="#Creating-identity-matrix-using-eye()" data-toc-modified-id="Creating-identity-matrix-using-eye()-1.7"><span class="toc-item-num">1.7 </span>Creating identity matrix using <code>eye()</code></a></span></li><li><span><a href="#Generate-arrays-using-random.rand()" data-toc-modified-id="Generate-arrays-using-random.rand()-1.8"><span class="toc-item-num">1.8 </span>Generate arrays using random.rand()</a></span></li><li><span><a href="#Generate-empty-arrays-using-empty()" data-toc-modified-id="Generate-empty-arrays-using-empty()-1.9"><span class="toc-item-num">1.9 </span>Generate empty arrays using <code>empty()</code></a></span></li><li><span><a href="#Arrays-using-specific-data-type" data-toc-modified-id="Arrays-using-specific-data-type-1.10"><span class="toc-item-num">1.10 </span>Arrays using specific data type</a></span></li><li><span><a href="#References" data-toc-modified-id="References-1.11"><span class="toc-item-num">1.11 </span>References</a></span></li></ul></li></ul></div>
# Array Creation Function
# import numpy
import numpy as np
## Generate arrays using `zeros()`
- Returns an array of given shape and type filled with zeros
- **Syntax:** `np.zeros(shape, dtype)`
- shape - integer or sequence of integers
- dtype - data type(default: float)
# 1D array of length 3 with all values 0
Z1 = np.zeros(3)
print(Z1)
# 2D array of 3x4 with all values 0
Z2 = np.zeros((3,4))
print(Z2)
## Generate arrays using `ones()`
- Returns an array of given shape and type filled with ones
- **Syntax:** `np.ones(shape, dtype)`
- shape - integer or sequence of integers
- dtype - data type(default: float)
# 1D array of length 3 with all values 1
A1 = np.ones(3)
print(A1)
__Note__
- Rows = 3
- Columns = 4
# 2D array of 3x4 with all values 1
A2 = np.ones((3,4))
A2
print(A2)
## Generate arrays using `arange()`
- Returns equally spaced numbers within the given range based on step size.
- **Syntax:** `np.arange(start, stop, step)`
- start- starts of interval range
    - stop - end of interval range
- step - step size of interval
# start and step not specified (defaults: start=0, step=1)
A1 = np.arange(10)
print(A1)
# specifying start and step
A2 = np.arange(start=1, stop=10, step=2)
print(A2)
# another way
A3 = np.arange(10, 25, 2)
print(A3)
## Generate arrays using `linspace()`
- Returns equally spaced numbers within the given range based on the sample number.
- **Syntax:** `np.linspace(start, stop, num, dtype, retstep)`
- start-start of interval range
- stop-end of the interval range
- num- number of samples to be generated
- dtype-type of output array
- retstep-return the samples, step values
# array of evenly spaced values 0 to 2, here sample size = 9
L1 = np.linspace(0,2,9)
print(L1)
# Array of 6 evenly divided values from 0 to 100
L2 = np.linspace(0, 100, 6)
print(L2)
# Array of 1 to 5 (num defaults to 50 samples)
L3 = np.linspace(start=1, stop=5, endpoint=True, retstep=False)
print(L3)
# Array of 1 to 5, also returning the step size (retstep=True)
L4 = np.linspace(start=1, stop=5, endpoint=True, retstep=True)
print(L4)
__Specifying Endpoint__
- `endpoint=True`, include 5
- `endpoint=False`, exclude 5
__Specifying Retstep__
- `retstep=False`, doesn't return the step value
- `retstep=True`, returns the samples as well as the step value
## Generate arrays using `logspace()`
- Returns equally spaced numbers within the given range based on the log scale.
- **Syntax:** `np.logspace(start, stop, num, endpoint, base, dtype, retstep)`
- start- start of the sequence
- stop- end of the sequence
- num- number of samples to be generated(default: 50)
- dtype- type of output array
- retstep- return the samples, step values
- endpoint - if true, stop is the last sample
- base - base of the log space(default: 10.0)
# generate an array with 5 samples with base 10.0
np.logspace(1, 10, num=5, endpoint=True)
# generate an array with 5 samples with base 2.0
np.logspace(1, 10, num=5, endpoint=True, base=2.0)
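__Note__
- `np.logspace(start, stop, num)` is equivalent to `base ** np.linspace(start, stop, num)`:
# logspace is base**linspace with the same arguments
np.allclose(np.logspace(1, 10, num=5), 10.0 ** np.linspace(1, 10, num=5))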
## Generate constant arrays using `full()`
- Return a new array of given shape and type, filled with `fill_value`.
- **Syntax:** `np.full(shape,fill_value, dtype)`
- shape - Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    - fill_value - Fill value (scalar).
- dtype - The desired data-type for the array
# generate 2x2 constant array, constant = 7
C = np.full((2, 2), 7)
print(C)
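The `dtype` parameter described above can also be set explicitly, for example to force an integer fill:
# generate 3x3 constant array of ints, constant = 5
C2 = np.full((3, 3), 5, dtype=int)
print(C2)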
## Creating identity matrix using `eye()`
- An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one
- **Syntax:** `np.eye(N, M, k, dtype)`
- N : Number of rows(int) in the output
- M : Number of columns in the output. If None, defaults to `N`.
- k : Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal
- dtype: Data-type of the returned array.
# generate 2x2 identity matrix
I = np.eye(2)
print(I)
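The `k` parameter described above shifts the diagonal:
# generate 3x3 matrix with ones on the first upper diagonal (k=1)
I2 = np.eye(3, k=1)
print(I2)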
## Generate arrays using `random.rand()`
- Returns an array of given shape filled with random values.
- **Syntax:** `np.random.rand(shape)`
- shape - integer or sequence of integer
# create an array with randomly generated 5 values
R = np.random.rand(5)
print(R)
# generate 2x2 array of random values
R1 = np.random.random((2, 2))
print(R1)
# generate 4x5 array of random floats between 0-1
R2 = np.random.rand(4,5)
print(R2)
# generate 6x7 array of random floats between 0-100
R3 = np.random.rand(6,7)*100
print(R3)
# generate 2x3 array of random ints between 0-4
R4 = np.random.randint(5, size=(2,3))
print(R4)
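Random arrays change on every run; seeding the generator makes them reproducible:
# seed the generator, then the same 2x2 array is produced on every run
np.random.seed(42)
R5 = np.random.rand(2, 2)
print(R5)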
## Generate empty arrays using `empty()`
- Return a new array of given shape and type, without initializing entries; the values are whatever already exists in memory, so they are arbitrary.
- **Syntax:** `np.empty(shape, dtype)`
- shape - integer or tuple of integer
- dtype - data-type
# generate an empty array
E1 = np.empty(2)
print(E1)
# 2x2 empty array
E2 = np.empty((2, 2))
print(E2)
## Arrays using specific data type
- float16
- float32
- int8
__SEE MORE__
- https://numpy.org/devdocs/user/basics.types.html
# generate an array of floats
D = np.ones((2, 3, 4), dtype=np.float16)
D
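# generate an array of 8-bit integers (int8, from the list above)
D2 = np.zeros((2, 3), dtype=np.int8)
print(D2)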
## References
- https://numpy.org/
- https://www.edureka.co/blog/python-numpy-tutorial/
- https://github.com/enthought/Numpy-Tutorial-SciPyConf-2019
- [Python Machine Learning Cookbook](https://www.amazon.com/Python-Machine-Learning-Cookbook-Prateek/dp/1786464470)
<hr>
*This notebook was created by [Jubayer Hossain](https://jhossain.me/) | Copyright © 2020, [Jubayer Hossain](https://jhossain.me/)*
|
{"hexsha": "675c6811022394958e67a04c4dcc739b6a85aa71", "size": 8225, "ext": "py", "lang": "Python", "max_stars_repo_path": "book/_build/jupyter_execute/numpy/01-array _creation_function.py", "max_stars_repo_name": "hossainlab/dsnotes", "max_stars_repo_head_hexsha": "fee64e157f45724bba1f49ad1b186dcaaf1e6c02", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "book/_build/jupyter_execute/numpy/01-array _creation_function.py", "max_issues_repo_name": "hossainlab/dsnotes", "max_issues_repo_head_hexsha": "fee64e157f45724bba1f49ad1b186dcaaf1e6c02", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "book/_build/jupyter_execute/numpy/01-array _creation_function.py", "max_forks_repo_name": "hossainlab/dsnotes", "max_forks_repo_head_hexsha": "fee64e157f45724bba1f49ad1b186dcaaf1e6c02", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9642857143, "max_line_length": 2633, "alphanum_fraction": 0.6936170213, "include": true, "reason": "import numpy", "num_tokens": 2400}
|
'''## Multiple Linear Regression ##'''
'''## importing libraries ##'''
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
'''## importing dataset ##'''
dataset = pd.read_csv("50_Startups.csv")
'''### segregating the values ##'''
X = dataset.iloc[:,:-1].values
Y = dataset.iloc[:,4].values
'''### label encode the state columns ##'''
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
label_x = LabelEncoder()
X[:,3] = label_x.fit_transform(X[:,3])
hotenc = OneHotEncoder(categorical_features=[3])  # categorical_features = index of the column to one-hot encode
X = hotenc.fit_transform(X).toarray()  # convert to a dense array, otherwise a sparse matrix is returned
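# Note: `categorical_features` was deprecated in scikit-learn 0.20 and removed in 0.22;
# on newer versions, use sklearn.compose.ColumnTransformer with OneHotEncoder instead.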
'''## Removing extra dummy variable ##'''
X = X[:,1:]
'''### splitting the dataset into Test/Train sets ##'''
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size=0.20, random_state=0)
'''### Fitting the Multiple linear Regression model on the Data set ##'''
from sklearn.linear_model import LinearRegression
LR = LinearRegression()
LR.fit(X_train,y_train)
'''## Predicting the Output ##'''
y_pred = LR.predict(X_test)
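'''## Optional addition: evaluating the fit ##'''
# quick sanity check of the regression using the R^2 score
from sklearn.metrics import r2_score
print("R^2 on test set:", r2_score(y_test, y_pred))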
|
{"hexsha": "fbedb8f59550635d6d28bb0e97209ec99f7dd824", "size": 1187, "ext": "py", "lang": "Python", "max_stars_repo_path": "Multiple_Linear_Regression/MLR.py", "max_stars_repo_name": "ranjankaul/Machine_Learning", "max_stars_repo_head_hexsha": "c64510e450ecbae2cd4b040093370fdd3b5a1f94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Multiple_Linear_Regression/MLR.py", "max_issues_repo_name": "ranjankaul/Machine_Learning", "max_issues_repo_head_hexsha": "c64510e450ecbae2cd4b040093370fdd3b5a1f94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-03T07:46:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-03T07:46:00.000Z", "max_forks_repo_path": "Multiple_Linear_Regression/MLR.py", "max_forks_repo_name": "ranjankaul/Machine_Learning", "max_forks_repo_head_hexsha": "c64510e450ecbae2cd4b040093370fdd3b5a1f94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.675, "max_line_length": 113, "alphanum_fraction": 0.7110362258, "include": true, "reason": "import numpy", "num_tokens": 279}
|