[STATEMENT]
lemma [simp]: "eff i P pc et None = []"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eff i P pc et None = []
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eff i P pc et None = []
[PROOF STEP]
by (simp add: Effect.eff_def)
|
{"llama_tokens": 115, "file": "JinjaDCI_Compiler_TypeComp", "length": 2}
|
from . import common
import sys
import os
def show_cpu_features():
from numpy.lib.utils import _opt_info
info = _opt_info()
info = "NumPy CPU features: " + (info if info else 'nothing enabled')
    # ASV wraps stdout & stderr, so isatty() is unreliable here; assume a tty is present
if 'SHELL' in os.environ and sys.platform != 'win32':
        # print in yellow to avoid the red color imposed by ASV
print(f"\033[33m{info}\033[0m")
else:
print(info)
def dirty_lock(lock_name, lock_on_count=1):
    # this lock is taken before each round to avoid duplicate printing
if not hasattr(os, "getppid"):
return False
ppid = os.getppid()
if not ppid or ppid == os.getpid():
        # not sure if this can happen, but ASV runs each round in
        # a separate process, so the lock should be based on the parent
        # process id only
return False
lock_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), "..", "env", lock_name)
)
    # ASV loads 'benchmark_dir' to discover the available benchmarks.
    # The issue is that ASV doesn't capture any strings from stdout or stderr
    # during this stage, so we let that stage pass and lock on the second increment.
try:
with open(lock_path, 'a+') as f:
f.seek(0)
count, _ppid = (f.read().split() + [0, 0])[:2]
count, _ppid = int(count), int(_ppid)
if _ppid == ppid:
if count >= lock_on_count:
return True
count += 1
else:
count = 0
f.seek(0)
f.truncate()
f.write(f"{str(count)} {str(ppid)}")
except IOError:
pass
return False
# FIXME: there's no official way to provide extra information to the test log
if not dirty_lock("print_cpu_features.lock"):
show_cpu_features()
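# Illustrative aside (annotation, not part of the upstream file): the lock file
# simply holds "<count> <parent-pid>". With the default lock_on_count=1, the
# first call under a given parent writes "1 <ppid>" and returns False (so the
# CPU features are printed once), and every later call under the same parent
# sees count >= 1 and returns True, suppressing duplicate prints.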
|
{"hexsha": "7b9f1d3e688d4206e9d7de31f618fc71f86a5736", "size": 1866, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/benchmarks/__init__.py", "max_stars_repo_name": "iam-abbas/numpy", "max_stars_repo_head_hexsha": "2fb5e969fded3cd468f2ca01d5b954c953545dd9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20453, "max_stars_repo_stars_event_min_datetime": "2015-01-02T09:00:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:35:56.000Z", "max_issues_repo_path": "benchmarks/benchmarks/__init__.py", "max_issues_repo_name": "iam-abbas/numpy", "max_issues_repo_head_hexsha": "2fb5e969fded3cd468f2ca01d5b954c953545dd9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14862, "max_issues_repo_issues_event_min_datetime": "2015-01-01T01:28:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:48:52.000Z", "max_forks_repo_path": "benchmarks/benchmarks/__init__.py", "max_forks_repo_name": "iam-abbas/numpy", "max_forks_repo_head_hexsha": "2fb5e969fded3cd468f2ca01d5b954c953545dd9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9362, "max_forks_repo_forks_event_min_datetime": "2015-01-01T15:49:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T21:26:51.000Z", "avg_line_length": 34.5555555556, "max_line_length": 77, "alphanum_fraction": 0.6028938907, "include": true, "reason": "from numpy", "num_tokens": 468}
|
import sys
from pyspark import SparkConf, SparkContext
import re
import numpy as np
# Load and parse the data
def parseToFrom(line):
data = line.split(":")
pageTo = int(data[0])
# check if data[1] is empty
if not data[1]:
inLinksNum = 0
else:
# use unique links in data[1]
linkData = set(data[1].split())
# linkData = data[1].split()
inLinksNum = len(linkData)
return [pageTo, inLinksNum]
def parseFromTo(line):
data = line.split(":")
pageFrom = int(data[0])
# check if data[1] is empty
if not data[1]:
outLinksNum = 0
else:
# use unique links in data[1]
linkData = set(data[1].split())
# linkData = data[1].split()
outLinksNum = len(linkData)
return [pageFrom, outLinksNum]
def pageList(line):
data = line.split(":")
key = int(data[0])
temp = data[1]
value = temp[2:-5]
return [key, value]
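# Worked example (hypothetical input line, assuming the "page: link link ..."
# format implied above): parseToFrom("3: 1 2 2 5") splits on ':' into
# ["3", " 1 2 2 5"], dedupes the links to {1, 2, 5}, and returns [3, 3].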
if __name__ == '__main__':
# create spark context, sets app and cluster information
sc = SparkContext(appName = 'wez311_task2')
# create distributed data
inLinkData = sc.textFile('/home/globalscratch/ISE495/hw4/lehighWeb/links_to_from.dat').map(parseToFrom).sortBy(lambda x: x[1], ascending = False)
# get the maximum number of in-going links
maxIndex1, maxInLinks = inLinkData.first()
# convert the page list into a dictionary
pageDict = sc.textFile('/home/globalscratch/ISE495/hw4/lehighWeb/pages.dat').map(pageList).collectAsMap()
# save results in file
file = open('4.2_task2.txt','w')
    print('=================================================')
    file.write('The websites with the most in-going links ('+str(maxInLinks)+') are:\n')
    print('The websites with the most in-going links (%d) are:' % maxInLinks)
zeroInLink = 0
for key, value in inLinkData.collect():
if value == maxInLinks:
file.write("http://www1.lehigh.edu/"+pageDict[key]+'\n')
print "http://www1.lehigh.edu/"+pageDict[key]
elif value == 0:
zeroInLink += 1
    print('The number of pages with no in-going links is %d.' % zeroInLink)
file.write('The number of pages with no in-going links is '+str(zeroInLink)+'.\n')
outLinkData = sc.textFile('/home/globalscratch/ISE495/hw4/lehighWeb/links_from_to.dat').map(parseFromTo).sortBy(lambda x: x[1], ascending = False)
# get the maximum number of out-going links
maxIndex2, maxOutLinks = outLinkData.first()
    print('=================================================')
    file.write('\nThe websites with the most out-going links ('+str(maxOutLinks)+') are:\n')
    print('The websites with the most out-going links (%d) are:' % maxOutLinks)
zeroOutLink = 0
for key, value in outLinkData.collect():
if value == maxOutLinks:
file.write("http://www1.lehigh.edu/"+pageDict[key]+'\n')
print "http://www1.lehigh.edu/"+pageDict[key]
elif value == 0:
zeroOutLink += 1
    print('The number of pages with no out-going links is %d.' % zeroOutLink)
file.write('The number of pages with no out-going links is '+str(zeroOutLink)+'.\n')
file.close()
sc.stop()
|
{"hexsha": "916d5fd102afc1c510445d561655e0f19727f774", "size": 3191, "ext": "py", "lang": "Python", "max_stars_repo_path": "part4-SimRank PageRank/wez311_task2.py", "max_stars_repo_name": "weizh888/mining-massive-dataset", "max_stars_repo_head_hexsha": "d98891023a8a38db5c52512607587a47d7145920", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "part4-SimRank PageRank/wez311_task2.py", "max_issues_repo_name": "weizh888/mining-massive-dataset", "max_issues_repo_head_hexsha": "d98891023a8a38db5c52512607587a47d7145920", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "part4-SimRank PageRank/wez311_task2.py", "max_forks_repo_name": "weizh888/mining-massive-dataset", "max_forks_repo_head_hexsha": "d98891023a8a38db5c52512607587a47d7145920", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9880952381, "max_line_length": 150, "alphanum_fraction": 0.6195549984, "include": true, "reason": "import numpy", "num_tokens": 874}
|
module Data.JSON.ToString
import Data.JSON.Value
import Data.Scientific
-- TODO: check if this is working correctly!
private
isControlChar : Char -> Bool
isControlChar c =
  or [ cast c >= (0*1) && cast c <= (15*1 + 1*16)  -- 0x00 .. 0x1F
     , cast c == (15*1 + 7*16)                     -- 0x7F
     , cast c == (5*1 + 8*16)                      -- 0x85
     ]
-- TODO: this nice escaped version doesn't parse unfortunately
-- or [ c >= '\u0000' && c <= '\u001F'
-- , c == '\u007F'
-- , c == '\u0085'
-- ]
-- TODO: check if this is working correctly!
private
escapeUnicodeChar : Char -> List Char
escapeUnicodeChar c = ['\\', 'u'] ++ map fst [hex3, hex2, hex1, hex0] where
  f : Int -> (Char, Int)
  f x = (toChar d, r) where
    d : Int    -- renamed from `c` to avoid shadowing the Char argument
    d = x `mod` 16
    r : Int
    r = x `div` 16
toChar : Int -> Char
  toChar x = cast $ if x < 10
                       then x + cast '0'
                       else x - 10 + cast 'A'  -- offset so 10..15 map to 'A'..'F'
hex0 : (Char,Int)
hex0 = f $ cast c
hex1 : (Char,Int)
hex1 = f $ snd hex0
hex2 : (Char,Int)
hex2 = f $ snd hex1
hex3 : (Char,Int)
hex3 = f $ snd hex2
private
escapeControlChar : Char -> List Char
escapeControlChar c = if isControlChar c
then escapeUnicodeChar c
else [c]
private
escapeChars : List Char -> List Char
escapeChars [] = []
escapeChars (c :: cs) = c' ++ escapeChars cs where
prependBackslash : Char -> List Char
prependBackslash c = ['\\', c]
c' : List Char
c' = case c of
'"' => prependBackslash '"'
'\\' => prependBackslash '\\'
'\b' => prependBackslash 'b'
'\f' => prependBackslash 'f'
'\n' => prependBackslash 'n'
'\r' => prependBackslash 'r'
_ => escapeControlChar c
public export
toString : JSONValue -> String
toString JSONNull = "null"
toString (JSONBool x) = case x of
True => "true"
False => "false"
toString (JSONNumber x) = prettyShowScientific x
toString (JSONString x) = pack $ escapeChars $ unpack x
toString (JSONObject xs) = "{" ++ pairsToString xs ++ "}" where
pairsToString : List (String, JSONValue) -> String
pairsToString [] = ""
pairsToString ((name, value) :: []) = toString (JSONString name) ++ ":" ++ toString value
pairsToString (pair :: pairs) = pairsToString [pair] ++ "," ++ pairsToString pairs
toString (JSONArray xs) = "[" ++ valuesToString xs ++ "]" where
valuesToString : List JSONValue -> String
valuesToString [] = ""
valuesToString (x :: []) = toString x
valuesToString (x :: xs) = valuesToString [x] ++ "," ++ valuesToString xs
|
{"hexsha": "9a9d54d48929f58dc279555512f7c059994d857e", "size": 2599, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/Data/JSON/ToString.idr", "max_stars_repo_name": "jumper149/idris-json", "max_stars_repo_head_hexsha": "82543f1135011d0ac70a6fcd8f1eb70e1eb40fd4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-21T10:51:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-01T02:20:32.000Z", "max_issues_repo_path": "src/Data/JSON/ToString.idr", "max_issues_repo_name": "jumper149/idris2-json", "max_issues_repo_head_hexsha": "82543f1135011d0ac70a6fcd8f1eb70e1eb40fd4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Data/JSON/ToString.idr", "max_forks_repo_name": "jumper149/idris2-json", "max_forks_repo_head_hexsha": "82543f1135011d0ac70a6fcd8f1eb70e1eb40fd4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.313253012, "max_line_length": 91, "alphanum_fraction": 0.5629088111, "num_tokens": 751}
|
"""Simple loader class that reads any of Carl Olsson's datasets from a folder on disk.
Authors: John Lambert
"""
import glob
import os
from pathlib import Path
from typing import Optional
import numpy as np
from gtsam import Cal3Bundler, Pose3, Rot3
from scipy.io import loadmat
import gtsfm.utils.io as io_utils
from gtsfm.common.image import Image
from gtsfm.loader.loader_base import LoaderBase
class OlssonLoader(LoaderBase):
"""Simple loader class that reads any of Carl Olsson's datasets from a folder on disk.
Ref: http://www.maths.lth.se/matematiklth/personal/calle/dataset/dataset.html
Folder layout structure:
- RGB Images: images/
- Intrinsics + Extrinsics data (optional): data.mat
If explicit intrinsics are not provided, the exif data will be used.
"""
def __init__(
self,
folder: str,
image_extension: str = "jpg",
use_gt_intrinsics: bool = True,
use_gt_extrinsics: bool = True,
max_frame_lookahead: int = 20,
max_resolution: int = 760,
) -> None:
"""Initializes to load from a specified folder on disk.
Args:
folder: the base folder for a given scene
image_extension: file extension for the image files. Defaults to 'jpg'.
use_gt_intrinsics: whether to use ground truth intrinsics
            use_gt_extrinsics: whether to use ground truth extrinsics
            max_frame_lookahead: maximum index gap between two frames for them to be
                considered a valid pair (see is_valid_pair()).
            max_resolution: integer representing maximum length of image's short side, i.e.
                the smaller of the height/width of the image. e.g. for 1080p (1920 x 1080),
                max_resolution would be 1080. If the image's short side min(height, width)
                exceeds max_resolution, the image will be downsampled to match it.
"""
super().__init__(max_resolution)
self._use_gt_intrinsics = use_gt_intrinsics
self._use_gt_extrinsics = use_gt_extrinsics
self._max_frame_lookahead = max_frame_lookahead
# fetch all the file names in /images folder
search_path = os.path.join(folder, "images", f"*.{image_extension}")
self._image_paths = glob.glob(search_path)
# sort the file names
self._image_paths.sort()
self._num_imgs = len(self._image_paths)
if self._num_imgs == 0:
raise RuntimeError(f"Loader could not find any images with the specified file extension in {folder}")
cam_matrices_fpath = os.path.join(folder, "data.mat")
if not Path(cam_matrices_fpath).exists():
# not available, so no choice
self._use_gt_intrinsics = False
self._use_gt_extrinsics = False
return
# stores camera poses (extrinsics) and intrinsics as 3x4 projection matrices
# 'P' array will have shape (1,num_imgs), and each element will be a (3,4) matrix
data = loadmat(cam_matrices_fpath)
# M = K [R | t]
# in GTSAM notation, M = K @ cTw
M_list = [data['P'][0][i] for i in range(self._num_imgs)]
        # the first pose is the identity, so K can be read directly off the first projection matrix
self._K = M_list[0][:3,:3]
Kinv = np.linalg.inv(self._K)
# decode camera poses as:
# K^{-1} @ M = cTw
iTw_list = [ Kinv @ M_list[i] for i in range(self._num_imgs)]
self._wTi_list = [Pose3(Rot3(iTw[:3,:3]), iTw[:,3]).inverse() for iTw in iTw_list ]
def __len__(self) -> int:
"""The number of images in the dataset.
Returns:
the number of images.
"""
return self._num_imgs
def get_image_full_res(self, index: int) -> Image:
"""Get the image at the given index, at full resolution.
Args:
index: the index to fetch.
Raises:
IndexError: if an out-of-bounds image index is requested.
Returns:
Image: the image at the query index.
"""
if index < 0 or index >= len(self):
raise IndexError("Image index is invalid")
return io_utils.load_image(self._image_paths[index])
def get_camera_intrinsics_full_res(self, index: int) -> Optional[Cal3Bundler]:
"""Get the camera intrinsics at the given index, valid for a full-resolution image.
Args:
            index: the index to fetch.
Returns:
intrinsics for the given camera.
"""
if not self._use_gt_intrinsics:
# get intrinsics from exif
intrinsics = io_utils.load_image(self._image_paths[index]).get_intrinsics_from_exif()
else:
intrinsics = Cal3Bundler(
fx=min(self._K[0, 0], self._K[1, 1]),
k1=0,
k2=0,
u0=self._K[0, 2],
v0=self._K[1, 2],
)
return intrinsics
def get_camera_pose(self, index: int) -> Optional[Pose3]:
"""Get the camera pose (in world coordinates) at the given index.
Args:
index: the index to fetch.
Returns:
the camera pose w_T_index.
"""
if not self._use_gt_extrinsics:
return None
wTi = self._wTi_list[index]
return wTi
def is_valid_pair(self, idx1: int, idx2: int) -> bool:
"""Checks if (idx1, idx2) is a valid pair.
Args:
idx1: first index of the pair.
idx2: second index of the pair.
Returns:
validation result.
"""
return idx1 < idx2 and abs(idx1 - idx2) <= self._max_frame_lookahead
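# Minimal usage sketch (annotation; the dataset path below is a placeholder,
# not part of this module):
#
#   loader = OlssonLoader("/path/to/olsson_scene", image_extension="jpg")
#   print(f"{len(loader)} images, first pose:\n{loader.get_camera_pose(0)}")
#   intrinsics = loader.get_camera_intrinsics_full_res(0)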
|
{"hexsha": "fe9af4a775eafe82522940803cfbf5fbe4400df7", "size": 5576, "ext": "py", "lang": "Python", "max_stars_repo_path": "gtsfm/loader/olsson_loader.py", "max_stars_repo_name": "PratyushaMaiti/gtsfm", "max_stars_repo_head_hexsha": "0d03dca0b6fb9293c9a3fb619a2141903168269a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gtsfm/loader/olsson_loader.py", "max_issues_repo_name": "PratyushaMaiti/gtsfm", "max_issues_repo_head_hexsha": "0d03dca0b6fb9293c9a3fb619a2141903168269a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gtsfm/loader/olsson_loader.py", "max_forks_repo_name": "PratyushaMaiti/gtsfm", "max_forks_repo_head_hexsha": "0d03dca0b6fb9293c9a3fb619a2141903168269a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4186046512, "max_line_length": 113, "alphanum_fraction": 0.6119081779, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1352}
|
%% build_system
% Build a system to solve by MaxwellFDFD.
%%% Syntax
% [osc, grid3d, s_factor_cell, eps_cell, mu_cell, J_cell, M_cell] = build_system(ge, OSC, DOM, OBJ, SRC, [progmark])
% [..., obj_array, src_array, mat_array] = build_system(ge, OSC, DOM, OBJ, SRC, [progmark])
% [..., eps_node_array, mu_node_array] = build_system(ge, OSC, DOM, OBJ, SRC, [progmark])
%%% Description
% |build_system(ge, OSC, DOM, OBJ, SRC, [progmark])| constructs a system from
% given objects and sources. The constructed system is typically used inside
% <maxwell_run.html maxwell_run>.
%
% |ge| is an instance of |GT| and indicates the grid type of the _E_-field.
% Each following argument, |OSC|, |DOM|, |OBJ|, and |SRC|, represents a
% group of parameters. Each group supports several flexible expressions.
% For more details, see the relevant sections about the input parameter
% groups in <maxwell_run.html |maxwell_run|>.
%
% An additional input parameter |progmark| is an instance of <ProgMark.html
% ProgMark>, which outputs the progress of the system build procedure as the
% standard output. If it is not given, then it is created internally.
%
% |[osc, grid3d, s_factor_cell, eps_cell, mu_cell, J_cell, M_cell] = build_system(...)|
% returns
%
% * |osc|, an instance of <Oscillation.html Oscillation>
% * |grid3d|, an instance of <Grid3d.html Grid3d>,
% * |s_factor_cell|, a cell array of PML s-factors: |{sx_array, sy_array,
% sz_array}|
% * |eps_cell|, a cell array of electric permittivity evaluated at the E-field
% positions: |{eps_xx_array, eps_yy_array, eps_zz_array}|
% * |mu_cell|, a cell array of magnetic permeability evaluated at the H-field
% positions: |{mu_xx_array, mu_yy_array, mu_zz_array}|
% * |J_cell|, a cell array of electric current sources: |{Jx_array, Jy_array,
% Jz_array}|
% * |M_cell|, a cell array of magnetic current sources: |{Mx_array, My_array,
% Mz_array}|
%
% |[..., obj_array, src_array, mat_array] = build_system(...)| returns
% additionally arrays of instances of <EMObject.html |EMObject|>, <Source.html
% |Source|>, and <Material.html |Material|>. The |EMObject| and |Source|
% elements represent the objects and sources placed in the simulation
% domain, so they can be used to visualize the simulation domain.
%
% |[..., eps_node_array, mu_node_array] = build_system(...)| returns
% additionally arrays of electric permittivity and magnetic permeability
% evaluated at the nodes of the finite-difference grid.
%%% Example
% gray = [0.5 0.5 0.5]; % [r g b]
% [osc, grid3d, s_factor_cell, eps_cell, mu_cell, J_cell, M_cell, ...
% obj_array, src_array, mat_array, eps_node_array, mu_node_array] = build_system(...
% 'OSC', 1e-9, 1550, ...
% 'DOM', {['Palik/SiO2'], 'none'}, [-700, 700; -600, 600; -200, 1700], 20, BC.p, 200, ...
% 'OBJ', ...
% {['Palik/SiO2'], 'none'}, Box([-50, 50; -50, 50; -200, 1700], [2, 2, 20]), ... % OBJ1
% {['CRC/Ag'], gray}, [Box([-700, -25; -25, 25; -200, 1700], 20), Box([25, 700; -25, 25; -200, 1700], 20)], ... % OBJ2
% 'SRC', PointSrc(Axis.x, [0, 0, 200]) ...
% );
function [osc, grid3d, s_factor_cell, eps_cell, mu_cell, J_cell, M_cell, ...
obj_array, src_array, mat_array, eps_node, mu_node, isiso] = build_system(varargin)
iarg = nargin; arg = varargin{iarg};
if istypesizeof(arg, 'ProgMark')
pm = arg;
narglim = nargin - 1;
else
pm = ProgMark();
varargin = [varargin, {pm}];
narglim = nargin;
end
iarg = 1; arg = varargin{iarg};
chkarg(istypesizeof(arg, 'GT'), 'argument #%d should be "ge" (GT).', iarg);
ge = arg;
iarg = iarg + 1; arg = varargin{iarg};
chkarg(istypesizeof(arg, 'PML'), 'argument #%d should be "pml" (PML).', iarg);
pml = arg;
function material = create_material(varargin)
narg = nargin;
if istypesizeof(varargin{end}, 'logical')
narg = narg - 1;
end
chkarg(narg >= 2, '# of arguments should be at least 2.')
matname = varargin{1};
if isempty(strfind(matname, '/')) % data table is not specified
material = Material(varargin{:});
else % data table is specified
material = Material.fromtable(osc, varargin{:});
end
end
osc = Oscillation.empty();
obj_dom = EMObject.empty();
shape_array = Shape.empty();
sshape_array = Shape.empty();
mat_array = Material.empty();
obj_array = EMObject.empty();
sobj_array = EMObject.empty();
srcj_array = [];
srcm_array = [];
isepsgiven = false;
isTFSF = false;
while iarg < narglim
iarg = iarg + 1; arg = varargin{iarg};
if ischar(arg) && strcmpi(arg,'OSC')
% Set up OSC.
iarg = iarg + 1; arg = varargin{iarg};
chkarg((istypesizeof(arg, 'real') && arg > 0) || istypesizeof(arg, 'Oscillation'), ...
'"argument #%d should be either "L0" (positive) or "osc" (instance of Oscillation).', iarg);
if istypesizeof(arg, 'real')
L0 = arg;
iarg = iarg + 1; wvlen = varargin{iarg};
chkarg(istypesizeof(wvlen, 'complex'), 'argument #%d should be "wvlen" (complex).', iarg);
unit = PhysUnit(L0);
osc = Oscillation(wvlen, unit);
else % arg is instance of Oscillation
osc = arg;
end
elseif ischar(arg) && strcmpi(arg,'DOM')
% Set up DOM.
iarg = iarg + 1; arg = varargin{iarg};
chkarg(iscell(arg) || istypesizeof(arg, 'Material') || istypesizeof(arg, 'EMObject'), ...
'argument #%d should be cell, instance of Material, or instance of EMObject.', iarg);
if istypesizeof(arg, 'EMObject')
obj_dom = arg;
domain = obj_dom.shape;
chkarg(istypesizeof(domain, 'Domain'), 'argument #%d should be instance of EMObject with Domain as its shape.', iarg);
else
if iscell(arg)
mat_dom = create_material(arg{:});
else
assert(istypesizeof(arg, 'Material'));
mat_dom = arg;
end
iarg = iarg + 1; arg = varargin{iarg};
chkarg(istypesizeof(arg, 'real', [Axis.count, Sign.count]) || istypesizeof(arg, 'Domain'), ...
'argument #%d should be either "box_dom" ([xmin xmax; ymin ymax; zmin zmax]) or "domain" (instance of Domain).', iarg);
if istypesizeof(arg, 'real', [Axis.count, Sign.count])
box_domain = arg;
iarg = iarg + 1; dl_domain = varargin{iarg};
chkarg(istypeof(dl_domain, 'real') && isexpandable2row(dl_domain, Axis.count), ...
'"argument #%d should be dl_domain (positive number or length-%d row vector of positive numbers).', iarg, Axis.count);
domain = Domain(box_domain, expand2row(dl_domain, Axis.count));
else % arg is instance of Domain
domain = arg;
end
obj_dom = EMObject(domain, mat_dom);
end
mat_array = [mat_array(1:end), obj_dom.material];
% Set up boundary conditions and PML thicknesses.
iarg = iarg + 1; bc = varargin{iarg};
chkarg(istypeof(bc, 'BC') && isexpandable2mat(bc, Axis.count, Sign.count), ...
'argument #%d should be "bc" (scalar, length-%d row vector, or %d-by-%d matrix with BC as elements).', iarg, Axis.count, Axis.count, Sign.count);
iarg = iarg + 1; Lpml = varargin{iarg};
chkarg(istypeof(Lpml, 'real') && isexpandable2mat(Lpml, Axis.count, Sign.count) && all(all(Lpml>=0)), ...
'argument #%d should be "Lpml" (scalar, length-%d row vector, or %d-by-%d matrix with nonnegative numbers as elements).', iarg, Axis.count, Axis.count, Sign.count);
% Set up the degree of the polynomial grading of the PML scale
% factors.
iarg = iarg + 1; arg = varargin{iarg};
deg_pml = 4; % polynomial degree
if istypeof(arg, 'real')
deg_pml = arg;
else
iarg = iarg - 1; % because deg_pml is optional argument
end
% Set up the target reflection coefficient of the PML.
iarg = iarg + 1; arg = varargin{iarg};
R_pml = exp(-16); % target reflection coefficient
if istypeof(arg, 'real')
R_pml = arg;
else
iarg = iarg - 1; % because R_pml is optional argument
end
% Set up a flag to generate a grid dynamically.
iarg = iarg + 1; arg = varargin{iarg};
withuniformgrid = false; % generate a grid dynamically by default.
if istypesizeof(arg, 'logical')
withuniformgrid = arg;
else
iarg = iarg - 1; % because withuniformgrid is optional argument
end
elseif ischar(arg) && (strcmpi(arg,'OBJ') || strcmpi(arg,'SOBJ'))
% Set up OBJ.
is_scatterer = strcmpi(arg,'SOBJ');
iarg = iarg + 1; arg = varargin{iarg};
if istypesizeof(arg, 'complex', [0 0 0]) % 3D complex array with arbitrary size
isepsgiven = true;
eps_node_cell = {arg, arg, arg};
mu_node_temp = ones(size(arg));
mu_node_cell = {mu_node_temp, mu_node_temp, mu_node_temp};
else
% Set up objects.
obj_array_temp = EMObject.empty();
shape_array_temp = Shape.empty();
while iscell(arg) || istypesizeof(arg, 'Material') || istypesizeof(arg, 'EMObject', [1 0])
if istypesizeof(arg, 'EMObject', [1 0])
objs = arg;
obj_array_temp = [obj_array_temp(1:end), objs];
for obj = objs
shape_array_temp = [shape_array_temp(1:end), obj.shape];
mat_array = [mat_array(1:end), obj.material];
end
iarg = iarg + 1; arg = varargin{iarg};
else
if iscell(arg)
mat = create_material(arg{:});
else
assert(istypesizeof(arg, 'Material'));
mat = arg;
end
mat_array = [mat_array(1:end), mat];
iarg = iarg + 1; arg = varargin{iarg};
while istypesizeof(arg, 'Shape', [1 0])
shapes = arg;
shape_array_temp = [shape_array_temp(1:end), shapes];
objs = EMObject(shapes, mat);
obj_array_temp = [obj_array_temp(1:end), objs];
iarg = iarg + 1; arg = varargin{iarg};
end
end
end
iarg = iarg - 1;
if is_scatterer
sshape_array = [sshape_array(1:end), shape_array_temp];
sobj_array = [sobj_array(1:end), obj_array_temp];
else
shape_array = [shape_array(1:end), shape_array_temp];
obj_array = [obj_array(1:end), obj_array_temp];
end
end
elseif ischar(arg) && strcmpi(arg,'SRCJ')
% Set up sources.
iarg = iarg + 1; arg = varargin{iarg};
if ~istypesizeof(arg, 'Source', [1 0])
warning('Maxwell:buildSys', 'no source is given.');
end
while istypesizeof(arg, 'Source', [1 0])
if istypesizeof(arg, 'TFSFPlaneSrc')
isTFSF = true;
end
srcj_array_curr = arg;
for src = srcj_array_curr
src.set_gridtype(ge);
end
srcj_array = [srcj_array(1:end), srcj_array_curr];
iarg = iarg + 1; arg = varargin{iarg};
end
iarg = iarg - 1;
elseif ischar(arg) && strcmpi(arg,'SRCM')
% Set up sources.
iarg = iarg + 1; arg = varargin{iarg};
if ~istypesizeof(arg, 'Source', [1 0])
warning('Maxwell:buildSys', 'no source is given.');
end
while istypesizeof(arg, 'Source', [1 0])
if istypesizeof(arg, 'TFSFPlaneSrc')
isTFSF = true;
end
srcm_array_curr = arg;
for src = srcm_array_curr
src.set_gridtype(alter(ge));
end
srcm_array = [srcm_array(1:end), srcm_array_curr];
iarg = iarg + 1; arg = varargin{iarg};
end
iarg = iarg - 1;
elseif iarg == narglim
		chkarg(false, ['some arguments are not used.\n', ...
			'Suggestion: check that each parameter group begins with its specifier.']);
end
end
chkarg(~isempty(osc), 'OSC parameter groups should be set.');
chkarg(~isempty(obj_dom), 'DOM parameter groups should be set.');
obj_array = [obj_dom, obj_array];
src_array = [srcj_array, srcm_array];
if isTFSF && isempty(sobj_array)
	warning('Maxwell:objAssign', 'TF/SF source is used, but scatterers are not defined in the SOBJ group.');
end
chkarg(iarg <= narglim, 'more arguments than expected.');
pm.mark('initial setup');
fprintf('\tLength Unit: %s m\n', osc.unit.value(PhysQ.L));
fprintf('\twvlen = %s, freq = %s eV\n', num2str(osc.in_L0()), num2str(osc.in_eV()));
mat_array = unique(mat_array);
isiso = true;
fprintf('materials used:\n');
for mat = mat_array
epstext = mat.eps;
if length(unique(epstext)) == 1
epstext = epstext(Axis.x);
end
mutext = mat.mu;
if length(unique(mutext)) == 1
mutext = mutext(Axis.x);
end
fprintf('\t%s: eps = %s, mu = %s\n', mat.name, mat2str(epstext), mat2str(mutext));
isiso = isiso && mat.isiso;
end
% Generate a grid.
[lprim, Npml] = generate_lprim3d(domain, Lpml, [shape_array, sshape_array], src_array, withuniformgrid);
grid3d = Grid3d(osc.unit, lprim, Npml, bc);
if withuniformgrid
pm.mark('uniform grid generation');
else
pm.mark('nonuniform grid generation');
end
fprintf('\t[Nx Ny Nz] = %s\n', mat2str(grid3d.N));
% Generate a warning when a seemingly 2D simulation is defined on a 3D grid.
[like2d, normal_axis] = is2dlike(grid3d.N);
if like2d && grid3d.N(normal_axis) >= 2 % possible user mistake
	warning(['If this is a 2D structure, N%s should be 1; ', ...
		'check the d%s''s of objects and the locations of sources.'], char(normal_axis), char(normal_axis));
end
% Construct material parameters.
if ~isepsgiven
[eps_node_cell, mu_node_cell] = assign_material_node(grid3d, obj_array); % Nx x Ny x Nz
end
eps_cell = mean_material_node(grid3d, ge, eps_node_cell);
mu_cell = mean_material_node(grid3d, alter(ge), mu_node_cell);
% Construct PML s-factors.
s_factor_cell = generate_s_factor(osc.in_omega0(), grid3d, deg_pml, R_pml);
pm.mark('eps and mu assignment');
if ~isTFSF
% Solve for modes.
for src = src_array
if istypesizeof(src, 'ModalSrc')
modalsrc = src;
if ~modalsrc.ispreped
prep_modalsrc(ge, pml, osc, grid3d, eps_cell, mu_cell, s_factor_cell, modalsrc);
end
neff = modalsrc.neff;
beta = 2*pi*neff / osc.in_L0();
pm.mark('mode calculation');
fprintf('\tbeta = %s, n_eff = %s\n', num2str(beta), num2str(neff));
end
end
else % isTFSF == true
% Set up J for TF/SF.
for src = src_array
if istypesizeof(src, 'TFSFPlaneSrc')
tfsfsrc = src;
cb_center = num2cell(tfsfsrc.shape.cb_center);
for bgobj = fliplr(obj_array)
if bgobj.shape.contains(cb_center{:})
break; % assume that last object containing TF box center fills TF box
end
end
tfsfsrc.set_bg_material(bgobj.material);
F0 = tfsfsrc.create_incidentF(osc, grid3d);
JM = cell(1, Axis.count);
for w = Axis.elems
JM{w} = zeros(grid3d.N);
end
if tfsfsrc.gt == ge
eqtype_tfsf = EquationType(FT.e, ge); % for SRCJ, create E-field eq
else
eqtype_tfsf = EquationType(FT.h, ge); % for SRCM, create H-field eq
end
% A = create_eq(eqtype_tfsf, pml, osc.in_omega0(), eps_cell, mu_cell, s_factor_cell, JM, JM, grid3d);
eq = MatrixEquation(eqtype_tfsf, pml, osc.in_omega0(), eps_cell, mu_cell, s_factor_cell, JM, JM, grid3d);
Op = eq.matrixfree_op();
x0 = [F0{Axis.x}(:); F0{Axis.y}(:); F0{Axis.z}(:)];
r = reordering_indices(Axis.count, grid3d.N);
x0 = x0(r);
% JM = (A*x0) ./ (-1i*osc.in_omega0());
JM = Op(x0, 'notransp') ./ (-1i*osc.in_omega0());
JM = reshape(JM, [Axis.count grid3d.N]);
JM = permute(JM, [Axis.elems+1, 1]);
JM = {JM(:,:,:,Axis.x), JM(:,:,:,Axis.y), JM(:,:,:,Axis.z)};
tfsfsrc.setJM(JM, grid3d);
end
end
% Add sobj_array to the already-generated eps and mu.
[eps_node_cell, mu_node_cell] = assign_material_node(grid3d, sobj_array, eps_node_cell, mu_node_cell); % Nx x Ny x Nz
eps_cell = mean_material_node(grid3d, ge, eps_node_cell); % Nx x Ny x Nz
mu_cell = mean_material_node(grid3d, alter(ge), mu_node_cell); % Nx x Ny x Nz
pm.mark('TF/SF source assignment');
end
obj_array = [obj_array, sobj_array];
eps_node = cell(1, Axis.count);
mu_node = cell(1, Axis.count);
for w = Axis.elems
eps_node_cell{w} = expand_node_array(grid3d, eps_node_cell{w}); % (Nx+2) x (Ny+2) x (Nz+2)
mu_node_cell{w} = expand_node_array(grid3d, mu_node_cell{w}); % (Nx+2) x (Ny+2) x (Nz+2)
eps_node{w} = Scalar3d(eps_node_cell{w}, grid3d, [GT.dual GT.dual GT.dual], osc, PhysQ.eps, '\epsilon');
mu_node{w} = Scalar3d(mu_node_cell{w}, grid3d, [GT.dual GT.dual GT.dual], osc, PhysQ.mu, '\mu');
end
% Construct sources.
J_cell = assign_source(grid3d, srcj_array);
M_cell = assign_source(grid3d, srcm_array);
pm.mark('J assignment');
end
|
{"author": "wsshin", "repo": "maxwellfdfd", "sha": "f7d583813781694c8a6f0533a91f56c2a78a9ee5", "save_path": "github-repos/MATLAB/wsshin-maxwellfdfd", "path": "github-repos/MATLAB/wsshin-maxwellfdfd/maxwellfdfd-f7d583813781694c8a6f0533a91f56c2a78a9ee5/io/build_system.m"}
|
from IPython import embed
import numpy
from synospec.etc.source import OnSkyMoffat, OnSkyGaussian
def test_moffat():
fwhm = 0.7
beta = 3.5
sampling = 0.05
size = 5.
gau = OnSkyGaussian(fwhm, sampling=sampling, size=size)
mean_gau_x = numpy.sum(gau.data*gau.X)/numpy.sum(gau.data)
mean_gau_y = numpy.sum(gau.data*gau.Y)/numpy.sum(gau.data)
assert numpy.allclose([mean_gau_x, mean_gau_y], [0,0]), 'Gaussian not centered'
peak = numpy.amax(gau.data)
indx = (gau.data > 0.47*peak) & (gau.data < 0.53*peak)
gau_fwhm = numpy.mean(2*numpy.sqrt(gau.X[indx]**2 + gau.Y[indx]**2))
assert numpy.absolute(fwhm - gau_fwhm) < sampling, 'Bad FWHM'
mof = OnSkyMoffat(fwhm, beta, sampling=sampling, size=size)
mean_mof_x = numpy.sum(mof.data*mof.X)/numpy.sum(mof.data)
mean_mof_y = numpy.sum(mof.data*mof.Y)/numpy.sum(mof.data)
assert numpy.allclose([mean_mof_x, mean_mof_y], [0,0]), 'Moffat not centered'
peak = numpy.amax(mof.data)
indx = (mof.data > 0.47*peak) & (mof.data < 0.53*peak)
mof_fwhm = numpy.mean(2*numpy.sqrt(mof.X[indx]**2 + mof.Y[indx]**2))
assert numpy.absolute(fwhm - mof_fwhm) < sampling, 'Bad FWHM'
assert numpy.array_equal(gau.x, mof.x), 'Coordinate arrays are different'
assert numpy.isclose(gau_fwhm, mof_fwhm), 'Gaussian and Moffat FWHM too different'
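# Note (annotation, not part of the original test): the FWHM estimates above
# select pixels whose value lies within 0.03*peak of half the peak, i.e. a thin
# ring tracing the half-maximum contour, and average twice the radial distance
# of those pixels. Widening the 0.47-0.53 window admits more pixels but blurs
# the estimate; the assertion tolerance is one sampling step.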
|
{"hexsha": "8fcec1f11019a613c55301328004151bd64c33d2", "size": 1360, "ext": "py", "lang": "Python", "max_stars_repo_path": "synospec/tests/test_source.py", "max_stars_repo_name": "kbwestfall/synospec", "max_stars_repo_head_hexsha": "1b882b3834b957a14a511db091421166ac24abfb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synospec/tests/test_source.py", "max_issues_repo_name": "kbwestfall/synospec", "max_issues_repo_head_hexsha": "1b882b3834b957a14a511db091421166ac24abfb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synospec/tests/test_source.py", "max_forks_repo_name": "kbwestfall/synospec", "max_forks_repo_head_hexsha": "1b882b3834b957a14a511db091421166ac24abfb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-07T19:54:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-07T19:54:49.000Z", "avg_line_length": 34.8717948718, "max_line_length": 86, "alphanum_fraction": 0.6772058824, "include": true, "reason": "import numpy", "num_tokens": 477}
|
#!/usr/bin/env python
# coding: utf-8
# ## First Impressions Bayesian Deep Learning
#
# #### How does Laplace Approximation work in determining accuracy of facial impressions?
# In[1]:
#import libraries
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import cv2
import glob as glob
from PIL import Image
# In[2]:
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense,Flatten,MaxPooling2D
from keras.layers import Conv2D
from tensorflow import keras
from tensorflow.keras import datasets, layers, models
from sklearn import metrics
# ### Prepping the image data
# In[3]:
#function for splitting a stacked one-column frame into a two-column frame (even rows, odd rows)
def split_odd_even(df,col_name1, col_name2):
even = df.loc[::2]
even = even.reset_index(drop=True)
even.columns = [col_name1]
odd = df.loc[1::2]
odd = odd.reset_index(drop=True)
odd.columns = [col_name2]
split_df = pd.concat([even,odd], axis = 1)
return split_df
# In[4]:
#Image directory path
img_folder = 'C:\\Users\\hinds\\CSResearch\\Data\\Image'
#Reading images and filenames into array
def create_dataset_PIL(img_folder):
img_data_array = []
for file in os.listdir(os.path.join(img_folder)):
img_data_array.append(file)
image_path = os.path.join(img_folder, file)
image = np.array(Image.open(image_path))
image = image.astype('float32')
image /= 255
img_data_array.append(image)
img_arr = pd.DataFrame(img_data_array)
img_arr = img_arr.astype(object)
#img_arr.columns = ["Image", "FileName"]
return img_arr
#Array of files and images
img_arr = create_dataset_PIL(img_folder)
# In[5]:
#Separating file name and image
img_df = split_odd_even(img_arr, "FileName", "Image")
# In[6]:
#Directory for annotations
annotations_folder = 'C:\\Users\\hinds\\CSResearch\\Data\\Annotation'
#functions for reading annotations to multidimensional array
def get_annotations(path):
os.chdir(path)
all_files = glob.glob(path)
li = []
for filename in os.listdir()[1:]:
df = pd.read_csv(filename, index_col=None, header=None)
li.append(df)
return li
ann_arr = get_annotations(annotations_folder)
# In[7]:
#Function for creating data frame splitting training, testing and validation sets
def make_ann_df(arr):
    #separate testing, index 0-3
testing = pd.concat([arr[0],arr[1], arr[2], arr[3]], axis = 1)
testing.columns = ["TestFile1", "Age","TestFile2", "Dominance","TestFile3", "IQ","TestFile4", "Trustworthiness"]
    #separate training, index 4-7
training = pd.concat([arr[4],arr[5], arr[6], arr[7]], axis = 1)
training.columns = ["TrainFile1", "Age","TrainFile2", "Dominance","TrainFile3", "IQ","TrainFile4", "Trustworthiness"]
    #separate validation, index 8-11
validation = pd.concat([arr[8],arr[9], arr[10], arr[11]], axis = 1)
validation.columns = ["ValFile1", "Age","ValFile2", "Dominance","ValFile3", "IQ","ValFile4", "Trustworthiness"]
return testing, training, validation
testing,training,validation = make_ann_df(ann_arr)
# In[8]:
#function for matching images with their respective features.
def make_image_ann_df(image,ann,ttv):
merged_df = pd.merge(image, ann, left_on="FileName", right_on= ttv + "File1", how="inner")
merged_df = merged_df.drop([ttv+'File1', ttv+'File2',ttv+'File3',ttv+'File4'], axis=1)
return merged_df
# In[9]:
#Create sets
#test set
test = make_image_ann_df(img_df,testing,"Test").dropna()
#train set
train = make_image_ann_df(img_df,training,"Train").dropna()
#validation set
validate = make_image_ann_df(img_df,validation,"Val").dropna()
# In[90]:
def train_test_val_set(train,test,val,response):
x_train,y_train = train['Image'].values,train[response].values
x_train = np.array([np.array(val) for val in x_train])
y_train = np.array([np.array(val) for val in y_train])
x_test,y_test = test['Image'].values,test[response].values
x_test = np.array([np.array(val) for val in x_test])
y_test = np.array([np.array(val) for val in y_test])
x_validate,y_validate = val['Image'].values,val[response].values
x_validate = np.array([np.array(val) for val in x_validate])
y_validate = np.array([np.array(val) for val in y_validate])
return x_train, y_train, x_test, y_test, x_validate, y_validate
# In[91]:
#Set which response value you want to predict: Age, Dominance, IQ, Trustworthiness
response = "Age"
x_train, y_train, x_test, y_test, x_validate, y_validate = train_test_val_set(train,test,validate,response)
# In[85]:
#Viewing images with their response value
fig, axes = plt.subplots(3, 3, figsize=(10,10))
for i in range(3):
for j in range(3):
axes[i,j].imshow(x_train[i*4 + j], cmap='gray')
axes[i,j].set_title(response+": " + str(y_train[i*4+j]))
axes[i,j].axis('off')
plt.show()
# ### Pretrain the Model
#
# The Laplace redux model takes in a pretrain neural network so we have a few options for which to pretrain the model on.
#
# Option 1: Pretrain model using a bayesian neural network
# Option 2: pretrain model using a cnn
# In[86]:
np.random.seed(420)
# input_s is the input image size for this dataset: (height, width, channels)
# NOTE: the fit below currently fails ("Shapes (None, 1) and (None, 10) are
# incompatible") because y holds one continuous rating per image while the
# final layer is a 10-way softmax trained with categorical_crossentropy.
input_s = (150,130,1)
#base model
model = keras.Sequential()
#first layer
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',kernel_initializer='he_normal',input_shape=input_s))
model.add(MaxPooling2D((2,2)))
#second layer
model.add(Conv2D(64, (3, 3), activation='relu'))
#third layer
model.add(Conv2D(128, (3, 3), activation='relu'))
#convert matrix to single array
model.add(Flatten())
#Transform vector
model.add(Dense(64, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))
#add dropout to reduce overfitting
model.add(layers.Dropout(0.2))
#add softmax prediction layer
model.add(Dense(10, activation='softmax'))
#compile
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
#set epochs
epo = 10
#fit the model
hist = model.fit(x_train, y_train, epochs=epo, batch_size=64, validation_data=(x_test, y_test), verbose=1)
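# In[ ]:
# Hedged sketch (an assumption on my part, not the author's chosen fix): since
# `response` values such as Age are continuous ratings, the shape error noted
# above can be avoided by swapping the 10-way softmax head for a single linear
# unit and a regression loss:
#
#   model.add(Dense(1, activation='linear'))
#   model.compile(optimizer='Adam', loss='mse', metrics=['mae'])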
|
{"hexsha": "fbf529e9bc69c2b8e5239f3cfa57fc82239f87d4", "size": 6437, "ext": "py", "lang": "Python", "max_stars_repo_path": "FMRI and First Impressions(1).py", "max_stars_repo_name": "markhinds/research", "max_stars_repo_head_hexsha": "bcfb07e4809f8d4b12e9f663d91a82252372737d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FMRI and First Impressions(1).py", "max_issues_repo_name": "markhinds/research", "max_issues_repo_head_hexsha": "bcfb07e4809f8d4b12e9f663d91a82252372737d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FMRI and First Impressions(1).py", "max_forks_repo_name": "markhinds/research", "max_forks_repo_head_hexsha": "bcfb07e4809f8d4b12e9f663d91a82252372737d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6454183267, "max_line_length": 122, "alphanum_fraction": 0.6993941277, "include": true, "reason": "import numpy", "num_tokens": 1735}
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based einsum dense layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import einsum_dense
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters(
{
"testcase_name": "_1d_end_weight",
"equation": "ab,b->a",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": [],
"expected_weight_shape": [32],
"expected_bias_shape": None,
"expected_output_shape": (None,)
}, {
"testcase_name": "_2d_middle_weight",
"equation": "ab,bc->ac",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64)
}, {
"testcase_name": "_3d_bert",
"equation": "abc,cde->abde",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_2_bias",
"equation": "abc,cde->abde",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (1, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_1_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "be",
"input_shape": (None, 7, 2),
"output_shape": (7, 3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [7, 1, 4],
"expected_output_shape": (None, 7, 3, 4)
}, {
"testcase_name": "_3d_bert_projection",
"equation": "BFNH,NHD->BFD",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (1, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4)
}, {
"testcase_name": "_2d_bert",
"equation": "abc,cd->abd",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (1, 4),
"expected_weight_shape": [2, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 4)
}, {
"testcase_name": "_embedding_1d",
"equation": "i,d->id",
"bias_axes": None,
"input_shape": (None,),
"output_shape": (2),
"expected_weight_shape": [2],
"expected_bias_shape": None,
"expected_output_shape": (None, 2)
}, {
"testcase_name": "_xlnet_lm",
"equation": "ibd,nd->ibn",
"bias_axes": None,
"input_shape": (None, None, 1),
"output_shape": (None, 2),
"expected_weight_shape": [2, 1],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 2)
}, {
"testcase_name": "_2d_precast",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, 64)
}, {
"testcase_name": "_2d_precast_multiple_elided_dims",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (None, None, 32),
"output_shape": (64),
"expected_weight_shape": [32, 64],
"expected_bias_shape": None,
"expected_output_shape": (None, None, 64)
}, {
"testcase_name": "_3d_precast",
"equation": "...c,cde->...de",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_2_bias",
"equation": "...c,cde->...de",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 1],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_3d_precast_2_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [2, 3, 4],
"expected_bias_shape": [3, 4],
"expected_output_shape": (None, 1, 3, 4)
}, {
"testcase_name": "_2d_postcast",
"equation": "bc...,cd->bd...",
"bias_axes": None,
"input_shape": (None, 1, 2, 3),
"output_shape": (4),
"expected_weight_shape": [1, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 4, 2, 3)
}, {
"testcase_name": "_3d_postcast",
"equation": "bc...,cde->bde...",
"bias_axes": None,
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": None,
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_1_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "d",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 1, 1],
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "e",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [4, 1],
"expected_output_shape": (None, 3, 4, 2)
}, {
"testcase_name": "_3d_postcast_1_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "de",
"input_shape": (None, 1, 2),
"output_shape": (3, 4),
"expected_weight_shape": [1, 3, 4],
"expected_bias_shape": [3, 4, 1],
"expected_output_shape": (None, 3, 4, 2)
})
class TestEinsumDenseLayer(keras_parameterized.TestCase):
def test_weight_shapes(self, equation, bias_axes, input_shape, output_shape,
expected_weight_shape, expected_bias_shape,
expected_output_shape):
del expected_output_shape # Not used in this test.
weight_shape, bias_shape, _ = einsum_dense._analyze_einsum_string(
equation, bias_axes, input_shape, output_shape)
self.assertAllEqual(expected_weight_shape, weight_shape)
self.assertAllEqual(expected_bias_shape, bias_shape)
def test_layer_creation(self, equation, bias_axes, input_shape, output_shape,
expected_weight_shape, expected_bias_shape,
expected_output_shape):
# Keras elides the 0-dimension of the input shape when constructing inputs.
non_batch_input_shape = list(input_shape)[1:]
input_tensor = keras.Input(shape=non_batch_input_shape)
layer = einsum_dense.EinsumDense(
equation=equation, output_shape=output_shape, bias_axes=bias_axes)
output_tensor = layer(input_tensor)
self.assertAllEqual(expected_weight_shape, layer.kernel.shape.as_list())
if expected_bias_shape is None:
self.assertIsNone(layer.bias)
else:
self.assertAllEqual(expected_bias_shape, layer.bias.shape.as_list())
self.assertAllEqual(expected_output_shape, output_tensor.shape.as_list())
@keras_parameterized.run_all_keras_modes
class TestEinsumLayerAPI(keras_parameterized.TestCase):
def test_layer_api(self):
input_data = np.array([[1.0, 2.0], [3.0, 4.0]])
kwargs = {
"equation": "...b,bc->...c",
"bias_axes": "c",
"output_shape": 4,
"bias_initializer": keras.initializers.constant(0.03),
"kernel_initializer": keras.initializers.constant(0.5),
"dtype": input_data.dtype
}
expected_output = np.array([[1.53, 1.53, 1.53, 1.53],
[3.53, 3.53, 3.53, 3.53]])
output_data = testing_utils.layer_test(
einsum_dense.EinsumDense,
kwargs=kwargs,
input_shape=(None, 2),
input_data=input_data)
self.assertAllClose(expected_output, output_data)
def test_unspecified_bias_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(
equation="ab,bc->ac", output_shape=64, bias_axes="y")
with self.assertRaisesRegex(
ValueError, ".*is not a part of the output specification.*"):
_ = layer(input_tensor)
def test_incompatible_input_output_shape_fails(self):
input_tensor = keras.Input(shape=(32, 64))
layer = einsum_dense.EinsumDense(
equation="abc,cd->abd", output_shape=(10, 96))
with self.assertRaisesRegex(
ValueError, ".*Input shape and output shape do not match at shared "
"dimension 'b'.*"):
_ = layer(input_tensor)
def test_unspecified_output_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(equation="ab,bc->cd", output_shape=64)
with self.assertRaisesRegex(
ValueError, ".*Dimension 'd' was specified in the output 'cd' but has "
"no corresponding dim.*"):
_ = layer(input_tensor)
def test_unspecified_weight_dim_fails(self):
input_tensor = keras.Input(shape=(32,))
layer = einsum_dense.EinsumDense(equation="ab,zd->ad", output_shape=64)
with self.assertRaisesRegex(ValueError,
".*Weight dimension 'z' did not have a match "):
_ = layer(input_tensor)
if __name__ == "__main__":
test.main()
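# Illustrative aside (annotation, not part of the TensorFlow test suite): the
# weight/output shapes the cases above assert can be sanity-checked with plain
# numpy einsum. For "abc,cde->abde", a (batch, 1, 2) input against a (2, 3, 4)
# kernel yields (batch, 1, 3, 4):
#
#   import numpy as np
#   out = np.einsum("abc,cde->abde", np.ones((8, 1, 2)), np.ones((2, 3, 4)))
#   assert out.shape == (8, 1, 3, 4)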
|
{"hexsha": "f7ab34aed3bdfb95a40ac27ff8b3e09a33e12b43", "size": 11486, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/keras/layers/einsum_dense_test.py", "max_stars_repo_name": "yage99/tensorflow", "max_stars_repo_head_hexsha": "c7fa71b32a3635eb25596ae80d007b41007769c4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:31:28.000Z", "max_issues_repo_path": "tensorflow/python/keras/layers/einsum_dense_test.py", "max_issues_repo_name": "sseung0703/tensorflow", "max_issues_repo_head_hexsha": "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 88, "max_issues_repo_issues_event_min_datetime": "2020-11-24T08:18:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T20:28:30.000Z", "max_forks_repo_path": "tensorflow/python/keras/layers/einsum_dense_test.py", "max_forks_repo_name": "sseung0703/tensorflow", "max_forks_repo_head_hexsha": "be084bd7a4dd241eb781fc704f57bcacc5c9b6dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-07-08T07:27:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T08:54:27.000Z", "avg_line_length": 36.3481012658, "max_line_length": 101, "alphanum_fraction": 0.5908061989, "include": true, "reason": "import numpy", "num_tokens": 3220}
|
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
import cv2
def equalize(img):
    # classic histogram equalization: map each gray level to the scaled CDF
    x_max = img.max()
    s = img.size
    h = np.zeros(256)
    for i in range(256):
        h[i] = np.count_nonzero(img == i)  # histogram of gray levels
    out = np.zeros_like(img)
    for i in range(256):
        out[img == i] = x_max / s * h[:i+1].sum()  # x_max * CDF(i)
    return out.astype(np.uint8)
img = io.imread("./dataset/images/imori_256x256_dark.png")
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ans = equalize(gray)
plt.figure(figsize=(12, 3))
plt.subplot(1, 2, 1)
plt.title("gray")
plt.imshow(gray, cmap="gray")
plt.subplot(1, 2, 2)
plt.title("answer")
plt.imshow(ans, cmap="gray")
plt.show()
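# Equivalent vectorized form (a sketch for comparison; implements the same
# mapping as equalize() above, assuming uint8 input in [0, 255]):
def equalize_fast(img):
    hist = np.bincount(img.ravel(), minlength=256)  # per-level pixel counts
    cdf = hist.cumsum()                             # cumulative histogram
    lut = (img.max() / img.size * cdf).astype(np.uint8)
    return lut[img]                                 # apply lookup table per pixel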
|
{"hexsha": "f195fd9e8478f7feba81149ce730312509b545c6", "size": 677, "ext": "py", "lang": "Python", "max_stars_repo_path": "answers/question23.py", "max_stars_repo_name": "Yamahitsuji/Gasyori100knock", "max_stars_repo_head_hexsha": "62b3f776124c25dfb36e45a647d573b36b45d2b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "answers/question23.py", "max_issues_repo_name": "Yamahitsuji/Gasyori100knock", "max_issues_repo_head_hexsha": "62b3f776124c25dfb36e45a647d573b36b45d2b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "answers/question23.py", "max_forks_repo_name": "Yamahitsuji/Gasyori100knock", "max_forks_repo_head_hexsha": "62b3f776124c25dfb36e45a647d573b36b45d2b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.15625, "max_line_length": 58, "alphanum_fraction": 0.6440177253, "include": true, "reason": "import numpy", "num_tokens": 208}
|
# Module for running CNN-BiLSTM vad model,
# may also be run directly as a script
# Author: Nick Wilkinson 2021
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from typing import Tuple
from tensorflow.keras import models
from voxseg import utils
from scipy.signal import medfilt
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the first GPU, quick enough for decoding
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=10,inter_op_parallelism_threads=10)
sess = tf.compat.v1.Session(config=session_conf)
def decode(targets: pd.DataFrame, speech_thresh: float = 0.5, speech_w_music_thresh: float = 0.5, filt: int = 1) -> pd.DataFrame:
'''Function for converting target sequences within a pd.DataFrame to endpoints.
Args:
targets: A pd.DataFrame containing predicted targets (in array form) and metadata.
speech_thresh (optional): A decision threshold between 0 and 1 for the speech class, lower values
result in more frames being classified as speech. (Default: 0.5)
speech_w_music_thresh (optional): A decision threshold between 0 and 1 for the speech_with_music class.
Setting this threshold higher will filter out more music which may be desirable for ASR. (Default: 0.5)
filt (optional): a kernel size for the median filter to apply to the output labels for smoothing. (Default: 1)
Returns:
A pd.DataFrame containing speech segment endpoints and metadata.
'''
targets = targets.copy()
if targets['predicted-targets'].iloc[0].shape[-1] == 4:
prior = np.array([(1-speech_thresh) * speech_w_music_thresh,
speech_thresh * speech_w_music_thresh,
(1-speech_thresh) * (1-speech_w_music_thresh),
(1-speech_thresh) * speech_w_music_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 1 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
elif targets['predicted-targets'].iloc[0].shape[-1] == 2:
prior = np.array([speech_thresh,
1-speech_thresh])
temp = pd.concat([_targets_to_endpoints(medfilt([0 if (j*prior).argmax() == 0 else 1 for j in i], filt), 0.32) \
for i in targets['predicted-targets']], ignore_index=True)
else:
        raise ValueError(f'Model provided has {targets["predicted-targets"].iloc[0].shape[-1]} outputs. Model expected to have 2 or 4 outputs.')
if 'start' in targets.columns:
targets['end'] = targets['start'] + temp['end']
targets['start'] = targets['start'] + temp['start']
else:
targets['start'] = temp['start']
targets['end'] = temp['end']
targets = targets.drop(['predicted-targets'], axis=1)
targets = targets.apply(pd.Series.explode).reset_index(drop=True)
targets['utterance-id'] = targets['recording-id'].astype(str) + '_' + \
((targets['start'] * 100).astype(int)).astype(str).str.zfill(7) + '_' + \
((targets['end'] * 100).astype(int)).astype(str).str.zfill(7)
return targets
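# Typical flow (illustrative only; see also the __main__ block at the bottom
# of this file):
#   targets = predict_targets(model, feats)
#   endpoints = decode(targets, speech_thresh=0.6, speech_w_music_thresh=0.5, filt=3)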
def predict_targets(model: tf.keras.Model, features: pd.DataFrame) -> pd.DataFrame:
'''Function for applying a pretrained model to predict targets from features.
Args:
model: A pretrained tf.keras model.
features: A pd.DataFrame containing features and metadata.
Returns:
A pd.DataFrame containing predicted targets and metadata.
'''
targets = features.drop(['normalized-features'], axis=1)
print('------------------- Running VAD -------------------')
targets['predicted-targets'] = _predict(model, features['normalized-features'])
return targets
def to_data_dir(endpoints: pd.DataFrame, out_dir: str) -> None:
    '''A function for generating a Kaldi-style data directory output of the discovered speech segments.
Args:
endpoints: A pd.DataFrame containing speech segment endpoints and metadata.
out_dir: A path to an output directory where data files will be placed.
'''
if not os.path.exists(out_dir):
print(f'Directory {out_dir} does not exist, creating it.')
os.mkdir(out_dir)
endpoints[['recording-id', 'extended filename']].drop_duplicates().to_csv(
f'{out_dir}/wav.scp',sep=' ', index=False, header=False)
pd.concat([endpoints[['utterance-id', 'recording-id']], endpoints[['start', 'end']].astype(float).round(3)],
axis=1).to_csv(f'{out_dir}/segments', sep=' ', index=False, header=False)
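# For reference, the resulting files follow the usual Kaldi conventions
# (illustrative contents, derived from the code above):
#   wav.scp:   rec1 /path/to/rec1.wav
#   segments:  rec1_0000032_0000096 rec1 0.32 0.96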
def _predict(model: tf.keras.Model, col: pd.Series) -> pd.Series:
'''Auxiliary function used by predict_targets(). Applies a pretrained model to
each feature set in the 'normalized-features' or 'features' column of a pd.DataFrame
containing features and metadata.
Args:
model: A pretrained tf.keras model.
col: A column of a pd.DataFrame containing features.
Returns:
A pd.Series containing the predicted target sequences.
'''
targets = []
for features in col:
#temp = model.predict(utils.time_distribute(features, 15)[:,:,:,:,np.newaxis])
temp = model.predict(features[np.newaxis,:,:,:,np.newaxis])
targets.append(temp.reshape(-1, temp.shape[-1]))
return pd.Series(targets)
def _targets_to_endpoints(targets: np.ndarray, frame_length: float) -> pd.DataFrame:
    '''Auxiliary function used by decode() for converting a target sequence to endpoints.
Args:
targets: A binary np.ndarray of speech/nonspeech targets where 1 indicates the presence of speech.
frame_length: The length of each target in seconds.
Returns:
A pd.DataFrame, containing the speech segment start and end boundaries in arrays.
'''
starts = []
ends = []
state = 0
    for n, i in enumerate(targets):
        state, emission = _update_fst(state, i)
        if emission == 'start':
            starts.append(n)
        elif emission == 'end':
            ends.append(n)
    state, emission = _update_fst(state, None)
    if emission == 'start':
        starts.append(n)
    elif emission == 'end':
        ends.append(n + 1)
starts = np.around(np.array([i * frame_length for i in starts]), 3)
ends = np.around(np.array([i * frame_length for i in ends]), 3)
return pd.DataFrame({'start': [starts],'end': [ends]})
def _update_fst(state: int, transition: Optional[int]) -> Tuple[int, Optional[str]]:
'''Auxiliary function used by _targets_to_endpoints() for updating finite state
transducer.
Args:
state: The current state.
        transition: The input (the next binary target, or None to flush at the end of the sequence).
Returns:
A tuple consisting of the new state and the output ('start', 'end' or None,
representing a start, end or no endpoint detections respectively).
'''
    if state == 0:
        if transition == 0:
            state = 1
            return state, None
        elif transition == 1:
            state = 2
            return state, 'start'
        elif transition is None:
            # empty target sequence: nothing to emit
            return state, None
elif state == 1:
if transition == 0:
return state, None
elif transition == 1:
state = 2
return state, 'start'
elif transition is None:
state = 3
return state, None
elif state == 2:
if transition == 0:
state = 1
return state, 'end'
elif transition == 1:
return state, None
elif transition is None:
state = 3
return state, 'end'
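# Worked example for the two helpers above (not part of the original module):
# with 0.32 s frames, the target sequence [0, 1, 1, 0, 1] passes through the
# FST as 0->1 (none), 1->2 ('start' at n=1), 2 (none), 2->1 ('end' at n=3),
# 1->2 ('start' at n=4), and the final None transition closes the open
# segment, so
#   _targets_to_endpoints(np.array([0, 1, 1, 0, 1]), 0.32)
# yields starts [0.32, 1.28] and ends [0.96, 1.6].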
# Handle args when run directly
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='run_cnnlstm.py',
                                     description='Run a trained voice activity detector on an extracted feature set.')
parser.add_argument('-s', '--speech_thresh', type=float,
help='a decision threshold value between (0,1) for speech vs non-speech, defaults to 0.5')
parser.add_argument('-m', '--speech_w_music_thresh', type=float,
help='a decision threshold value between (0,1) for speech_with_music vs non-speech, defaults to 0.5, \
                             increasing will remove more speech_with_music, useful for downstream ASR')
parser.add_argument('-f', '--median_filter_kernel', type=int,
help='a kernel size for a median filter to smooth the output labels, defaults to 1 (no smoothing)')
parser.add_argument('-M', '--model_path', type=str,
                        help='a path to a trained VAD model saved in .h5 format, overrides default pretrained model')
parser.add_argument('feat_dir', type=str,
help='a path to a directory containing a feats.h5 file with extracted features')
parser.add_argument('out_dir', type=str,
help='a path to an output directory where the output segments will be saved')
args = parser.parse_args()
if args.speech_thresh is not None:
speech_thresh = args.speech_thresh
else:
speech_thresh = 0.5
if args.speech_w_music_thresh is not None:
speech_w_music_thresh = args.speech_w_music_thresh
else:
speech_w_music_thresh = 0.5
if args.median_filter_kernel is not None:
filt = args.median_filter_kernel
else:
filt = 1
feats = pd.read_hdf(f'{args.feat_dir}/feats.h5')
if args.model_path is not None:
model = models.load_model(args.model_path)
else:
model = models.load_model(f'{os.path.dirname(os.path.realpath(__file__))}/models/cnn_bilstm.h5')
targets = predict_targets(model, feats)
endpoints = decode(targets, speech_thresh, speech_w_music_thresh, filt)
to_data_dir(endpoints, args.out_dir)
|
{"hexsha": "f73c8bb84783e010fba58c16673e44bb4d95fadd", "size": 10126, "ext": "py", "lang": "Python", "max_stars_repo_path": "voxseg/run_cnnlstm.py", "max_stars_repo_name": "NickWilkinson37/voxseg", "max_stars_repo_head_hexsha": "6402a67c0b4ee68115070b6aa870199d1f43c5a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2021-03-11T11:32:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T11:54:35.000Z", "max_issues_repo_path": "voxseg/run_cnnlstm.py", "max_issues_repo_name": "parkitny/voxseg", "max_issues_repo_head_hexsha": "6402a67c0b4ee68115070b6aa870199d1f43c5a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-05-21T08:42:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T11:36:43.000Z", "max_forks_repo_path": "voxseg/run_cnnlstm.py", "max_forks_repo_name": "parkitny/voxseg", "max_forks_repo_head_hexsha": "6402a67c0b4ee68115070b6aa870199d1f43c5a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-10T05:55:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T11:19:44.000Z", "avg_line_length": 42.1916666667, "max_line_length": 140, "alphanum_fraction": 0.6405293304, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2329}
|
/*
 * @Description: velocity data
* @Author: Ren Qian
* @Date: 2019-07-17 18:27:40
*/
#ifndef LIDAR_LOCALIZATION_SENSOR_DATA_VELOCITY_DATA_HPP_
#define LIDAR_LOCALIZATION_SENSOR_DATA_VELOCITY_DATA_HPP_
#include <deque>
#include <Eigen/Dense>
namespace lidar_localization {
class VelocityData {
public:
struct LinearVelocity {
double x = 0.0;
double y = 0.0;
double z = 0.0;
};
struct AngularVelocity {
double x = 0.0;
double y = 0.0;
double z = 0.0;
};
double time = 0.0;
LinearVelocity linear_velocity;
AngularVelocity angular_velocity;
public:
static bool SyncData(std::deque<VelocityData>& UnsyncedData, std::deque<VelocityData>& SyncedData, double sync_time);
void TransformCoordinate(Eigen::Matrix4f transform_matrix);
};
} // namespace lidar_localization
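// Usage sketch (illustrative only; the names below are assumptions, and it is
// assumed that SyncData interpolates the unsynced measurements around
// sync_time):
//   std::deque<lidar_localization::VelocityData> raw_data, synced_data;
//   if (lidar_localization::VelocityData::SyncData(raw_data, synced_data, cloud_time)) {
//       synced_data.front().TransformCoordinate(lidar_to_imu_matrix);
//   }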
#endif  // LIDAR_LOCALIZATION_SENSOR_DATA_VELOCITY_DATA_HPP_
|
{"hexsha": "0fe5d1f5d972b390e9652b00feb75b7eb680eebb", "size": 819, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "LiDAR-SLAM/03-mapping-and-matching/src/lidar_localization/include/lidar_localization/sensor_data/velocity_data.hpp", "max_stars_repo_name": "lanqing30/SensorFusionCourse", "max_stars_repo_head_hexsha": "3fcf935d6a4191563afcf2d95b34718fba7f705a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2021-03-19T05:51:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T06:10:16.000Z", "max_issues_repo_path": "03-localization with map/lidar_localization/include/lidar_localization/sensor_data/velocity_data.hpp", "max_issues_repo_name": "WeihengXia0123/LiDar-SLAM", "max_issues_repo_head_hexsha": "834060da7ee0125cefd310d6215821551bac16c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "03-localization with map/lidar_localization/include/lidar_localization/sensor_data/velocity_data.hpp", "max_forks_repo_name": "WeihengXia0123/LiDar-SLAM", "max_forks_repo_head_hexsha": "834060da7ee0125cefd310d6215821551bac16c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2021-02-17T12:31:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-22T17:12:44.000Z", "avg_line_length": 22.75, "max_line_length": 121, "alphanum_fraction": 0.6910866911, "num_tokens": 225}
|
import asyncio
import binascii
from dataclasses import asdict, dataclass
import io
import json
from pathlib import Path
from aiohttp import web
from aiohttp_index import IndexMiddleware
import numpy as np
from PIL import Image
MODULE_DIR = Path(__file__).parent.resolve()
STATIC_PATH = MODULE_DIR / 'web_static'
@dataclass
class Iterate:
"""A message containing a new iterate."""
step: int
steps: int
time: float
update_size: float
loss: float
tv: float
image: Image.Image
@dataclass
class IterationFinished:
"""A message to notify the client that iteration has stopped."""
# pylint: disable=redefined-builtin
def pil_to_data_url(image, format='png', **kwargs):
mime_types = {'jpeg': 'image/jpeg', 'png': 'image/png'}
header = f'data:{mime_types[format]};base64,'
buf = io.BytesIO()
image.save(buf, format=format, **kwargs)
return header + binascii.b2a_base64(buf.getvalue()).decode()
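# e.g. pil_to_data_url(img, format='jpeg', quality=95) returns a string like
# 'data:image/jpeg;base64,/9j/4AAQ...' suitable for an <img src=...> attribute.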
# pylint: disable=method-hidden
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, np.floating):
return float(o)
if isinstance(o, np.integer):
return int(o)
return super().default(o)
json_encoder = JSONEncoder()
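# The custom encoder lets NumPy scalars serialize transparently, e.g.
# json_encoder.encode({'loss': np.float32(0.5)}) == '{"loss": 0.5}'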
async def handle_websocket(request):
app = request.app
ws = web.WebSocketResponse()
await ws.prepare(request)
app.wss.append(ws)
async for _ in ws:
pass
try:
app.wss.remove(ws)
except ValueError:
pass
return ws
async def send_message(app, msg):
for ws in app.wss:
try:
await ws.send_json(msg, dumps=json_encoder.encode)
except ConnectionError:
try:
app.wss.remove(ws)
except ValueError:
pass
async def process_events(app):
while True:
event = await app.event_queue.get()
if app.wss:
msg = asdict(event)
msg['_type'] = type(event).__name__
if 'image' in msg:
msg['image'] = pil_to_data_url(msg['image'], **app.image_encode_settings)
await send_message(app, msg)
class WebInterface:
def __init__(self):
self.app = None
self.loop = None
def run(self, port=8000):
"""Runs the web interface."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.app = web.Application(middlewares=[IndexMiddleware()], loop=self.loop)
self.app.event_queue = asyncio.Queue()
self.app.image_encode_settings = {'format': 'png'}
self.app.wss = []
self.app.task_process_events = self.loop.create_task(process_events(self.app))
self.app.router.add_route('GET', '/websocket', handle_websocket)
self.app.router.add_static('/', STATIC_PATH)
try:
web.run_app(self.app, port=port, shutdown_timeout=1, handle_signals=False)
except KeyboardInterrupt:
pass
def put_event(self, event):
self.loop.call_soon_threadsafe(self.app.event_queue.put_nowait, event)
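# Usage sketch (hypothetical driver, not part of this module): run() blocks,
# so start it on a background thread before streaming events to it (note that
# put_event relies on self.loop, which only exists once run() has started).
#   import threading
#   ui = WebInterface()
#   threading.Thread(target=ui.run, kwargs={'port': 8000}, daemon=True).start()
#   # ...then, per training step:
#   ui.put_event(Iterate(step=1, steps=500, time=0.1, update_size=1e-3,
#                        loss=0.5, tv=0.01, image=pil_image))
#   ui.put_event(IterationFinished())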
|
{"hexsha": "69a94137ff3cff083d6ca4f5d7c1cda21ba6f304", "size": 3088, "ext": "py", "lang": "Python", "max_stars_repo_path": "web_interface.py", "max_stars_repo_name": "Alienvlg/style_transfer", "max_stars_repo_head_hexsha": "9ec69db05a2522da7e4c88b2a50d8392e2b2e958", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 122, "max_stars_repo_stars_event_min_datetime": "2016-09-02T09:25:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T04:13:36.000Z", "max_issues_repo_path": "web_interface.py", "max_issues_repo_name": "Alienvlg/style_transfer", "max_issues_repo_head_hexsha": "9ec69db05a2522da7e4c88b2a50d8392e2b2e958", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2016-09-02T09:23:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-29T01:07:22.000Z", "max_forks_repo_path": "web_interface.py", "max_forks_repo_name": "Alienvlg/style_transfer", "max_forks_repo_head_hexsha": "9ec69db05a2522da7e4c88b2a50d8392e2b2e958", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-09-16T19:20:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T13:30:23.000Z", "avg_line_length": 25.1056910569, "max_line_length": 89, "alphanum_fraction": 0.6392487047, "include": true, "reason": "import numpy", "num_tokens": 700}
|
[STATEMENT]
lemma (in is_functor) cat_cf_obj_comma_is_arrI[cat_comma_cs_intros]:
assumes "b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>"
and "ABF = [A, B, F]\<^sub>\<circ>"
and "A = [a, 0, f]\<^sub>\<circ>"
and "B = [a', 0, f']\<^sub>\<circ>"
and "F = [g, 0]\<^sub>\<circ>"
and "g : a \<mapsto>\<^bsub>\<AA>\<^esub> a'"
and "f : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b"
and "f' : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b"
and "f' \<circ>\<^sub>A\<^bsub>\<BB>\<^esub> \<FF>\<lparr>ArrMap\<rparr>\<lparr>g\<rparr> = f"
shows "ABF : A \<mapsto>\<^bsub>\<FF> \<^sub>C\<^sub>F\<down> b\<^esub> B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ABF : A \<mapsto>\<^bsub>\<FF> \<^sub>C\<^sub>F\<down> b\<^esub> B
[PROOF STEP]
proof(intro is_arrI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>
2. \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Dom\<rparr>\<lparr>ABF\<rparr> = A
3. \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Cod\<rparr>\<lparr>ABF\<rparr> = B
[PROOF STEP]
from assms(1,6,7,8)
[PROOF STATE]
proof (chain)
picking this:
b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>
g : a \<mapsto>\<^bsub>\<AA>\<^esub> a'
f : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b
f' : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b
[PROOF STEP]
show "ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>"
[PROOF STATE]
proof (prove)
using this:
b \<in>\<^sub>\<circ> \<BB>\<lparr>Obj\<rparr>
g : a \<mapsto>\<^bsub>\<AA>\<^esub> a'
f : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b
f' : \<FF>\<lparr>ObjMap\<rparr>\<lparr>a'\<rparr> \<mapsto>\<^bsub>\<BB>\<^esub> b
goal (1 subgoal):
1. ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>
[PROOF STEP]
by
(
cs_concl cs_shallow
cs_simp: assms(2,3,4,5,9) cs_intro: cat_comma_cs_intros
)
[PROOF STATE]
proof (state)
this:
ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>
goal (2 subgoals):
1. \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Dom\<rparr>\<lparr>ABF\<rparr> = A
2. \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Cod\<rparr>\<lparr>ABF\<rparr> = B
[PROOF STEP]
with assms(2)
[PROOF STATE]
proof (chain)
picking this:
ABF = [A, B, F]\<^sub>\<circ>
ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>
[PROOF STEP]
show "\<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Dom\<rparr>\<lparr>ABF\<rparr> = A" "\<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Cod\<rparr>\<lparr>ABF\<rparr> = B"
[PROOF STATE]
proof (prove)
using this:
ABF = [A, B, F]\<^sub>\<circ>
ABF \<in>\<^sub>\<circ> \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Arr\<rparr>
goal (1 subgoal):
1. \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Dom\<rparr>\<lparr>ABF\<rparr> = A &&& \<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Cod\<rparr>\<lparr>ABF\<rparr> = B
[PROOF STEP]
by (cs_concl cs_shallow cs_simp: cat_comma_cs_simps)+
[PROOF STATE]
proof (state)
this:
\<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Dom\<rparr>\<lparr>ABF\<rparr> = A
\<FF> \<^sub>C\<^sub>F\<down> b\<lparr>Cod\<rparr>\<lparr>ABF\<rparr> = B
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1640, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_Comma", "length": 8}
|
#########################
######## Imports ########
#########################
import matplotlib.pyplot as plt
import numpy as np
import json
import random
import networkx as nx
#########################
####### Constants #######
#########################
WEIGHT_MIN_DEFAULT = 0
WEIGHT_MAX_DEFAULT = 10
NUM_NODES_DEFAULT = 10
NUM_EDGES_DEFAULT = 10
#########################
#### Data Structure #####
#########################
class UserInput:
def __init__(self, numOfNodes, numOfEdges, isDirected, isMultigraph, hasSelfLoops, isWeighted, weightMin=WEIGHT_MIN_DEFAULT, weightMax=WEIGHT_MAX_DEFAULT, weightIsFloat=False):
self.numOfNodes = numOfNodes
self.numOfEdges = numOfEdges
self.isDirected = isDirected
self.isMultigraph = isMultigraph
# self.numOfConnectedComponents = numOfConnectedComponents
# self.isTree = isTree
self.hasSelfLoops = hasSelfLoops
self.isWeighted = isWeighted
if self.isWeighted:
self.weightMin = weightMin
self.weightMax = weightMax
self.weightIsFloat = weightIsFloat
@classmethod
def fromJSON(cls, data):
numOfNodes = int(data.get("numOfNodes", NUM_NODES_DEFAULT))
numOfEdges = int(data.get("numOfEdges", NUM_EDGES_DEFAULT))
isDirected = bool(data.get("isDirected", False))
isMultigraph = bool(data.get("isMultigraph", False))
# numOfConnectedComponents = bool(data.get("numOfConnectedComponents", False))
# isTree = bool(data.get("isTree", False))
hasSelfLoops = bool(data.get("hasSelfLoops", False))
isWeighted = bool(data.get("isWeighted", False))
if isWeighted:
weightMin = int(data.get('weightMin', WEIGHT_MIN_DEFAULT))
weightMax = int(data.get('weightMax', WEIGHT_MAX_DEFAULT))
weightIsFloat = bool(data.get('isFloat', False))
return cls(numOfNodes, numOfEdges, isDirected, isMultigraph, hasSelfLoops, isWeighted, weightMin, weightMax, weightIsFloat)
else:
return cls(numOfNodes, numOfEdges, isDirected, isMultigraph, hasSelfLoops, isWeighted)
@classmethod
def fromPath(cls, pathname):
with open(pathname) as json_file:
data = json.loads(json_file.read())
return cls.fromJSON(data)
@classmethod
def fromRequest(cls, request):
data = request.args.to_dict()
return cls.fromJSON(data)
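# Example input JSON (illustrative; keys mirror UserInput.fromJSON above):
# {
#     "numOfNodes": 6, "numOfEdges": 8,
#     "isDirected": false, "isMultigraph": false, "hasSelfLoops": false,
#     "isWeighted": true, "weightMin": 1, "weightMax": 9, "isFloat": false
# }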
#########################
### Helper Functions ####
#########################
def add_weights(G, userInput):
for (source, target) in G.edges:
G[source][target]['weight'] = random.uniform(userInput.weightMin, userInput.weightMax) if userInput.weightIsFloat else random.randint(userInput.weightMin, userInput.weightMax)
return G
# Remove self loops from edgeList
def remove_self_loops(edgeList):
return list(filter(lambda x : x[0] != x[1], edgeList))
# Remove directed edges from edgeList
def remove_directed_edges(edgeList):
return list(filter(lambda x : x[0] >= x[1], edgeList))
#########################
## Generator Functions ##
#########################
def generate_graph(userInput):
D = userInput.isDirected
M = userInput.isMultigraph
n = userInput.numOfNodes
allEdges = [(i, j) for i in range(n) for j in range(n)]
if (not D) and (not M):
G = generate_Graph(userInput, allEdges)
elif (not D) and (M):
G = generate_MultiGraph(userInput, allEdges)
elif (D) and (not M):
G = generate_DiGraph(userInput, allEdges)
elif (D) and (M):
G = generate_MultiDiGraph(userInput, allEdges)
if userInput.isWeighted:
G = add_weights(G, userInput)
return G
def generate_Graph(userInput, allEdges):
if not(userInput.hasSelfLoops):
allEdges = remove_self_loops(allEdges)
allEdges = remove_directed_edges(allEdges)
finalEdges = random.sample(allEdges, userInput.numOfEdges)
G = nx.from_edgelist(finalEdges, create_using=nx.Graph)
return G
def generate_MultiGraph(userInput, allEdges):
if not(userInput.hasSelfLoops):
allEdges = remove_self_loops(allEdges)
    allEdges = remove_directed_edges(allEdges)
finalEdges = []
for _ in range(userInput.numOfEdges):
finalEdges.append(random.choice(allEdges))
G = nx.from_edgelist(finalEdges, create_using=nx.MultiGraph)
return G
def generate_DiGraph(userInput, allEdges):
if not(userInput.hasSelfLoops):
allEdges = remove_self_loops(allEdges)
finalEdges = random.sample(allEdges, userInput.numOfEdges)
G = nx.from_edgelist(finalEdges, create_using=nx.DiGraph)
return G
def generate_MultiDiGraph(userInput, allEdges):
if not(userInput.hasSelfLoops):
allEdges = remove_self_loops(allEdges)
    allEdges = remove_directed_edges(allEdges)
finalEdges = []
for _ in range(userInput.numOfEdges):
finalEdges.append(random.choice(allEdges))
G = nx.from_edgelist(finalEdges, create_using=nx.MultiDiGraph)
return G
#########################
## Analysis Functions ###
#########################
def analyze_graph(G):
return {
"edge_list": get_edge_list(G),
"adjacency_list": get_adjacency_list(G),
"adjacency_matrix": get_adjacency_matrix(G),
"degree_list": get_degree_list(G),
}
def get_edge_list(G):
if not nx.is_directed(G):
return sorted(list(G.edges()) + list(map(lambda x: x[::-1], G.edges())))
else:
return sorted(list(G.edges()))
def get_adjacency_list(G):
d = {}
for (x, y) in G.edges():
d[x] = d.get(x, []) + [y]
if not nx.is_directed(G):
d[y] = d.get(y, []) + [x]
return d
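# e.g. for an undirected graph with edges [(0, 1), (1, 2)] this returns
# {0: [1], 1: [0, 2], 2: [1]}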
def get_adjacency_matrix(G):
return nx.adjacency_matrix(G).toarray()
def get_degree_list(G):
return dict(G.degree())
def draw_graph(G):
labels = nx.get_edge_attributes(G,'weight')
nx.draw(G, with_labels=True)
pos = nx.drawing.layout.spring_layout(G)
nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
plt.show()
#########################
##### Main Function #####
#########################
def main():
test_sample()
def test_sample():
path = "./samples/sample1.json"
# path = "./samples/sample2.json"
# path = "./samples/sample3.json"
userInput = UserInput.fromPath(path)
G = generate_graph(userInput)
print(analyze_graph(G))
if __name__ == "__main__":
main()
|
{"hexsha": "d0e90be64b805d21c2db04dae71300d2fda43237", "size": 6494, "ext": "py", "lang": "Python", "max_stars_repo_path": "generator.py", "max_stars_repo_name": "parmita52/graph-generator", "max_stars_repo_head_hexsha": "670d018d2694778f25c4ee2fa052fc8b33f819b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generator.py", "max_issues_repo_name": "parmita52/graph-generator", "max_issues_repo_head_hexsha": "670d018d2694778f25c4ee2fa052fc8b33f819b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-09-06T20:59:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T16:25:28.000Z", "max_forks_repo_path": "generator.py", "max_forks_repo_name": "parmita52/graph-generator", "max_forks_repo_head_hexsha": "670d018d2694778f25c4ee2fa052fc8b33f819b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.47, "max_line_length": 183, "alphanum_fraction": 0.6287342162, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1566}
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under a NVIDIA Open Source Non-commercial license
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.modules.utils as utils
from torch.utils.checkpoint import checkpoint
## Utilities
@torch.jit.script
def fuse_mul_add_mul(f, cell_states, i, g):
return f * cell_states + i * g
def chkpt_blk(cc_i, cc_f, cc_o, cc_g, cell_states):
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
cell_states = fuse_mul_add_mul(f, cell_states, i, g)
outputs = o * torch.tanh(cell_states)
return outputs, cell_states
## Standard Convolutional-LSTM Module
class ConvLSTMCell(nn.Module):
def __init__(self, input_channels, hidden_channels, kernel_size = 5, bias = True):
"""
Construction of convolutional-LSTM cell.
Arguments:
----------
(Hyper-parameters of input/output interfaces)
input_channels: int
Number of channels of the input tensor.
hidden_channels: int
Number of channels of the hidden/cell states.
        (Hyper-parameters of the convolutional operations)
kernel_size: int or (int, int)
Size of the (squared) convolutional kernel.
Note: If the size is a single scalar k, it will be mapped to (k, k)
            default: 5
bias: bool
Whether or not to add the bias in each convolutional operation.
default: True
"""
super(ConvLSTMCell, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
kernel_size = utils._pair(kernel_size)
padding = kernel_size[0] // 2, kernel_size[1] // 2
self.conv = nn.Conv2d(
in_channels = input_channels + hidden_channels,
out_channels = 4 * hidden_channels,
kernel_size = kernel_size, padding = padding, bias = bias)
        # Note: hidden/cell states are not initialized in construction
        self.hidden_states, self.cell_states = None, None
def initialize(self, inputs):
"""
Initialization of convolutional-LSTM cell.
Arguments:
----------
inputs: a 4-th order tensor of size
[batch_size, input_channels, input_height, input_width]
Input tensor of convolutional-LSTM cell.
"""
device = inputs.device # "cpu" or "cuda"
batch_size, _, height, width = inputs.size()
# initialize both hidden and cell states to all zeros
self.hidden_states = torch.zeros(batch_size,
self.hidden_channels, height, width, device = device)
self.cell_states = torch.zeros(batch_size,
self.hidden_channels, height, width, device = device)
def forward(self, inputs, first_step = False, checkpointing = False):
"""
Computation of convolutional-LSTM cell.
Arguments:
----------
inputs: a 4-th order tensor of size
[batch_size, input_channels, height, width]
Input tensor to the convolutional-LSTM cell.
first_step: bool
Whether the tensor is the first step in the input sequence.
            Note: If so, both hidden and cell states are initialized to zero tensors.
            default: False
        checkpointing: bool
            Whether to use the checkpointing technique to reduce memory expense.
            default: False
Returns:
--------
hidden_states: another 4-th order tensor of size
[batch_size, hidden_channels, height, width]
Hidden states (and outputs) of the convolutional-LSTM cell.
"""
if first_step: self.initialize(inputs)
concat_conv = self.conv(torch.cat([inputs, self.hidden_states], dim = 1))
cc_i, cc_f, cc_o, cc_g = torch.split(concat_conv, self.hidden_channels, dim = 1)
if checkpointing:
self.hidden_states, self.cell_states = checkpoint(chkpt_blk, cc_i, cc_f, cc_o, cc_g, self.cell_states)
else:
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
self.cell_states = fuse_mul_add_mul(f, self.cell_states, i, g)
self.hidden_states = o * torch.tanh(self.cell_states)
return self.hidden_states
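# Minimal usage sketch for ConvLSTMCell (assumed shapes, not from the original
# source): feed one time step at a time, resetting states on the first step.
#   cell = ConvLSTMCell(input_channels=3, hidden_channels=32)
#   seq = torch.randn(10, 4, 3, 64, 64)            # (time, batch, C, H, W)
#   for t in range(seq.size(0)):
#       h = cell(seq[t], first_step=(t == 0))      # h: (4, 32, 64, 64)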
## Convolutional Tensor-Train LSTM Module
class ConvTTLSTMCell(nn.Module):
def __init__(self,
# interface of the Conv-TT-LSTM
input_channels, hidden_channels,
# convolutional tensor-train network
order = 3, steps = 3, ranks = 8,
# convolutional operations
kernel_size = 5, bias = True):
"""
Initialization of convolutional tensor-train LSTM cell.
Arguments:
----------
(Hyper-parameters of the input/output channels)
input_channels: int
Number of input channels of the input tensor.
hidden_channels: int
Number of hidden/output channels of the output tensor.
            Note: the number of hidden_channels is typically equal to that of input_channels.
(Hyper-parameters of the convolutional tensor-train format)
order: int
The order of convolutional tensor-train format (i.e. the number of core tensors).
default: 3
steps: int
The total number of past steps used to compute the next step.
default: 3
ranks: int
The ranks of convolutional tensor-train format (where all ranks are assumed to be the same).
default: 8
(Hyper-parameters of the convolutional operations)
kernel_size: int or (int, int)
Size of the (squared) convolutional kernel.
Note: If the size is a single scalar k, it will be mapped to (k, k)
default: 5
bias: bool
Whether or not to add the bias in each convolutional operation.
default: True
"""
super(ConvTTLSTMCell, self).__init__()
## Input/output interfaces
self.input_channels = input_channels
self.hidden_channels = hidden_channels
## Convolutional tensor-train network
self.steps = steps
self.order = order
self.lags = steps - order + 1
## Convolutional operations
kernel_size = utils._pair(kernel_size)
padding = kernel_size[0] // 2, kernel_size[1] // 2
Conv2d = lambda in_channels, out_channels: nn.Conv2d(
in_channels = in_channels, out_channels = out_channels,
kernel_size = kernel_size, padding = padding, bias = bias)
## Convolutional layers
self.layers = nn.ModuleList()
self.layers_ = nn.ModuleList()
for l in range(order):
self.layers.append(Conv2d(
in_channels = ranks if l < order - 1 else ranks + input_channels,
out_channels = ranks if l < order - 1 else 4 * hidden_channels))
self.layers_.append(Conv2d(
in_channels = self.lags * hidden_channels, out_channels = ranks))
def initialize(self, inputs):
"""
Initialization of the hidden/cell states of the convolutional tensor-train cell.
Arguments:
----------
inputs: 4-th order tensor of size
[batch_size, input_channels, height, width]
Input tensor to the convolutional tensor-train LSTM cell.
"""
device = inputs.device # "cpu" or "cuda"
batch_size, _, height, width = inputs.size()
# initialize both hidden and cell states to all zeros
self.hidden_states = [torch.zeros(batch_size, self.hidden_channels,
height, width, device = device) for t in range(self.steps)]
self.hidden_pointer = 0 # pointing to the position to be updated
self.cell_states = torch.zeros(batch_size,
self.hidden_channels, height, width, device = device)
def forward(self, inputs, first_step = False, checkpointing = False):
"""
Computation of the convolutional tensor-train LSTM cell.
Arguments:
----------
inputs: a 4-th order tensor of size
[batch_size, input_channels, height, width]
Input tensor to the convolutional-LSTM cell.
first_step: bool
Whether the tensor is the first step in the input sequence.
            Note: If so, both hidden and cell states are initialized to zero tensors.
            default: False
        checkpointing: bool
            Whether to use the checkpointing technique to reduce memory expense.
            default: False
Returns:
--------
hidden_states: a list of 4-th order tensor of size
[batch_size, input_channels, height, width]
Hidden states (and outputs) of the convolutional-LSTM cell.
"""
        if first_step: self.initialize(inputs) # initialize the states at the first step
## (1) Convolutional tensor-train module
for l in range(self.order):
input_pointer = self.hidden_pointer if l == 0 else (input_pointer + 1) % self.steps
input_states = self.hidden_states[input_pointer:] + self.hidden_states[:input_pointer]
input_states = input_states[:self.lags]
input_states = torch.cat(input_states, dim = 1)
input_states = self.layers_[l](input_states)
if l == 0:
temp_states = input_states
else: # if l > 0:
temp_states = input_states + self.layers[l-1](temp_states)
## (2) Standard convolutional-LSTM module
concat_conv = self.layers[-1](torch.cat([inputs, temp_states], dim = 1))
cc_i, cc_f, cc_o, cc_g = torch.split(concat_conv, self.hidden_channels, dim = 1)
if checkpointing:
outputs, self.cell_states = checkpoint(chkpt_blk, cc_i, cc_f, cc_o, cc_g, self.cell_states)
else:
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
self.cell_states = fuse_mul_add_mul(f, self.cell_states, i, g)
outputs = o * torch.tanh(self.cell_states)
self.hidden_states[self.hidden_pointer] = outputs
self.hidden_pointer = (self.hidden_pointer + 1) % self.steps
return outputs
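# The tensor-train cell is driven the same way (assumed shapes; typically
# input_channels == hidden_channels so past hidden states can stand in for
# inputs):
#   cell = ConvTTLSTMCell(input_channels=32, hidden_channels=32,
#                         order=3, steps=3, ranks=8)
#   for t in range(seq.size(0)):
#       h = cell(seq[t], first_step=(t == 0))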
|
{"hexsha": "2b8796b9f1b3d53f78d5d1a46f6b8d89a4466598", "size": 10987, "ext": "py", "lang": "Python", "max_stars_repo_path": "conv-tt-lstm/code_opt/utils/convlstmcell.py", "max_stars_repo_name": "kidrabit/Data-Visualization-Lab-RND", "max_stars_repo_head_hexsha": "baa19ee4e9f3422a052794e50791495632290b36", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T01:53:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T01:53:34.000Z", "max_issues_repo_path": "conv-tt-lstm/code_opt/utils/convlstmcell.py", "max_issues_repo_name": "kidrabit/Data-Visualization-Lab-RND", "max_issues_repo_head_hexsha": "baa19ee4e9f3422a052794e50791495632290b36", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "conv-tt-lstm/code_opt/utils/convlstmcell.py", "max_forks_repo_name": "kidrabit/Data-Visualization-Lab-RND", "max_forks_repo_head_hexsha": "baa19ee4e9f3422a052794e50791495632290b36", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7560137457, "max_line_length": 115, "alphanum_fraction": 0.5967962137, "include": true, "reason": "import numpy", "num_tokens": 2370}
|
# Author: Vincent Arel-Bundock
# Contact: varel@umich.edu
# License: BSD-3
# Original code from the python statsmodels project
# https://github.com/statsmodels/statsmodels
function qreg_coef(y, X::Matrix, q::Real, method::IRLS)
n, p = size(X)
xstar = copy(X)
diff = Inf
beta0 = zeros(p)
beta = zeros(p)
xtx = Array{Float64}(undef, p, p)
xty = Array{Float64}(undef, p)
xbeta = Array{Float64}(undef, n)
resid = Array{Float64}(undef, n)
    for itr in 1:method.maxiter
        # stop iterating once the largest coefficient change is within tolerance
        diff <= method.tol && break
        copyto!(beta0, beta)
        mul!(xtx, xstar', X)
        mul!(xty, xstar', y)
        beta .= xtx \ xty
        mul!(xbeta, X, beta)
        @. resid = y - xbeta
        for i in 1:n
            if abs(resid[i]) < method.threshold
                @inbounds resid[i] = sign(resid[i]) * method.threshold
            end
            if resid[i] < 0
                @inbounds resid[i] = abs(q * resid[i])
            else
                @inbounds resid[i] = abs((1 - q) * resid[i])
            end
        end
        xstar .= X ./ resid
        diff = norm(beta0 - beta, Inf)
    end
return beta
end
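# Usage sketch (illustrative; assumes an `IRLS` instance `method` carrying the
# `maxiter`, `tol` and `threshold` fields used above):
#   X = hcat(ones(length(y)), x)          # design matrix with intercept
#   beta = qreg_coef(y, X, 0.5, method)   # median (q = 0.5) regression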
|
{"hexsha": "9ad5b37b1452f0f637565d4f1d0bd0bde6fb83f7", "size": 1239, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/IRLS.jl", "max_stars_repo_name": "andreasnoack/QuantileRegressions.jl", "max_stars_repo_head_hexsha": "581981d0ee599c43cad8c3917f39018f2ef12720", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-01-29T11:46:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T13:25:34.000Z", "max_issues_repo_path": "src/IRLS.jl", "max_issues_repo_name": "andreasnoack/QuantileRegressions.jl", "max_issues_repo_head_hexsha": "581981d0ee599c43cad8c3917f39018f2ef12720", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-03-04T02:15:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T21:02:35.000Z", "max_forks_repo_path": "src/IRLS.jl", "max_forks_repo_name": "andreasnoack/QuantileRegressions.jl", "max_forks_repo_head_hexsha": "581981d0ee599c43cad8c3917f39018f2ef12720", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-03-07T11:30:27.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-23T09:38:32.000Z", "avg_line_length": 25.8125, "max_line_length": 74, "alphanum_fraction": 0.4939467312, "num_tokens": 354}
|
import os.path
import shutil
import unittest
import warnings
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, Optional, Type, Union
import fsspec
import numpy as np
import xarray as xr
import xcube.core.mldataset
from test.s3test import MOTO_SERVER_ENDPOINT_URL
from test.s3test import S3Test
from xcube.core.gridmapping import GridMapping
from xcube.core.mldataset import MultiLevelDataset
from xcube.core.new import new_cube
from xcube.core.store import DataDescriptor
from xcube.core.store import DataStoreError
from xcube.core.store import DatasetDescriptor
from xcube.core.store import MultiLevelDatasetDescriptor
from xcube.core.store import MutableDataStore
from xcube.core.store.fs.registry import new_fs_data_store
from xcube.core.store.fs.store import FsDataStore
from xcube.util.temp import new_temp_dir
from xcube.util.tilegrid import TileGrid
ROOT_DIR = 'xcube'
DATA_PATH = 'testing/data'
def new_cube_data():
width = 360
height = 180
time_periods = 5
shape = (time_periods, height, width)
var_a = np.full(shape, 8.5, dtype=np.float64)
var_b = np.full(shape, 9.5, dtype=np.float64)
var_c = np.full(shape, 255, dtype=np.uint8)
var_a[0, 0, 0] = np.nan
var_b[0, 0, 0] = np.nan
cube = new_cube(width=width,
height=height,
x_name='x',
y_name='y',
crs='CRS84',
crs_name='spatial_ref',
time_periods=time_periods,
variables=dict(var_a=var_a,
var_b=var_b,
var_c=var_c))
# Set var_b encodings
cube.var_b.encoding['dtype'] = np.int16
cube.var_b.encoding['_FillValue'] = -9999
cube.var_b.encoding['scale_factor'] = 0.001
cube.var_b.encoding['add_offset'] = -10
return cube.chunk(dict(time=1, y=90, x=180))
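# Note: with the encoding above, var_b is packed on write as
# int16((value - add_offset) / scale_factor), e.g. 9.5 -> (9.5 - (-10)) / 0.001
# = 19500, which is exactly what test_open_packed() asserts below.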
class NewCubeDataTestMixin(unittest.TestCase):
path = f'{DATA_PATH}/data.zarr'
@classmethod
def setUpClass(cls) -> None:
data = new_cube_data()
data.to_zarr(cls.path, mode="w")
@classmethod
def tearDownClass(cls) -> None:
shutil.rmtree(cls.path)
def test_open_unpacked(self):
"""open data un-packed (the default)"""
data_1 = xr.open_zarr(self.path, mask_and_scale=True)
self.assertEqual(np.float64, data_1.var_a.dtype)
self.assertEqual(np.float32, data_1.var_b.dtype)
self.assertEqual(np.uint8, data_1.var_c.dtype)
self.assertTrue(np.isnan(data_1.var_a[0, 0, 0]))
self.assertEqual(8.5, data_1.var_a[1, 0, 0].values)
self.assertTrue(np.isnan(data_1.var_b[0, 0, 0]))
self.assertEqual(9.5, data_1.var_b[1, 0, 0].values)
self.assertEqual(255, data_1.var_c[0, 0, 0].values)
self.assertEqual(255, data_1.var_c[1, 0, 0].values)
def test_open_packed(self):
"""open data packed, ignoring related encodings"""
data_2 = xr.open_zarr(self.path, mask_and_scale=False)
self.assertEqual(np.float64, data_2.var_a.dtype)
self.assertEqual(np.int16, data_2.var_b.dtype)
self.assertEqual(np.uint8, data_2.var_c.dtype)
self.assertTrue(np.isnan(data_2.var_a[0, 0, 0]))
self.assertEqual(8.5, data_2.var_a[1, 0, 0].values)
self.assertEqual(-9999, data_2.var_b[0, 0, 0].values)
self.assertEqual((9.5 - (-10)) / 0.001, data_2.var_b[1, 0, 0].values)
self.assertEqual(255, data_2.var_c[0, 0, 0].values)
self.assertEqual(255, data_2.var_c[1, 0, 0].values)
# noinspection PyUnresolvedReferences,PyPep8Naming
class FsDataStoresTestMixin(ABC):
@abstractmethod
def create_data_store(self) -> FsDataStore:
pass
@classmethod
def prepare_fs(cls, fs: fsspec.AbstractFileSystem, root: str):
if fs.isdir(root):
# print(f'{fs.protocol}: deleting {root}')
fs.delete(root, recursive=True)
# print(f'{fs.protocol}: making root {root}')
fs.mkdirs(root)
# Write a text file into each subdirectory, so
# we also test that store.get_data_ids() scans
# recursively.
dir_path = root
for subdir_name in DATA_PATH.split('/'):
dir_path += '/' + subdir_name
# print(f'{fs.protocol}: making {dir_path}')
fs.mkdir(dir_path)
file_path = dir_path + '/README.md'
# print(f'{fs.protocol}: writing {file_path}')
with fs.open(file_path, 'w') as fp:
fp.write('\n')
def test_mldataset_levels(self):
data_store = self.create_data_store()
self._assert_multi_level_dataset_format_supported(data_store)
self._assert_multi_level_dataset_format_with_link_supported(
data_store)
self._assert_multi_level_dataset_format_with_tile_size(data_store)
def test_dataset_zarr(self):
data_store = self.create_data_store()
self._assert_dataset_format_supported(data_store, '.zarr')
def test_dataset_netcdf(self):
data_store = self.create_data_store()
self._assert_dataset_format_supported(data_store, '.nc')
# TODO: add assertGeoDataFrameSupport
def _assert_multi_level_dataset_format_supported(
self,
data_store: MutableDataStore
):
self._assert_dataset_supported(
data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
assert_data_ok=self._assert_multi_level_dataset_data_ok
)
# Test that use_saved_levels works
self._assert_dataset_supported(
data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
write_params=dict(
use_saved_levels=True,
),
assert_data_ok=self._assert_multi_level_dataset_data_ok
)
def _assert_multi_level_dataset_format_with_link_supported(
self,
data_store: MutableDataStore
):
base_dataset = new_cube_data()
base_dataset_id = f'{DATA_PATH}/base-ds.zarr'
data_store.write_data(base_dataset, base_dataset_id)
# Test that base_dataset_id works
self._assert_dataset_supported(
data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
write_params=dict(
base_dataset_id=base_dataset_id,
),
assert_data_ok=self._assert_multi_level_dataset_data_ok
)
# Test that base_dataset_id + use_saved_levels works
self._assert_dataset_supported(
data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
write_params=dict(
base_dataset_id=base_dataset_id,
use_saved_levels=True,
),
assert_data_ok=self._assert_multi_level_dataset_data_ok
)
data_store.delete_data(base_dataset_id)
def _assert_multi_level_dataset_data_ok(
self,
ml_dataset: xcube.core.mldataset.MultiLevelDataset
):
self.assertEqual(2, ml_dataset.num_levels)
self.assertIsInstance(ml_dataset.tile_grid, TileGrid)
self.assertIsInstance(ml_dataset.grid_mapping, GridMapping)
self.assertIsInstance(ml_dataset.base_dataset, xr.Dataset)
self.assertIsInstance(ml_dataset.ds_id, str)
# assert encoding
for level in range(ml_dataset.num_levels):
dataset = ml_dataset.get_dataset(level)
self.assertEqual({'var_a',
'var_b',
'var_c',
'spatial_ref'},
set(dataset.data_vars))
# assert dtype is as expected
self.assertEqual(np.float64, dataset.var_a.dtype)
self.assertEqual(np.float32, dataset.var_b.dtype)
self.assertEqual(np.uint8, dataset.var_c.dtype)
# assert dtype encoding is as expected
self.assertEqual(np.float64,
dataset.var_a.encoding.get('dtype'))
self.assertEqual(np.int16,
dataset.var_b.encoding.get('dtype'))
self.assertEqual(np.uint8,
dataset.var_c.encoding.get('dtype'))
# assert _FillValue encoding is as expected
self.assertTrue(np.isnan(
dataset.var_a.encoding.get('_FillValue')
))
self.assertEqual(-9999,
dataset.var_b.encoding.get('_FillValue'))
self.assertEqual(None,
dataset.var_c.encoding.get('_FillValue'))
def _assert_multi_level_dataset_format_with_tile_size(
self,
data_store: MutableDataStore
):
base_dataset = new_cube_data()
base_dataset_id = f'{DATA_PATH}/base-ds.zarr'
data_store.write_data(base_dataset, base_dataset_id)
# Test that base_dataset_id works
self._assert_dataset_supported(data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
open_params=dict(
cache_size=2 ** 20,
),
write_params=dict(
tile_size=90,
))
# Test that base_dataset_id + use_saved_levels works
self._assert_dataset_supported(data_store,
'.levels',
'mldataset',
MultiLevelDataset,
MultiLevelDatasetDescriptor,
open_params=dict(
cache_size=2 ** 20,
),
write_params=dict(
tile_size=90,
use_saved_levels=True,
))
data_store.delete_data(base_dataset_id)
def _assert_dataset_format_supported(self,
data_store: MutableDataStore,
filename_ext: str):
self._assert_dataset_supported(data_store,
filename_ext,
'dataset',
xr.Dataset,
DatasetDescriptor)
def _assert_dataset_supported(
self,
data_store: MutableDataStore,
filename_ext: str,
expected_data_type_alias: str,
expected_type: Union[Type[xr.Dataset],
Type[MultiLevelDataset]],
expected_descriptor_type: Union[
Type[DatasetDescriptor],
Type[MultiLevelDatasetDescriptor]
],
write_params: Optional[Dict[str, Any]] = None,
open_params: Optional[Dict[str, Any]] = None,
assert_data_ok: Optional[Callable[[Any], Any]] = None
):
"""
Call all DataStore operations to ensure data of type
        xr.Dataset / MultiLevelDataset is supported by *data_store*.
:param data_store: The filesystem data store instance.
:param filename_ext: Filename extension that identifies
a supported dataset format.
:param expected_data_type_alias: The expected data type alias.
:param expected_type: The expected data type.
:param expected_descriptor_type: The expected data descriptor type.
:param write_params: Optional write parameters
:param open_params: Optional open parameters
:param assert_data_ok: Optional function to assert read data is ok
"""
data_id = f'{DATA_PATH}/ds{filename_ext}'
write_params = write_params or {}
open_params = open_params or {}
self.assertIsInstance(data_store, MutableDataStore)
self.assertEqual({'dataset', 'mldataset', 'geodataframe'},
set(data_store.get_data_types()))
with self.assertRaises(DataStoreError):
data_store.get_data_types_for_data(data_id)
self.assertEqual(False, data_store.has_data(data_id))
self.assertNotIn(data_id, set(data_store.get_data_ids()))
data = new_cube_data()
written_data_id = data_store.write_data(data, data_id, **write_params)
self.assertEqual(data_id, written_data_id)
self.assertEqual({expected_data_type_alias},
set(data_store.get_data_types_for_data(data_id)))
self.assertEqual(True, data_store.has_data(data_id))
self.assertIn(data_id, set(data_store.get_data_ids()))
data_descriptors = list(data_store.search_data(
data_type=expected_type)
)
self.assertEqual(1, len(data_descriptors))
self.assertIsInstance(data_descriptors[0], DataDescriptor)
self.assertIsInstance(data_descriptors[0], expected_descriptor_type)
data = data_store.open_data(data_id, **open_params)
self.assertIsInstance(data, expected_type)
if assert_data_ok:
assert_data_ok(data)
try:
data_store.delete_data(data_id)
except PermissionError as e: # May occur on win32 due to fsspec
warnings.warn(f'{e}')
return
with self.assertRaises(DataStoreError):
data_store.get_data_types_for_data(data_id)
self.assertEqual(False, data_store.has_data(data_id))
self.assertNotIn(data_id, set(data_store.get_data_ids()))
class FileFsDataStoresTest(FsDataStoresTestMixin, unittest.TestCase):
def create_data_store(self) -> FsDataStore:
root = os.path.join(new_temp_dir(prefix='xcube'), ROOT_DIR)
self.prepare_fs(fsspec.filesystem('file'), root)
return new_fs_data_store('file', root=root, max_depth=3)
class MemoryFsDataStoresTest(FsDataStoresTestMixin, unittest.TestCase):
def create_data_store(self) -> FsDataStore:
root = ROOT_DIR
self.prepare_fs(fsspec.filesystem('memory'), root)
return new_fs_data_store('memory', root=root, max_depth=3)
class S3FsDataStoresTest(FsDataStoresTestMixin, S3Test):
def create_data_store(self) -> FsDataStore:
root = ROOT_DIR
storage_options = dict(
anon=False,
client_kwargs=dict(
endpoint_url=MOTO_SERVER_ENDPOINT_URL,
)
)
self.prepare_fs(fsspec.filesystem('s3', **storage_options), root)
return new_fs_data_store('s3',
root=root,
max_depth=3,
storage_options=storage_options)
|
{"hexsha": "c35cf5e7374172c16dbf0cb5e4a787364cc00476", "size": 15411, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/core/store/fs/test_registry.py", "max_stars_repo_name": "bcdev/xcube", "max_stars_repo_head_hexsha": "9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/core/store/fs/test_registry.py", "max_issues_repo_name": "bcdev/xcube", "max_issues_repo_head_hexsha": "9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/core/store/fs/test_registry.py", "max_forks_repo_name": "bcdev/xcube", "max_forks_repo_head_hexsha": "9d275ef3baef8fbcea5c1fbbfb84c3d0164aecd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3358208955, "max_line_length": 78, "alphanum_fraction": 0.5930179742, "include": true, "reason": "import numpy", "num_tokens": 3147}
|
using Test
using Bigsimr
using LinearAlgebra
using Distributions
@testset "Correlation Functions" begin
r_negdef = [
1.00 0.82 0.56 0.44
0.82 1.00 0.28 0.85
0.56 0.28 1.00 0.22
0.44 0.85 0.22 1.00
]
@testset "Nearest positive definite correlation matrix" begin
r = cor_nearPD(r_negdef)
@test Bigsimr.iscorrelation(r)
# Must respect input eltype
for T in [Float64, Float32, Float64]
@test eltype(cor_nearPD(Matrix{T}(r_negdef))) === T
end
end
@testset "Nearest positive semi-definite correlation matrix" begin
r = cor_nearPD(r_negdef, 0.0)
λ = eigvals(r)
@test issymmetric(r)
@test all(λ .≥ 0)
@test all(diag(r) .== 1.0)
@test all(-1.0 .≤ r .≤ 1.0)
end
@testset "Fast near positive definite correlation matrix" begin
r = cor_fastPD(r_negdef)
@test Bigsimr.iscorrelation(r)
# Must respect input eltype
test_types = [Float64, Float32, Float16]
for T in test_types
@test eltype(cor_fastPD(Matrix{T}(r_negdef))) === T
end
end
end
@testset "Random Correlation Generation" begin
@testset "Random postive definite correlation matrix" begin
r = cor_randPD(100)
@test Bigsimr.iscorrelation(r)
# The element type must be respected
test_types = [Float64, Float32, Float16]
for T in test_types
@test eltype(cor_randPD(T, 4)) === T
end
# Must work for numbers with integer representations
test_types = [Float64, Float32, Float16, Rational, Int64, Int32, Int16]
for T in test_types
@test_nowarn cor_randPD(T(4))
@test_nowarn cor_randPD(Float64, T(4))
@test_nowarn cor_randPD(Float32, T(4))
@test_nowarn cor_randPD(Float16, T(4))
for S in test_types
@test_nowarn cor_randPD(T(4), S(3))
@test_nowarn cor_randPD(Float64, T(4), S(3))
@test_nowarn cor_randPD(Float32, T(4), S(3))
@test_nowarn cor_randPD(Float16, T(4), S(3))
end
end
# `d` must not be less than 1
@test_throws AssertionError cor_randPD(-1)
@test_throws AssertionError cor_randPD(Float64, -1)
@test_throws AssertionError cor_randPD(Float32, -1)
@test_throws AssertionError cor_randPD(Float16, -1)
# `k` must not be larger than `d`
@test_throws AssertionError cor_randPD(4, 5)
@test_throws AssertionError cor_randPD(Float64, 4, 5)
@test_throws AssertionError cor_randPD(Float32, 4, 5)
@test_throws AssertionError cor_randPD(Float16, 4, 5)
# `k` must not be less than 1
@test_throws AssertionError cor_randPD(4, 0)
@test_throws AssertionError cor_randPD(Float64, 4, 0)
@test_throws AssertionError cor_randPD(Float32, 4, 0)
@test_throws AssertionError cor_randPD(Float16, 4, 0)
end
@testset "Random positive semi-definite correlation matrix" begin
r = cor_randPSD(100)
λ = eigvals(r)
@test issymmetric(r)
@test all(λ .≥ 0)
@test all(diag(r) .== 1.0)
@test all(-1.0 .≤ r .≤ 1.0)
# The element type must be respected
test_types = [Float64, Float32, Float16]
for T in test_types
@test eltype(cor_randPSD(T, 4)) === T
end
# Must work for numbers with integer representations
test_types = [Float64, Float32, Float16, Rational, Int64, Int32, Int16]
for T in test_types
@test_nowarn cor_randPSD(T(4))
@test_nowarn cor_randPSD(Float64, T(4))
@test_nowarn cor_randPSD(Float32, T(4))
@test_nowarn cor_randPSD(Float16, T(4))
for S in test_types
@test_nowarn cor_randPSD(T(4), S(3))
@test_nowarn cor_randPSD(Float64, T(4), S(3))
@test_nowarn cor_randPSD(Float32, T(4), S(3))
@test_nowarn cor_randPSD(Float16, T(4), S(3))
end
end
# `d` must not be less than 1
@test_throws AssertionError cor_randPSD(-1)
@test_throws AssertionError cor_randPSD(Float64, -1)
@test_throws AssertionError cor_randPSD(Float32, -1)
@test_throws AssertionError cor_randPSD(Float16, -1)
# `k` must not be larger than `d`
@test_throws AssertionError cor_randPSD(4, 5)
@test_throws AssertionError cor_randPSD(Float64, 4, 5)
@test_throws AssertionError cor_randPSD(Float32, 4, 5)
@test_throws AssertionError cor_randPSD(Float16, 4, 5)
# `k` must not be less than 1
@test_throws AssertionError cor_randPSD(4, 0)
@test_throws AssertionError cor_randPSD(Float64, 4, 0)
@test_throws AssertionError cor_randPSD(Float32, 4, 0)
@test_throws AssertionError cor_randPSD(Float16, 4, 0)
end
end
@testset "Correlation Utilities" begin
cor_types = (Pearson, Spearman, Kendall)
@testset "Correlation calculation" begin
# Must work for any floating point matrix or vector pair
test_types = [Float64, Float32, Float16]
for T in test_types
A = rand(T, 200, 4)
x, y = rand(T, 100), rand(T, 100)
for C in cor_types
@test_nowarn cor(A, C)
@test_nowarn cor(x, y, C)
@test_nowarn cor_fast(A, C)
end
end
end
@testset "Correlation to correlation conversion" begin
# Converting type A -> A must result in the same matrix
for C in cor_types
r = cor_randPD(4)
@test r == cor_convert(r, C, C)
end
# Must map (-1, 0, 1) onto itself within numerical error
for C1 in cor_types
for C2 in cor_types
@test cor_convert( 0.0, C1, C2) == 0.0
@test cor_convert( 1.0, C1, C2) ≤ 1.0
@test cor_convert( 1.0, C1, C2) ≈ 1.0
@test cor_convert(-1.0, C1, C2) ≥ -1.0
@test cor_convert(-1.0, C1, C2) ≈ -1.0
end
end
# Must work for each type
test_types = [Float64, Float32, Float16, Rational]
for T in test_types
for C1 in cor_types
for C2 in cor_types
@test_nowarn cor_convert(T(0.5), C1, C2)
end
end
end
# Must respect these input eltypes
test_types = [Float64, Float32, Float16]
for T in test_types
for C1 in cor_types
for C2 in cor_types
@test eltype(cor_convert(T(0.5), C1, C2)) === T
end
end
end
end
@testset "Constrain to Correlation" begin
test_types = [Float64, Float32, Float16]
for T in test_types
A = rand(T, 10, 10)
@test_nowarn cor_constrain(A)
@test eltype(cor_constrain(A)) === T
end
end
@testset "Covariance to Correlation" begin
test_types = [Float64, Float32, Float16]
for T in test_types
R = cor_randPSD(T, 4, 3)
@test_nowarn cov2cor(R)
@test eltype(cov2cor(R)) === T
end
end
@testset "Correlation Bounds" begin
A, B = NegativeBinomial(20, 0.2), LogNormal(3, 1)
dist_types = (A, B)
# Must work for any univariate distribution and correlation type
for D1 in dist_types
for D2 in dist_types
for C in cor_types
@test_nowarn cor_bounds(D1, D2, C)
end
end
end
# Must work for any number with an integer representation
test_types = [Float64, Float32, Float16, Int64, Int32, Int16]
for T in test_types
@test_nowarn cor_bounds(A, B, n_samples=T(10_000))
end
test_types = [Float64, Float32]
for T in test_types
@test_throws InexactError cor_bounds(A, B, n_samples=T(10_000.5))
end
end
end
|
{"hexsha": "44ea4535361dbaeb7325d95b3c7e97982cd1f262", "size": 8181, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Correlation.jl", "max_stars_repo_name": "adknudson/MvSim.jl", "max_stars_repo_head_hexsha": "6c3085289a5e23441f5f0db90f2b3cbbb9b49afc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-12T12:27:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-12T12:27:15.000Z", "max_issues_repo_path": "test/Correlation.jl", "max_issues_repo_name": "adknudson/MvSim.jl", "max_issues_repo_head_hexsha": "6c3085289a5e23441f5f0db90f2b3cbbb9b49afc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-07-08T20:07:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-16T01:56:40.000Z", "max_forks_repo_path": "test/Correlation.jl", "max_forks_repo_name": "adknudson/bigsimr.jl", "max_forks_repo_head_hexsha": "1843a0607bcaee9b7724842df22aa42e176c910c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8554216867, "max_line_length": 79, "alphanum_fraction": 0.5753575358, "num_tokens": 2366}
|
#!/usr/bin/env python
import sys
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from collections import Counter
import math
from anti_instagram.calcLstsqTransform import calcTransform
from anti_instagram.AntiInstagram import *
from anti_instagram.scale_and_shift import *
# from .scale_and_shift import scaleandshift
# from .scale_and_shift import scaleandshift2
from anti_instagram.simpleColorBalanceClass import *
from colorBalanceKMeans import *
from outlierEstimation import *
class kMeanClass:
""" This class gives the ability to use the kMeans alg. with different numbers of initial centers """
input_image = []
resized_image = []
blurred_image = []
image_array = []
num_centers = -1
blur_alg = []
fac_resize = -1
blur_kernel = -1
trained_centers = []
labels = []
labelcount = Counter()
color_array = []
color_image_array = []
# initialize
def __init__(self, inputImage, numCenters, blurAlg, resize, blurKer):
self.input_image = inputImage
self.num_centers = int(numCenters)
self.blur_alg = blurAlg
self.fac_resize = float(resize)
self.blur_kernel = int(blurKer)
self.shiftB = None
self.shiftG = None
self.shiftR = None
# set up array for center colors
self.color_image_array = np.zeros((self.num_centers, 200, 200, 3), np.uint8)
print('created instance of kMeans with arguments:')
print(' number of centers = ' + str(self.num_centers))
print(' blur algorithm = ' + str(self.blur_alg))
print(' resize factor = ' + str(self.fac_resize))
print(' blurring kernel size = ' + str(self.blur_kernel))
# re-shape input image for kMeans
def _getimgdatapts(self, cv2img):
x, y, p = cv2img.shape
img_geom = cv2img[0:(x - 1), :, :]  # drop the last row
x_new, y_new, p = img_geom.shape
cv2_tpose = img_geom.transpose()
cv2_arr_tpose = np.reshape(cv2_tpose, [p, x_new * y_new])
npdata = np.transpose(cv2_arr_tpose)
return npdata
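# Hedged illustration (added comment, not in the original file): the
# transpose/reshape above turns an (x, y, 3) BGR image into an (x*y, 3)
# array of color points. Up to row ordering it matches the one-liner
#     pts = img.reshape(-1, 3)
# and k-means is invariant to row order, so both forms cluster identically.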
def _blurImg(self):
# blur image using median:
if self.blur_alg == 'median':
self.blurred_image = cv2.medianBlur(self.resized_image, self.blur_kernel)
# blur image using gaussian:
elif self.blur_alg == 'gaussian':
self.blurred_image = cv2.GaussianBlur(self.resized_image, (self.blur_kernel, self.blur_kernel), 0)
def _plotColors(self):
# loop over all centers
for center in np.arange(self.num_centers):
# get the center color (BGR from OpenCV, reversed to RGB for matplotlib)
color_i = tuple(
[self.trained_centers[center, 2], self.trained_centers[center, 1], self.trained_centers[center, 0]])
self.color_array.append(color_i)
self.color_image_array[center, :] = color_i
plotRows = int(math.ceil(self.num_centers / 2.0))
f, axarr = plt.subplots(plotRows, 2)
for row in range(plotRows):
if self.num_centers % 2 == 0:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
if row != plotRows - 1:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].axis('off')
print(self.color_array)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
# apply kMeans alg
def applyKM(self):
# resize image
self.resized_image = cv2.resize(self.input_image, (0, 0), fx=self.fac_resize, fy=self.fac_resize)
print('resized image!')
# blur image
self._blurImg()
print('blurred image!')
# self.blurred_image, self.shiftB, self.shiftG, self.shiftR = blackBalance(self.blurred_image)
# prepare KMeans
kmc = KMeans(n_clusters=self.num_centers, init='k-means++', max_iter=20)
# try out color balance first
# self.blurred_image = simplest_cb(self.blurred_image, 1) # percentages around 1% are normal
cv2.namedWindow('blurred', flags=cv2.WINDOW_NORMAL)
cv2.imshow('blurred', self.blurred_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# prepare data points
self.image_array = self._getimgdatapts(self.blurred_image)
# debug
print(self.image_array.shape)
# run KMeans
kmc.fit(self.image_array)
# get centers, labels and labelcount from KMeans
self.trained_centers = kmc.cluster_centers_
self.labels = kmc.labels_
for i in np.arange(self.num_centers):
self.labelcount[i] = np.sum(self.labels == i)
# plot colors
self._plotColors()
def determineColor(self, withRed, trained_centers):
# define the true centers (BGR order). These colors are preset: the color
# transformation tries to map the picture so that black areas become true
# black. The same applies to yellow, white and (if enabled) red.
trueBlack = [60, 60, 60]
if (withRed):
trueRed = [60, 60, 240]
trueYellow = [50, 240, 240]
trueWhite = [240, 240, 240]
errorBlack = np.zeros(self.num_centers)
errorYellow = np.zeros(self.num_centers)
errorWhite = np.zeros(self.num_centers)
if (withRed):
errorRed = np.zeros(self.num_centers)
for i in range(self.num_centers):
print(trained_centers[i])
errorBlack[i] = np.linalg.norm(trueBlack - trained_centers[i])
errorYellow[i] = np.linalg.norm(trueYellow - trained_centers[i])
errorWhite[i] = np.linalg.norm(trueWhite - trained_centers[i])
if (withRed):
errorRed[i] = np.linalg.norm(trueRed - trained_centers[i])
print "black error:" + str(errorBlack)
print "yellow error:" + str(errorYellow)
print "white error:" + str(errorWhite)
print "red error:" + str(errorRed)
nTrueCenters = 3
errorBlackSortedIdx = np.argsort(errorBlack)
errorYellowSortedIdx = np.argsort(errorYellow)
errorWhiteSortedIdx = np.argsort(errorWhite)
if (withRed):
errorRedSortedIdx = np.argsort(errorRed)
if (withRed):
nTrueCenters = 4
ListOfIndices = []
blackIdxFound = False
whiteIdxFound = False
yellowIdxFound = False
if (withRed):
redIdxFound = False
centersFound = False
index = 0
print "errorBlackSortedIdx: " + str(errorBlackSortedIdx)
print "errorYellowSortedIdx: " + str(errorYellowSortedIdx)
print "errorWhiteSortedIdx: " + str(errorWhiteSortedIdx)
print "errorRedSortedIdx: " + str(errorRedSortedIdx)
while (not centersFound):
if errorBlackSortedIdx[index] not in ListOfIndices and not blackIdxFound:
ListOfIndices.append(errorBlackSortedIdx[index])
print str(index) + " in black " + str(ListOfIndices)
blackIdxFound = True
idxBlack = errorBlackSortedIdx[index]
if errorWhiteSortedIdx[index] not in ListOfIndices and not whiteIdxFound:
ListOfIndices.append(errorWhiteSortedIdx[index])
print str(index) + " in white " + str(ListOfIndices)
whiteIdxFound = True
idxWhite = errorWhiteSortedIdx[index]
if errorYellowSortedIdx[index] not in ListOfIndices and not yellowIdxFound:
ListOfIndices.append(errorYellowSortedIdx[index])
print str(index) + " in yellow " + str(ListOfIndices)
yellowIdxFound = True
idxYellow = errorYellowSortedIdx[index]
if withRed:
if errorRedSortedIdx[index] not in ListOfIndices and not redIdxFound:
ListOfIndices.append(errorRedSortedIdx[index])
redIdxFound = True
print str(index) + "in red" + str(ListOfIndices)
idxRed = errorRedSortedIdx[index]
print "True?: " + str(redIdxFound) + str(yellowIdxFound) + str(whiteIdxFound) + str(blackIdxFound)
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound and redIdxFound
print "centersFound: " + str(centersFound)
else:
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound
index = index + 1
print "End of while loop. Index: " + str(index)
print idxRed, idxWhite, idxYellow, idxBlack
if (withRed):
return idxBlack, idxRed, idxYellow, idxWhite
else:
return idxBlack, idxYellow, idxWhite
def plotDeterminedCenters(self, centerBlack, centerYellow, centerWhite, centerRed):
tupleBlack = tuple([centerBlack[2], centerBlack[1], centerBlack[0]])
tupleWhite = tuple([centerWhite[2], centerWhite[1], centerWhite[0]])
tupleYellow = tuple([centerYellow[2], centerYellow[1], centerYellow[0]])
tupleRed = tuple([centerRed[2], centerRed[1], centerRed[0]])
imageBlack = np.zeros((200, 200, 3), np.uint8)
imageBlack[:] = tupleBlack
imageWhite = np.zeros((200, 200, 3), np.uint8)
imageWhite[:] = tupleWhite
imageYellow = np.zeros((200, 200, 3), np.uint8)
imageYellow[:] = tupleYellow
imageRed = np.zeros((200, 200, 3), np.uint8)
imageRed[:] = tupleRed
f, axarr = plt.subplots(2, 2)
axarr[0, 0].imshow(imageBlack)
axarr[0, 0].axis('off')
axarr[0, 0].set_title("Black")
axarr[0, 1].imshow(imageWhite)
axarr[0, 1].axis('off')
axarr[0, 1].set_title("White")
axarr[1, 0].imshow(imageYellow)
axarr[1, 0].axis('off')
axarr[1, 0].set_title("Yellow")
axarr[1, 1].imshow(imageRed)
axarr[1, 1].axis('off')
axarr[1, 1].set_title("Red")
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
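# Hedged sketch (illustrative only, not part of the original pipeline): the
# error ranking in determineColor boils down to giving each preset color the
# trained center at the smallest Euclidean distance; the real method also
# prevents two colors from claiming the same center. A minimal version:
def _demo_nearest_center(true_color, trained_centers):
# trained_centers: (n, 3) array of BGR centers; returns the closest index
distances = np.linalg.norm(np.asarray(trained_centers) - np.asarray(true_color), axis=1)
return int(np.argmin(distances))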
def main():
# define and parse command line arguments
parser = argparse.ArgumentParser(
description='Perform kMeans with n initial centers.')
parser.add_argument('img_path', help='path to the image')
parser.add_argument('n_centers', help='numbers of initial centers')
parser.add_argument('--resize', default='0.1',
help='factor of downsampling the input image. DEFAULT = 0.1')
parser.add_argument('--blur', default='median',
help="blur algorithm: 'median' or 'gaussian'. DEFAULT = median")
parser.add_argument('--blur_kernel', default='5',
help='size of kernel for blurring. DEFAULT = 5')
parser.add_argument('--output_dir', default='./output_images',
help='directory for the output images. DEFAULT = ./output_images')
args = parser.parse_args()
# check if file exists
if not os.path.isfile(args.img_path):
print('file not found')
sys.exit(2)
# check if dir exists, create if not
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# check resize factor (argparse delivers it as a string)
if not 0 < float(args.resize) <= 1:
print('resize factor must be between 0 and 1')
sys.exit(2)
# check blur alg
if not (args.blur == "median" or args.blur == "gaussian"):
print('blur alg must be median or gaussian')
sys.exit(2)
# check kernel size
print "kernel: " + str(args.blur_kernel)
if (int(args.blur_kernel) % 2 == 0):
print('kernel size must be odd')
sys.exit(2)
# create instance of kMeans
print("all arguments have been read.")
inputImage = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
CB = simpleColorBalanceClass()
CB.thresholdAnalysis(inputImage, 1)
imageBalanced = CB.applyTrafo(inputImage)
KM = kMeanClass(imageBalanced, args.n_centers, args.blur, args.resize, args.blur_kernel)
cv2.namedWindow('input', flags=cv2.WINDOW_NORMAL)
cv2.imshow('input', inputImage)
cv2.namedWindow('balanced', flags=cv2.WINDOW_NORMAL)
cv2.imshow('balanced', imageBalanced)
cv2.waitKey(0)
cv2.destroyAllWindows()
KM.applyKM()
idxBlack, idxRed, idxYellow, idxWhite = KM.determineColor(True, KM.trained_centers)
trained_centers = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxRed],
KM.trained_centers[idxYellow], KM.trained_centers[idxWhite]])
print "the trained centers are: " + str(trained_centers)
KM.plotDeterminedCenters(KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
KM.trained_centers[idxWhite], KM.trained_centers[idxRed])
trained_centers_woRed = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
KM.trained_centers[idxWhite]])
true_centers = np.vstack([[70, 50, 60], [50, 70, 240], [60, 240, 230], [250, 250, 250]])
outlierIndex, outlierCenter = detectOutlier(trained_centers, true_centers)
true_centers_woOutlier = np.delete(true_centers, outlierIndex, 0)
trained_centers_woOutlier = np.delete(trained_centers, outlierIndex, 0)
print "outlier center is: " + str(outlierCenter)
print("transform instance will be created!")
T = calcTransform(3, trained_centers_woOutlier, true_centers_woOutlier)
T.calcTransform()
# corr_img1 = scaleandshift2(KM.input_image, [1, 1, 1], [KM.shiftB, KM.shiftG, KM.shiftR])
corrected_img = scaleandshift2(KM.input_image, T.scale, T.shift)
corrected_image_cv2 = np.clip(
corrected_img, 0, 255).astype(np.uint8)
cv2.namedWindow('corrected', flags=cv2.WINDOW_NORMAL)
cv2.imshow('corrected', corrected_image_cv2)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
sys.exit(main())
"""
def batchExtraction(image, batchSideLength):
xSize, ySize, zSize = image.shape
xSizeNew = int(xSize / batchSideLength)
ySizeNew = int(ySize / batchSideLength)
newImage = np.zeros((xSizeNew,ySizeNew,zSize))
for i in range(xSizeNew):
for j in range(ySizeNew):
# create indices for the batches
xlow = i*batchSideLength
xhigh = (i+1)*batchSideLength
ylow = j*batchSideLength
yhigh = (j+1)*batchSideLength
if(i == (xSizeNew-1) ):
xhigh = xSize - 1
if(j == (ySizeNew - 1)):
yhigh = ySize -1
# average the batches
newImage[i, j, 0] = np.mean(image[xlow:xhigh, ylow:yhigh, 0])
newImage[i, j, 1] = np.mean(image[xlow:xhigh, ylow:yhigh, 1])
newImage[i, j, 2] = np.mean(image[xlow:xhigh, ylow:yhigh, 2])
return newImage
input_img = cv2.imread("test_images/pic3.jpg", cv2.IMREAD_UNCHANGED)
#input_img_converted = getimgdatapts(input_img)
#print(input_img_converted.shape)
width, height, channels = input_img.shape
trial = cv2.resize(input_img, (0, 0), fx=0.1, fy=0.1)
print(trial.shape)
# blur image using gaussian:
blurG = cv2.GaussianBlur(trial, (5,5), 0)
# blur image using median:
blurM = cv2.medianBlur(trial, 5)
# plot both blurred images
blurBoth = np.concatenate((blurG, blurM), axis=1)
# apply kmeans on blurred image:
# number of centers for kmeans
n_centers = 6
kmc = KMeans(n_clusters=n_centers, init='k-means++', max_iter=20)
trial_converted = getimgdatapts(blurM)
kmc.fit(trial_converted)
trained_centers = kmc.cluster_centers_
labels = kmc.labels_
# print centers and counts
labelcount = Counter()
for i in np.arange(n_centers):
labelcount[i] = np.sum(labels == i)
print(labelcount)
print(trained_centers)
print(kmc.cluster_centers_[1]/255)
str0 = tuple([kmc.cluster_centers_[0,2],kmc.cluster_centers_[0,1],kmc.cluster_centers_[0,0]])
str1 = tuple([kmc.cluster_centers_[1,2],kmc.cluster_centers_[1,1],kmc.cluster_centers_[1,0]])
str2 = tuple([kmc.cluster_centers_[2,2],kmc.cluster_centers_[2,1],kmc.cluster_centers_[2,0]])
str3 = tuple([kmc.cluster_centers_[3,2],kmc.cluster_centers_[3,1],kmc.cluster_centers_[3,0]])
str4 = tuple([kmc.cluster_centers_[4,2],kmc.cluster_centers_[4,1],kmc.cluster_centers_[4,0]])
str5 = tuple([kmc.cluster_centers_[5,2],kmc.cluster_centers_[5,1],kmc.cluster_centers_[5,0]])
print(str1)
image0 = np.zeros((200, 200, 3), np.uint8)
image0[:] = str0
image1 = np.zeros((200, 200, 3), np.uint8)
image1[:] = str1
image2 = np.zeros((200, 200, 3), np.uint8)
image2[:] = str2
image3 = np.zeros((200, 200, 3), np.uint8)
image3[:] = str3
image4 = np.zeros((200, 200, 3), np.uint8)
image4[:] = str4
image5 = np.zeros((200, 200, 3), np.uint8)
image5[:] = str5
labelArray = kmc.labels_
num0 = np.sum(labelArray==0)
num1 = np.sum(labelArray==1)
num2 = np.sum(labelArray==2)
num3 = np.sum(labelArray==3)
num4 = np.sum(labelArray==4)
num5 = np.sum(labelArray==5)
f, axarr = plt.subplots(3, 2)
axarr[0,0].imshow(image0)
axarr[0,0].axis('off')
axarr[0,0].set_title(str(num0))
axarr[0,1].imshow(image1)
axarr[0,1].axis('off')
axarr[0,1].set_title(str(num1))
axarr[1,0].imshow(image2)
axarr[1,0].axis('off')
axarr[1,0].set_title(str(num2))
axarr[1,1].imshow(image3)
axarr[1,1].axis('off')
axarr[1,1].set_title(str(num3))
axarr[2,0].imshow(image4)
axarr[2,0].axis('off')
axarr[2,0].set_title(str(num4))
axarr[2,1].imshow(image5)
axarr[2,1].axis('off')
axarr[2,1].set_title(str(num5))
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
for i in range(kmc.n_clusters):
print(np.sum(labelArray==i))
"""
|
{"hexsha": "c8ba72a1780db77ffdff49350ee858c429118054", "size": 18485, "ext": "py", "lang": "Python", "max_stars_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/KMeansTrialczuidema.py", "max_stars_repo_name": "johnson880319/Software", "max_stars_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_stars_repo_licenses": ["CC-BY-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/KMeansTrialczuidema.py", "max_issues_repo_name": "johnson880319/Software", "max_issues_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_issues_repo_licenses": ["CC-BY-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "catkin_ws/src/10-lane-control/anti_instagram/sandbox/KMeansTrialczuidema.py", "max_forks_repo_name": "johnson880319/Software", "max_forks_repo_head_hexsha": "045894227f359e0a3a3ec5b7a53f8d1ebc06acdd", "max_forks_repo_licenses": ["CC-BY-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2681451613, "max_line_length": 116, "alphanum_fraction": 0.632675142, "include": true, "reason": "import numpy", "num_tokens": 4869}
|
# from fairseq.data import BertDictionary
import os
import torch
import math
import numpy as np
import logging
import itertools
from fairseq import search, utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
encoders,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from fairseq.criterions import FairseqCriterion, register_criterion
logger = logging.getLogger(__name__)
@register_task('translation_segment2')
class TranslationSegment2Task(TranslationTask):
def build_generator(self, args):
from .sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, 'sampling', False)
sampling_topk = getattr(args, 'sampling_topk', -1)
sampling_topp = getattr(args, 'sampling_topp', -1.0)
diverse_beam_groups = getattr(args, 'diverse_beam_groups', -1)
diverse_beam_strength = getattr(args, 'diverse_beam_strength', 0.5)
match_source_len = getattr(args, 'match_source_len', False)
diversity_rate = getattr(args, 'diversity_rate', -1)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError('Provided Search parameters are mutually exclusive.')
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
if sampling:
search_strategy = search.Sampling(self.target_dictionary, sampling_topk, sampling_topp)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(self.target_dictionary, diversity_rate)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
if getattr(args, 'print_alignment', False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
temperature=getattr(args, 'temperature', 1.),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
search_strategy=search_strategy,
)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(os.pathsep)
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
return SegmentDataset(
src_tokens, src_lengths, self.source_dictionary,
left_pad_source=True, # self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
def load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_datasets.append(
data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
)
logger.info('{} {} {}-{} {} examples'.format(
data_path, split_k, src, tgt, len(src_datasets[-1])
))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
return SegmentDataset(
src_dataset, src_dataset.sizes, src_dict,
tgt_dataset, tgt_dataset.sizes, tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
max_source_positions=max_source_positions,
max_target_positions=max_target_positions,
align_dataset=align_dataset,
remove_token_id=src_dict.eos(),
)
class SegmentDataset(LanguagePairDataset):
def __init__(
self, src, src_sizes, src_dict,
tgt=None, tgt_sizes=None, tgt_dict=None,
left_pad_source=True, left_pad_target=False,
max_source_positions=1024, max_target_positions=1024,
shuffle=True, input_feeding=True,
remove_eos_from_source=False, append_eos_to_target=False,
align_dataset=None,
append_bos=False,
remove_token_id=None,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
self.src = src
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.max_source_positions = max_source_positions
self.max_target_positions = max_target_positions
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.align_dataset = align_dataset
if self.align_dataset is not None:
assert self.tgt_sizes is not None, "Both source and target needed when alignments are provided"
self.append_bos = append_bos
self.remove_token_id = remove_token_id
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
# Append EOS to the end of the tgt sentence if it does not have one, and
# remove EOS from the end of the src sentence if it exists. This is useful
# when we reuse existing datasets for the opposite direction, i.e., when we
# want to use tgt_dataset as src_dataset and vice versa.
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if self.src[index][0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
# flag every position strictly after the first EOS (the non-concept part)
concept_padding_mask = np.clip(((src_item == self.src_dict.eos_index).cumsum(-1).cumsum(-1) >= 2), a_min=0, a_max=1)
src_item_list = list(src_item.numpy())
bos_position_id, eos_position_id = \
src_item_list.index(self.src_dict.bos_index), src_item_list.index(self.src_dict.eos_index)
# positions: 0 for BOS, 1 for every token before the first EOS, then 2, 3, ...
# from the first EOS onward; all shifted past the pad index
position = [0] + [1] * (eos_position_id - bos_position_id - 1) + \
list(range(2, len(src_item) - eos_position_id + 2))
position = torch.Tensor(position).add(self.src_dict.pad()+1).long()
# chunk ids: a running counter that increments after each EOS, so the tokens
# between consecutive EOS markers share one chunk id
chunk = []
chunk_id = self.src_dict.pad() + 1
for token in src_item_list:
chunk.append(chunk_id)
if token == self.src_dict.eos_index:
chunk_id += 1
chunk = torch.Tensor(chunk).long()
label = [-1] * len(src_item_list)
if tgt_item is not None:
tgt_item_list = list(tgt_item.numpy())
for idx in range(eos_position_id+1, len(src_item_list)):
if src_item_list[idx] == self.src_dict.eos_index:
continue
elif src_item_list[idx] in tgt_item_list[:-1]:
label[idx] = 1
else:
label[idx] = 0
label = torch.Tensor(label).long()
n_concept = eos_position_id + 1
prototype_in_target = [1 for _ in range(n_concept)]
for src_token in src_item_list[n_concept:]:
if src_token in tgt_item_list[:-1]:
prototype_in_target.append(1)
else:
prototype_in_target.append(0)
prototype_in_target = torch.Tensor(prototype_in_target).long()
else:
label = None
prototype_in_target = None
if self.remove_token_id is not None:
# left_tokens = src_item.ne(self.remove_token_id)
remove_tokens = src_item.eq(self.remove_token_id)
remove_cumsum = remove_tokens.long().cumsum(-1)
left_tokens = ~remove_tokens + remove_cumsum.eq(remove_cumsum.max(-1)[0])
src_item = src_item[left_tokens]
position = position[left_tokens]
chunk = chunk[left_tokens]
if label is not None:
label = label[left_tokens]
if prototype_in_target is not None:
prototype_in_target = prototype_in_target[left_tokens]
concept_padding_mask = concept_padding_mask[left_tokens]
example = {
'id': index,
'source': src_item,
'target': tgt_item,
'segment': chunk,
'position': position,
'chunk': chunk,
'label': label,
'prototype_in_target': prototype_in_target,
'concept_padding_mask': concept_padding_mask
}
if self.align_dataset is not None:
example['alignment'] = self.align_dataset[index]
return example
def collater(self, samples):
return collate(
samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
)
def collate(
samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,
input_feeding=True,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, merge_pad_idx=pad_idx):
return data_utils.collate_tokens(
[s[key] for s in samples],
merge_pad_idx, eos_idx, left_pad, move_eos_to_beginning,
)
def expand_chunk_to_mask(chunks):
chunk_masks = chunks.unsqueeze(1).eq(chunks.unsqueeze(2))
concept_mask = chunks.eq(pad_idx+1)
chunk_masks.add_(concept_mask.unsqueeze(1)).add_(concept_mask.unsqueeze(2))
return ~chunk_masks
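# Hedged illustration (comment only): for chunks = [pad+1, pad+2, pad+2] the
# equality test above gives a block-diagonal True pattern; concept positions
# (chunk id == pad+1) are then forced True along both rows and columns, and
# the final negation turns "may attend" entries into False in the returned mask.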
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge('source', left_pad=left_pad_source)
src_segments = merge('segment', left_pad=left_pad_source)
src_positions = merge('position', left_pad=left_pad_source)
src_chunks = merge('chunk', left_pad=left_pad_source)
src_chunk_masks = expand_chunk_to_mask(src_chunks)
concept_padding_mask = merge('concept_padding_mask', left_pad=left_pad_source, merge_pad_idx=1)
if samples[0].get('label') is not None:
src_labels = merge('label', left_pad=left_pad_source, merge_pad_idx=-1)
else:
src_labels = None
if samples[0].get('prototype_in_target') is not None:
prototype_in_target = merge('prototype_in_target', left_pad=left_pad_source, merge_pad_idx=-1)
else:
prototype_in_target = None
# sort by descending source length
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
src_segments = src_segments.index_select(0, sort_order)
src_positions = src_positions.index_select(0, sort_order)
src_chunks = src_chunks.index_select(0, sort_order)
concept_padding_mask = concept_padding_mask.index_select(0, sort_order).bool()
if src_labels is not None:
src_labels = src_labels.index_select(0, sort_order)
if prototype_in_target is not None:
prototype_in_target = prototype_in_target.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get('target', None) is not None:
target = merge('target', left_pad=left_pad_target)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor([s['target'].numel() for s in samples]).index_select(0, sort_order)
ntokens = sum(len(s['target']) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
'target',
left_pad=left_pad_target,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s['source']) for s in samples)
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': ntokens,
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
'src_segments': src_segments,
'src_positions': src_positions,
'src_chunks': src_chunks,
'src_chunk_masks': src_chunk_masks,
'concept_padding_mask': concept_padding_mask,
},
'target': target,
'src_label': src_labels,
}
if prototype_in_target is not None:
batch['net_input']['prototype_in_target'] = prototype_in_target
if prev_output_tokens is not None:
batch['net_input']['prev_output_tokens'] = prev_output_tokens
return batch
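# Hedged sketch (illustrative only; not part of fairseq): with
# move_eos_to_beginning, collate_tokens builds the teacher-forcing input by
# rotating each full-length target so EOS comes first. For a single sequence
# that ends in EOS this is just a roll:
def _demo_move_eos_to_beginning(target, eos_idx=2):
# target: 1-D LongTensor ending in eos_idx, e.g. [5, 6, 7, 2] -> [2, 5, 6, 7]
assert target[-1] == eos_idx
return torch.roll(target, 1)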
|
{"hexsha": "da121e9daa82b6144ae5304ad550de126d04b79a", "size": 18238, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/translation_segment2.py", "max_stars_repo_name": "a414351664/TRAB-IKE", "max_stars_repo_head_hexsha": "3dd07221e1854c974127d7f6d0d95779a25166c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-03T10:22:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-03T10:22:07.000Z", "max_issues_repo_path": "src/translation_segment2.py", "max_issues_repo_name": "LibertFan/EKI-BART", "max_issues_repo_head_hexsha": "b822384cf9d4aa9adda46f7f306c024782fa5f15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/translation_segment2.py", "max_forks_repo_name": "LibertFan/EKI-BART", "max_forks_repo_head_hexsha": "b822384cf9d4aa9adda46f7f306c024782fa5f15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8008948546, "max_line_length": 124, "alphanum_fraction": 0.637953723, "include": true, "reason": "import numpy", "num_tokens": 4054}
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monailabel.deepedit.multilabel.transforms import (
FindDiscrepancyRegionsCustomd,
PosNegClickProbAddRandomGuidanceCustomd,
)
from monailabel.deepedit.transforms import (
AddRandomGuidanced,
DiscardAddGuidanced,
PosNegClickProbAddRandomGuidanced,
ResizeGuidanceCustomd,
SingleLabelSingleModalityd,
)
IMAGE = np.array([[[[1, 0, 2, 0, 1], [0, 1, 2, 1, 0], [2, 2, 3, 2, 2], [0, 1, 2, 1, 0], [1, 0, 2, 0, 1]]]])
LABEL = np.array([[[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 1, 0], [0, 0, 0, 0, 0]]]])
MULTIMODALITY_IMAGE = np.random.rand(5, 5, 5)
MULTI_LABEL = np.random.randint(0, 6, (5, 5))
PRED = np.random.randint(0, 6, (5, 5))
DATA_1 = {
"image": IMAGE,
"label": LABEL,
"image_meta_dict": {
"dim": np.array(IMAGE.shape),
"pixdim": [1, 1, 1, 5, 1, 1, 1, 1],
"filename_or_obj": "IMAGE_NAME",
},
"label_meta_dict": {},
"foreground": [0, 0, 0],
"background": [0, 0, 0],
}
DISCARD_ADD_GUIDANCE_TEST_CASE = [
{"image": IMAGE, "label": LABEL},
DATA_1,
# Image
[
[
[
[1.0, 0.0, 2.0, 0.0, 1.0],
[0.0, 1.0, 2.0, 1.0, 0.0],
[2.0, 2.0, 3.0, 2.0, 2.0],
[0.0, 1.0, 2.0, 1.0, 0.0],
[1.0, 0.0, 2.0, 0.0, 1.0],
]
],
# Positive clicks in zeros
[
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
],
# Negative clicks in zeros
[
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
],
],
]
DATA_2 = {
"image": IMAGE,
"label": LABEL,
"guidance": np.array([[[1, 0, 2, 2]], [[-1, -1, -1, -1]]]),
"discrepancy": np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
"probability": 1.0,
}
CLICK_RATIO_ADD_RANDOM_GUIDANCE_TEST_CASE_1 = [
{"guidance": "guidance", "discrepancy": "discrepancy", "probability": "probability"},
DATA_2,
"[[[1, 0, 2, 2], [-1, -1, -1, -1]], [[-1, -1, -1, -1], [1, 0, 2, 1]]]",
]
ADD_RANDOM_GUIDANCE_TEST_CASE_1 = [
{"guidance": "guidance", "discrepancy": "discrepancy", "probability": "probability"},
DATA_2,
"[[[1, 0, 2, 2], [1, 0, 1, 3]], [[-1, -1, -1, -1], [-1, -1, -1, -1]]]",
]
DATA_3 = {
"image": np.arange(1000).reshape((1, 5, 10, 20)),
"image_meta_dict": {
"foreground_cropped_shape": (1, 10, 20, 40),
"dim": [3, 512, 512, 128],
"spatial_shape": [512, 512, 128],
},
"guidance": [[[6, 10, 14], [8, 10, 14]], [[8, 10, 16]]],
"foreground": [[10, 14, 6], [10, 14, 8]],
"background": [[10, 16, 8]],
}
RESIZE_GUIDANCE_TEST_CASE_1 = [
{"ref_image": "image", "guidance": "guidance"},
DATA_3,
[[[0, 0, 0], [0, 0, 1]], [[0, 0, 1]]],
]
DATA_4 = {
"image": MULTIMODALITY_IMAGE,
"label": MULTI_LABEL,
"image_meta_dict": {
"dim": [1, 1, 1, 5, 2, 1, 1, 1],
"pixdim": [1, 1, 1, 5, 2, 1, 1, 1],
"filename_or_obj": "IMAGE_NAME",
},
"label_meta_dict": {
"dim": [1, 1, 1, 5, 2, 1, 1, 1],
"pixdim": [1, 1, 1, 5, 2, 1, 1, 1],
"filename_or_obj": "LABEL_NAME",
},
}
SINGLE_LABEL_SINGLE_MODALITY_TEST_CASE_1 = [
{"keys": ("image", "label")},
DATA_4,
(5, 5),
]
LABEL_NAMES = {
"spleen": 1,
"right kidney": 2,
"background": 0,
}
DATA_5 = {
"image": IMAGE,
"label": MULTI_LABEL,
"guidance": {
"spleen": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
"right kidney": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
"background": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
},
"discrepancy": {
"spleen": np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
"right kidney": np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
"background": np.array(
[
[[[[0, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
[[[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]]],
]
),
},
"probability": 1.0,
"label_names": LABEL_NAMES,
}
PosNegClickProbAddRandomGuidanceCustomd_TEST_CASE = [
{"guidance": "guidance", "discrepancy": "discrepancy", "probability": "probability"},
DATA_5,
{
"spleen": "[[[1, 0, 2, 2], [-1, -1, -1, -1], [1, 0, 1, 3]]]",
"right kidney": "[[[1, 0, 2, 2], [-1, -1, -1, -1], [1, 0, 1, 3]]]",
"background": "[[[1, 0, 2, 2], [-1, -1, -1, -1], [1, 0, 1, 3]]]",
},
]
DATA_6 = {
"image": IMAGE,
"label": MULTI_LABEL,
"guidance": {
"spleen": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
"right kidney": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
"background": np.array([[[1, 0, 2, 2], [-1, -1, -1, -1]]]),
},
"probability": 1.0,
"label_names": LABEL_NAMES,
"pred": PRED,
}
FindDiscrepancyRegionsCustomd_TEST_CASE = [
{"discrepancy": "discrepancy"},
DATA_6,
(5, 5),
]
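# Hedged note (illustrative, not part of the original tests): the guidance
# arrays above encode clicks as coordinate rows, with rows of -1 assumed to be
# "no click" placeholders. A minimal sketch of stripping the placeholders:
def _demo_drop_placeholder_clicks():
guidance = np.array([[[1, 0, 2, 2]], [[-1, -1, -1, -1]]])
return guidance[(guidance >= 0).all(axis=-1)]  # -> array([[1, 0, 2, 2]])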
# When checking tensor content use np.testing.assert_equal(result["image"], expected_values)
class TestDiscardAddGuidanced(unittest.TestCase):
@parameterized.expand([DISCARD_ADD_GUIDANCE_TEST_CASE])
def test_correct_results(self, arguments, input_data, expected_result):
add_fn = DiscardAddGuidanced(arguments)
result = add_fn(input_data)
np.testing.assert_equal(result["image"], expected_result)
class TestClickRatioAddRandomGuidanced(unittest.TestCase):
@parameterized.expand([CLICK_RATIO_ADD_RANDOM_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
seed = 0
add_fn = PosNegClickProbAddRandomGuidanced(**arguments)
add_fn.set_random_state(seed)
result = add_fn(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestAddRandomGuidanced(unittest.TestCase):
@parameterized.expand([ADD_RANDOM_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
seed = 0
add_fn = AddRandomGuidanced(**arguments)
add_fn.set_random_state(seed)
result = add_fn(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestResizeGuidanced(unittest.TestCase):
@parameterized.expand([RESIZE_GUIDANCE_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = ResizeGuidanceCustomd(**arguments)(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestSingleLabelSingleModalityd(unittest.TestCase):
@parameterized.expand([SINGLE_LABEL_SINGLE_MODALITY_TEST_CASE_1])
def test_correct_results(self, arguments, input_data, expected_result):
result = SingleLabelSingleModalityd(**arguments)(input_data)
self.assertEqual(result["image"].shape, expected_result)
# Tests for transforms used in multilabel deepedit
class TestPosNegClickProbAddRandomGuidanceCustomd(unittest.TestCase):
@parameterized.expand([PosNegClickProbAddRandomGuidanceCustomd_TEST_CASE])
def test_correct_results(self, arguments, input_data, expected_result):
seed = 0
add_fn = PosNegClickProbAddRandomGuidanceCustomd(keys="NA", **arguments)
add_fn.set_random_state(seed)
result = add_fn(input_data)
self.assertEqual(result[arguments["guidance"]], expected_result)
class TestFindDiscrepancyRegionsCustomd(unittest.TestCase):
@parameterized.expand([FindDiscrepancyRegionsCustomd_TEST_CASE])
def test_correct_results(self, arguments, input_data, expected_result):
add_fn = FindDiscrepancyRegionsCustomd(keys="label", **arguments)
result = add_fn(input_data)
self.assertEqual(result["discrepancy"]["spleen"][0].shape, expected_result)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "683cd600bce3888a3749e6eab575bfff6c8ae02a", "size": 9533, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/deepedit/test_transforms.py", "max_stars_repo_name": "IntroAI-termproject/MONAILabel", "max_stars_repo_head_hexsha": "6a0fcc797e24aff1a1582088bae71973b2b6582e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-06T15:08:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T12:19:53.000Z", "max_issues_repo_path": "tests/unit/deepedit/test_transforms.py", "max_issues_repo_name": "IntroAI-termproject/MONAILabel", "max_issues_repo_head_hexsha": "6a0fcc797e24aff1a1582088bae71973b2b6582e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/deepedit/test_transforms.py", "max_forks_repo_name": "IntroAI-termproject/MONAILabel", "max_forks_repo_head_hexsha": "6a0fcc797e24aff1a1582088bae71973b2b6582e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5669014085, "max_line_length": 107, "alphanum_fraction": 0.5452638204, "include": true, "reason": "import numpy", "num_tokens": 3558}
|
from functools import partial
from typing import Optional, Dict
import datasets
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import BertTokenizerFast
from rouge_score import rouge_scorer
class RACEDataModuleForTinyChoice(pl.LightningDataModule):
def __init__(
self,
model_name_or_path: str = 'bert-large-uncased',
datasets_loader: str = 'race',
task_name: str = 'all',
max_seq_length: int = 640,
train_batch_size: int = 32,
eval_batch_size: int = 32,
num_workers: int = 8,
num_preprocess_processes: int = 8,
**kwargs
):
super().__init__()
self.model_name_or_path = model_name_or_path
self.dataset_loader = datasets_loader
self.task_name = task_name
self.max_seq_length = max_seq_length
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.num_workers = num_workers
self.num_preprocess_processes = num_preprocess_processes
self.tokenizer = BertTokenizerFast.from_pretrained(self.model_name_or_path, use_fast=True, do_lower_case=True)
self.dataset = None
def setup(self, stage: Optional[str] = None):
self.dataset = datasets.load_dataset(self.dataset_loader, self.task_name)
preprocessor = partial(self.preprocess, self.tokenizer, self.max_seq_length, )
for split in self.dataset.keys():
self.dataset[split] = self.dataset[split].map(
preprocessor,
# batched=True,
remove_columns=['example_id'],
num_proc=self.num_preprocess_processes,
keep_in_memory=True,
)
self.dataset[split].set_format(type='torch',
columns=['input_ids', 'token_type_ids', 'attention_mask', 'position_ids',
'label', 'indices'])
def prepare_data(self):
datasets.load_dataset(self.dataset_loader, self.task_name)
BertTokenizerFast.from_pretrained(self.model_name_or_path, use_fast=True)
def train_dataloader(self):
return DataLoader(self.dataset['train'],
sampler=RandomSampler(self.dataset['train']),
batch_size=self.train_batch_size,
num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(self.dataset['validation'],
sampler=SequentialSampler(self.dataset['validation']),
batch_size=self.eval_batch_size,
num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.dataset['test'],
sampler=SequentialSampler(self.dataset['test']),
batch_size=self.eval_batch_size,
num_workers=self.num_workers)
# auto cache tokens
@staticmethod
def preprocess(tokenizer: BertTokenizerFast, max_seq_length: int, x: Dict) -> Dict:
label_map = {"A": 0, "B": 1, "C": 2, "D": 3}
all_input_ids = np.zeros(640, dtype=int)
attention_mask = np.zeros(640, dtype=int)
qa_input_ids = tokenizer(f'[SEP] {x["question"]}' + ' [SEP] [CLS] '.join([''] + x['options']) + ' [SEP]',
add_special_tokens=False)['input_ids']
article_input_ids = tokenizer(x['article'], truncation=True, max_length=min(511, 640 - len(qa_input_ids)),
add_special_tokens=False)['input_ids']
all_input_ids[0:len(article_input_ids)] = article_input_ids
all_input_ids[len(article_input_ids):len(article_input_ids) + len(qa_input_ids)] = qa_input_ids
attention_mask[0:len(article_input_ids) + len(qa_input_ids)] = 1
sep_indices = np.where(np.array(all_input_ids) == 102)[0]
cls_indices = np.where(np.array(all_input_ids) == 101)[0]
assert len(cls_indices) == 4
relative_position_ids = np.arange(512)
position_ids = np.zeros(640, dtype=int)
token_type_ids = np.ones(640, dtype=int)
token_type_ids[0:sep_indices[0] + 1] = 0
position_ids[0:sep_indices[0] + 1] = relative_position_ids[0:sep_indices[0] + 1]
for i in range(5):
position_ids[sep_indices[i] + 1:sep_indices[i + 1] + 1] = relative_position_ids[0:sep_indices[i + 1] - sep_indices[i]]
labels = label_map.get(x["answer"], -1)
label = torch.tensor(labels).long()
return {
"label": label,
"input_ids": torch.tensor(all_input_ids).long(),
"attention_mask": torch.tensor(attention_mask).long(),
"token_type_ids": torch.tensor(token_type_ids).long(),
"position_ids": torch.tensor(position_ids).long(),
"indices": torch.tensor(cls_indices).long(),
}
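# Hedged sketch (illustrative only): preprocess restarts the position ids at
# every [SEP] boundary, so each segment counts from zero again before the pad
# offset is added. A tiny self-contained version of that loop, with assumed
# [SEP] positions:
def _demo_segment_positions(seq_len=15, sep_indices=(4, 9, 14)):
position_ids = np.zeros(seq_len, dtype=int)
position_ids[:sep_indices[0] + 1] = np.arange(sep_indices[0] + 1)
for i in range(len(sep_indices) - 1):
span = sep_indices[i + 1] - sep_indices[i]
position_ids[sep_indices[i] + 1:sep_indices[i + 1] + 1] = np.arange(span)
return position_ids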
if __name__ == '__main__':
dm = RACEDataModuleForTinyChoice(train_batch_size=32)
dm.setup('train')
d = (next(iter(dm.test_dataloader())))
print(d)
|
{"hexsha": "f367ba802002656eb97a937ba79702a116d026d6", "size": 5244, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/RACEDataModuleForTinyChoice.py", "max_stars_repo_name": "iamNCJ/bert-race-pytorch-lightning", "max_stars_repo_head_hexsha": "93abcc5d5c80790e16114fb021870593cb60f1f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-05-12T02:15:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T15:02:50.000Z", "max_issues_repo_path": "data/RACEDataModuleForTinyChoice.py", "max_issues_repo_name": "iamNCJ/bert-race-pytorch-lightning", "max_issues_repo_head_hexsha": "93abcc5d5c80790e16114fb021870593cb60f1f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/RACEDataModuleForTinyChoice.py", "max_forks_repo_name": "iamNCJ/bert-race-pytorch-lightning", "max_forks_repo_head_hexsha": "93abcc5d5c80790e16114fb021870593cb60f1f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.7, "max_line_length": 130, "alphanum_fraction": 0.6161327231, "include": true, "reason": "import numpy", "num_tokens": 1139}
|
# Copyright 2016-2020 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/caliban-toolbox/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import pytest
import numpy as np
import xarray as xr
from caliban_toolbox import reshape_data
from caliban_toolbox.utils import crop_utils, io_utils
from caliban_toolbox.utils.crop_utils_test import _blank_data_xr
def test_crop_multichannel_data():
# img params
fov_len, stack_len, crop_num, slice_num, row_len = 2, 1, 1, 1, 200
col_len, channel_len = 200, 1
crop_size = (50, 50)
overlap_frac = 0.2
# test only one crop
test_X_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num, row_len=row_len, col_len=col_len,
chan_len=channel_len)
test_y_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num, row_len=row_len, col_len=col_len,
chan_len=channel_len, last_dim_name='compartments')
X_data_cropped, y_data_cropped, log_data = \
reshape_data.crop_multichannel_data(X_data=test_X_data,
y_data=test_y_data,
crop_size=crop_size,
overlap_frac=overlap_frac,
test_parameters=False)
expected_crop_num = len(crop_utils.compute_crop_indices(img_len=row_len,
crop_size=crop_size[0],
overlap_frac=overlap_frac)[0]) ** 2
assert (X_data_cropped.shape == (fov_len, stack_len, expected_crop_num, slice_num,
crop_size[0], crop_size[1], channel_len))
assert log_data["num_crops"] == expected_crop_num
# invalid arguments
# no crop_size or crop_num
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data)
# both crop_size and crop_num
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_size=(20, 20), crop_num=(20, 20))
# bad crop_size dtype
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_size=5)
# bad crop_size shape
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_size=(10, 5, 2))
# bad crop_size values
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_size=(0, 5))
# bad crop_size values
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_size=(1.5, 5))
# bad crop_num dtype
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_num=5)
# bad crop_num shape
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_num=(10, 5, 2))
# bad crop_num values
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_num=(0, 5))
# bad crop_num values
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
crop_num=(1.5, 5))
# bad overlap_frac value
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data,
overlap_frac=1.2)
# bad X_data dims
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data[0], y_data=test_y_data,
crop_size=(5, 5))
# bad y_data dims
with pytest.raises(ValueError):
_ = reshape_data.crop_multichannel_data(X_data=test_X_data, y_data=test_y_data[0],
crop_num=(5, 5))
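# Hedged sketch (illustrative only, not the library's implementation): the
# expected crop count per axis can be approximated by striding the image with
# step crop_size * (1 - overlap_frac); 200px with 50px crops at 20% overlap
# gives 5 starts per axis, hence 25 crops.
def _demo_expected_crops(img_len=200, crop_size=50, overlap_frac=0.2):
stride = int(crop_size * (1 - overlap_frac))
return len(np.arange(0, img_len, stride)) ** 2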
def test_create_slice_data():
# test output shape with even division of slice
fov_len, stack_len, num_crops, num_slices, row_len, col_len, chan_len = 1, 40, 1, 1, 50, 50, 3
slice_stack_len = 4
X_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=num_crops,
slice_num=num_slices, row_len=row_len, col_len=col_len,
chan_len=chan_len)
y_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=num_crops,
slice_num=num_slices, row_len=row_len, col_len=col_len,
chan_len=chan_len, last_dim_name='compartments')
X_slice, y_slice, slice_indices = reshape_data.create_slice_data(X_data, y_data,
slice_stack_len)
assert X_slice.shape == (fov_len, slice_stack_len, num_crops,
int(np.ceil(stack_len / slice_stack_len)),
row_len, col_len, chan_len)
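# Hedged note (illustrative only): the slice count is simply the ceiling of
# stack length over slice length, e.g. 40 frames in 4-frame slices -> 10
# slices, while 42 frames would give 11.
def _demo_num_slices(stack_len=40, slice_stack_len=4):
return int(np.ceil(stack_len / slice_stack_len))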
def test_reconstruct_image_stack():
with tempfile.TemporaryDirectory() as temp_dir:
# generate stack of crops from image with grid pattern
(fov_len, stack_len, crop_num,
slice_num, row_len, col_len, chan_len) = 2, 1, 1, 1, 400, 400, 4
X_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=chan_len)
y_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=1,
last_dim_name='compartments')
# create image with artificial objects to be segmented
cell_idx = 1
for i in range(12):
for j in range(11):
for fov in range(y_data.shape[0]):
y_data[fov, :, :, :, (i * 35):(i * 35 + 10 + fov * 10),
(j * 37):(j * 37 + 8 + fov * 10), 0] = cell_idx
cell_idx += 1
# Crop the data
crop_size, overlap_frac = 100, 0.2
X_cropped, y_cropped, log_data = \
reshape_data.crop_multichannel_data(X_data=X_data,
y_data=y_data,
crop_size=(crop_size, crop_size),
overlap_frac=overlap_frac)
io_utils.save_npzs_for_caliban(X_data=X_cropped, y_data=y_cropped, original_data=X_data,
log_data=log_data, save_dir=temp_dir)
stitched_imgs = reshape_data.reconstruct_image_stack(crop_dir=temp_dir)
# dims are the same
assert np.all(stitched_imgs.shape == y_data.shape)
# all the same pixels are marked
assert (np.all(np.equal(stitched_imgs[:, :, 0] > 0, y_data[:, :, 0] > 0)))
# there are the same number of cells
assert (len(np.unique(stitched_imgs)) == len(np.unique(y_data)))
with tempfile.TemporaryDirectory() as temp_dir:
# generate data with the corner tagged
fov_len, stack_len, crop_num, slice_num = 1, 40, 1, 1
row_len, col_len, chan_len = 50, 50, 3
slice_stack_len = 4
X_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=chan_len)
y_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=1,
last_dim_name='compartments')
# tag upper left hand corner of the label in each image
tags = np.arange(stack_len)
y_data[0, :, 0, 0, 0, 0, 0] = tags
X_slice, y_slice, slice_log_data = \
reshape_data.create_slice_data(X_data=X_data,
y_data=y_data,
slice_stack_len=slice_stack_len)
io_utils.save_npzs_for_caliban(X_data=X_slice, y_data=y_slice, original_data=X_data,
log_data={**slice_log_data}, save_dir=temp_dir,
blank_labels="include",
save_format="npz", verbose=False)
stitched_imgs = reshape_data.reconstruct_image_stack(temp_dir)
assert np.all(stitched_imgs.shape == y_data.shape)
assert np.all(np.equal(stitched_imgs[0, :, 0, 0, 0, 0, 0], tags))
with tempfile.TemporaryDirectory() as temp_dir:
# generate data with both corners tagged and images labeled
(fov_len, stack_len, crop_num,
slice_num, row_len, col_len, chan_len) = 1, 8, 1, 1, 400, 400, 4
X_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=chan_len)
y_data = _blank_data_xr(fov_len=fov_len, stack_len=stack_len, crop_num=crop_num,
slice_num=slice_num,
row_len=row_len, col_len=col_len, chan_len=1,
last_dim_name='compartments')
# create image with artificial objects to be segmented
cell_idx = 1
for i in range(1, 12):
for j in range(1, 11):
for stack in range(stack_len):
y_data[:, stack, :, :, (i * 35):(i * 35 + 10 + stack * 2),
(j * 37):(j * 37 + 8 + stack * 2), 0] = cell_idx
cell_idx += 1
# tag upper left hand corner of each image with squares of increasing size
for stack in range(stack_len):
y_data[0, stack, 0, 0, :stack, :stack, 0] = 1
# Crop the data
crop_size, overlap_frac = 100, 0.2
X_cropped, y_cropped, log_data = \
reshape_data.crop_multichannel_data(X_data=X_data,
y_data=y_data,
crop_size=(crop_size, crop_size),
overlap_frac=overlap_frac)
X_slice, y_slice, slice_log_data = \
reshape_data.create_slice_data(X_data=X_cropped,
y_data=y_cropped,
slice_stack_len=slice_stack_len)
io_utils.save_npzs_for_caliban(X_data=X_slice, y_data=y_slice, original_data=X_data,
log_data={**slice_log_data, **log_data},
save_dir=temp_dir,
blank_labels="include",
save_format="npz", verbose=False)
stitched_imgs = reshape_data.reconstruct_image_stack(temp_dir)
# dims are the same
assert np.all(stitched_imgs.shape == y_data.shape)
# all the same pixels are marked
assert (np.all(np.equal(stitched_imgs[:, :, 0] > 0, y_data[:, :, 0] > 0)))
# there are the same number of cells
assert (len(np.unique(stitched_imgs)) == len(np.unique(y_data)))
# check mark in upper left hand corner of image
for stack in range(stack_len):
original = np.zeros((10, 10))
original[:stack, :stack] = 1
new = stitched_imgs[0, stack, 0, 0, :10, :10, 0]
assert np.array_equal(original > 0, new > 0)
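# A minimal, self-contained sketch (pure numpy, not the toolbox API) of the
# crop/stitch round trip these tests exercise: tile a 2D image with
# overlapping crops, write the crops back, and check that every pixel is
# recovered. The stride of 80 corresponds to crop_size=100 with
# overlap_frac=0.2 as used above. The leading underscore keeps pytest from
# collecting it as a test.
def _crop_stitch_roundtrip_sketch(row_len=400, crop=100, stride=80):
    img = np.arange(row_len * row_len).reshape(row_len, row_len)
    out = np.zeros_like(img)
    starts = list(range(0, row_len - crop + 1, stride))
    if starts[-1] != row_len - crop:  # make the last crop reach the border
        starts.append(row_len - crop)
    for r in starts:
        for c in starts:
            out[r:r + crop, c:c + crop] = img[r:r + crop, c:c + crop]
    assert np.array_equal(out, img)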
|
{"hexsha": "e89fc800a73404a1babfdd27c80afa283b42f0eb", "size": 13796, "ext": "py", "lang": "Python", "max_stars_repo_path": "caliban_toolbox/reshape_data_test.py", "max_stars_repo_name": "vanvalenlab/deepcell-data-engineering", "max_stars_repo_head_hexsha": "a0dca3839f341841cb4a983923fd0eac21225736", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-02-25T21:08:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-15T14:22:31.000Z", "max_issues_repo_path": "caliban_toolbox/reshape_data_test.py", "max_issues_repo_name": "vanvalenlab/caliban-toolbox", "max_issues_repo_head_hexsha": "a0dca3839f341841cb4a983923fd0eac21225736", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 71, "max_issues_repo_issues_event_min_datetime": "2020-02-18T22:59:31.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-06T19:19:02.000Z", "max_forks_repo_path": "caliban_toolbox/reshape_data_test.py", "max_forks_repo_name": "vanvalenlab/deepcell-data-engineering", "max_forks_repo_head_hexsha": "a0dca3839f341841cb4a983923fd0eac21225736", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-05T21:43:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-05T21:43:32.000Z", "avg_line_length": 46.925170068, "max_line_length": 98, "alphanum_fraction": 0.573861989, "include": true, "reason": "import numpy", "num_tokens": 3093}
|
[STATEMENT]
lemma max_depth_0: "max_depth \<phi> = 0 = (\<exists>n. \<phi> = Atom n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (max_depth \<phi> = 0) = (\<exists>n. \<phi> = Atom n)
[PROOF STEP]
by (cases \<phi>) auto
|
{"llama_tokens": 101, "file": "Abstract_Completeness_Propositional_Logic", "length": 1}
|
import json
import os
import numpy as np
from utils.FScore import F1Score
from Identification.LoadDescriptors import loadDescriptors
from Identification.PreprocessingDescriptors import preprocessDescriptors
from Identification.svm import trainSVM
__author__ = 'andres'
format_extension = '.json'
# DescribeSounds.get(folder_of_test_sounds, extension_of_test_sounds)
def load_description(input_dir, maximum='Inf', reverbs=True):
data_details = {}
maximum = float(maximum)
count = 0
for path, directory_name, file_names in os.walk(input_dir):
for file_name in file_names:
if format_extension in file_name.lower():
if not reverbs and (file_name[0:3] == 'R1_'):
continue
cname, sname = path.split('/')[-2], path.split('/')[-1]
second_key = sname.split('.')[0]
if second_key not in data_details:
data_details[second_key] = {}
                # join against the walked path: joining only cname/sname would
                # drop the root directory; the context manager also avoids
                # leaking the open file handle
                with open(os.path.join(path, file_name), 'r') as fp:
                    fDict = json.load(fp)
data_details[second_key][file_name] = {'file': file_name, 'feature': fDict}
count += 1
if count >= maximum:
break
return data_details
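# The mapping returned above is nested as:
#   {directory_key: {file_name: {'file': file_name, 'feature': <parsed JSON>}}}
# where directory_key is the innermost directory name with any extension
# stripped; the 'feature' entry holds that file's descriptor dictionary.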
def classify_new_sounds(folder_of_test_sounds, target_class):
new_descriptors = {target_class: load_description(folder_of_test_sounds)}
new_normalized_features, new_y_class, new_features_names = preprocessDescriptors(new_descriptors)
new_y_class = np.array([target_class]*len(new_y_class))
descriptors = loadDescriptors(maximum='Inf', reverbs=True)
normalized_features, yClass, features_names = preprocessDescriptors(descriptors)
clf = trainSVM(normalized_features, yClass, call=True)
F1 = F1Score(new_normalized_features, new_y_class, clf)
return F1
F1 = classify_new_sounds('testBass', 'Bass')
|
{"hexsha": "47d75ed17ff3c0867ac44ebbbe8a718bb17ff056", "size": 1884, "ext": "py", "lang": "Python", "max_stars_repo_path": "Evaluation/testNewSounds.py", "max_stars_repo_name": "andimarafioti/AIAMI", "max_stars_repo_head_hexsha": "37ea2bf61e85bf879a0f4a1014f2e93b87301582", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-02-03T10:20:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-05T13:06:46.000Z", "max_issues_repo_path": "Evaluation/testNewSounds.py", "max_issues_repo_name": "adi-797/AIAMI", "max_issues_repo_head_hexsha": "37ea2bf61e85bf879a0f4a1014f2e93b87301582", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-07-20T15:20:54.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-21T11:40:58.000Z", "max_forks_repo_path": "Evaluation/testNewSounds.py", "max_forks_repo_name": "adi-797/AIAMI", "max_forks_repo_head_hexsha": "37ea2bf61e85bf879a0f4a1014f2e93b87301582", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-03-09T12:54:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T13:06:49.000Z", "avg_line_length": 35.5471698113, "max_line_length": 101, "alphanum_fraction": 0.686836518, "include": true, "reason": "import numpy", "num_tokens": 421}
|
"""
Predicting the good/bad classification of depth pixels, based solely on the current view.
The network returns both the depth estimate and the trust.
"""
import math
import numpy as np
import torch
from experiment_handler import ExperimentHandler
from network_architectures.network import Network, UNet
class DepthMapInitialTrust(Network):
"""
Network to classify trust on the input image. It should return a good/bad rating for each pixel.
Input: center color image and center depth map
Output: trust image for the first camera
"""
def __init__(self, F=8, depth_scale=1, scale_augmentation=2.0, file=None):
super().__init__()
gpu = torch.device('cuda')
self.depth_scale = depth_scale
self.scale_augmentation = scale_augmentation
if file is not None:
pt = ExperimentHandler.load_experiment_from_file(file).network
# classification between good/bad pixels using a UNet
self.process = pt.process
self.sigmoid = pt.sigmoid
return
# classification between good/bad pixels using a UNet
self.process = UNet(4, F, 1, depth=2, batchnorms=False).to(gpu)
self.sigmoid = torch.nn.Sigmoid().to(gpu)
def get_network_name(self):
return "DepthMapInitialTrust"
def forward(self, color, depth):
depth_scale = self.depth_scale
scale_augmentation = self.scale_augmentation
if scale_augmentation > 1:
B = color.shape[0]
augmentation = scale_augmentation**torch.Tensor(np.random.rand(B,1,1,1)*2-1).cuda()
depth_scale = depth_scale * augmentation
scaled_depth = depth / depth_scale
if color.max() > 1:
color = color / 255.0
network_input = torch.cat((color, scaled_depth), dim=1)
output = self.process(network_input)
trust = self.sigmoid(output)
return (depth, trust)
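# A minimal forward-pass sketch (not part of the original module). It assumes
# a CUDA device is available (the layers are built on GPU), that Network
# subclasses torch.nn.Module, and that the UNet from
# network_architectures.network preserves spatial resolution.
if __name__ == "__main__" and torch.cuda.is_available():
    net = DepthMapInitialTrust(F=8)
    color = torch.rand(2, 3, 256, 256).cuda()  # B=2 RGB crops in [0, 1]
    depth = torch.rand(2, 1, 256, 256).cuda()  # matching depth maps
    depth_out, trust = net(color, depth)
    assert trust.shape == (2, 1, 256, 256)     # one trust value per pixel
    assert depth_out is depth                  # depth is passed through unchanged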
|
{"hexsha": "4e44d931f367626d0ad2be7fee437d11bcfe03bc", "size": 1951, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/network_architectures/DepthMapInitialTrust.py", "max_stars_repo_name": "simon-donne/defusr", "max_stars_repo_head_hexsha": "fa4275070af4024eea128e99d7c6df2358d129a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-04-08T20:24:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-22T22:16:13.000Z", "max_issues_repo_path": "code/network_architectures/DepthMapInitialTrust.py", "max_issues_repo_name": "simon-donne/defusr", "max_issues_repo_head_hexsha": "fa4275070af4024eea128e99d7c6df2358d129a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-07-22T05:30:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-27T05:36:52.000Z", "max_forks_repo_path": "code/network_architectures/DepthMapInitialTrust.py", "max_forks_repo_name": "simon-donne/defusr", "max_forks_repo_head_hexsha": "fa4275070af4024eea128e99d7c6df2358d129a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-05-01T22:22:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T07:19:13.000Z", "avg_line_length": 33.0677966102, "max_line_length": 100, "alphanum_fraction": 0.6647872886, "include": true, "reason": "import numpy", "num_tokens": 437}
|
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import sys, os, string
characters = string.printable
char_indices = dict((c, i) for i, c in enumerate(characters))
indices_char = dict((i, c) for i, c in enumerate(characters))
INPUT_VOCAB_SIZE = len(characters)
WINDOW_SIZE = 3
def encode_one_hot(line):
line = " " + line + " "
x = np.zeros((len(line), INPUT_VOCAB_SIZE))
for i, c in enumerate(line):
index = char_indices[c] if c in characters else char_indices[' ']
x[i][index] = 1
return x
def decode_one_hot(x):
s = []
for onehot in x:
one_index = np.argmax(onehot)
s.append(indices_char[one_index])
return ''.join(s)
def prepare_for_window(x):
# All slices of size WINDOW_SIZE, sliding through x
ind = [np.array(np.arange(i, i+WINDOW_SIZE)) for i in range(x.shape[0] - WINDOW_SIZE + 1)]
ind = np.array(ind, dtype=np.int32)
x_window = x[ind]
# Reshape it back to a 2-d tensor
return x_window.reshape(x_window.shape[0], x_window.shape[1]*x_window.shape[2])
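# Shape check: string.printable has 100 characters, so a padded line of
# length L encodes to (L, 100) and prepare_for_window yields (L - 2, 300);
# each row is the concatenated one-hot window [left, centre, right].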
def normalization_layer_set_weights(n_layer):
wb = []
w = np.zeros((WINDOW_SIZE*INPUT_VOCAB_SIZE, INPUT_VOCAB_SIZE))
b = np.zeros((INPUT_VOCAB_SIZE))
# Let lower case letters go through
for c in string.ascii_lowercase:
i = char_indices[c]
w[INPUT_VOCAB_SIZE+i, i] = 1
# Map capitals to lower case
for c in string.ascii_uppercase:
i = char_indices[c]
il = char_indices[c.lower()]
w[INPUT_VOCAB_SIZE+i, il] = 1
# Map all non-letters to space
sp_idx = char_indices[' ']
non_letters = [c for c in list(characters) if c not in list(string.ascii_letters)]
for c in non_letters:
i = char_indices[c]
w[INPUT_VOCAB_SIZE+i, sp_idx] = 1
    # Map letters flanked by non-letters (i.e. single letters) to space:
    # 0.75 from each neighbour outweighs the centre letter's weight of 1
for c in non_letters:
i = char_indices[c]
w[i, sp_idx] = 0.75
w[INPUT_VOCAB_SIZE*2+i, sp_idx] = 0.75
wb.append(w)
wb.append(b)
n_layer.set_weights(wb)
return n_layer
def build_model():
# Normalize characters using a dense layer
model = Sequential()
model.add(Dense(INPUT_VOCAB_SIZE,
input_shape=(WINDOW_SIZE*INPUT_VOCAB_SIZE,),
activation='softmax'))
return model
model = build_model()
model.summary()
normalization_layer_set_weights(model.layers[0])
with open(sys.argv[1]) as f:
for line in f:
if line.isspace(): continue
batch = prepare_for_window(encode_one_hot(line))
preds = model.predict(batch)
normal = decode_one_hot(preds)
print(normal)
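# Usage sketch: pass a plain-text file on the command line, e.g.
#   python tf-39.py input.txt
# (the path is illustrative); each non-blank line is printed lower-cased,
# with non-letters and isolated single letters mapped to spaces.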
|
{"hexsha": "6b6e0f1ba0c385b1e103e98d11457bf825b6f3b7", "size": 2646, "ext": "py", "lang": "Python", "max_stars_repo_path": "39-sliding-window/tf-39.py", "max_stars_repo_name": "cyyeh/exercises-in-programming-style", "max_stars_repo_head_hexsha": "70c4c3a346c77eb207c247b8c717d490099cce4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1821, "max_stars_repo_stars_event_min_datetime": "2015-01-05T11:05:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T18:56:40.000Z", "max_issues_repo_path": "39-sliding-window/tf-39.py", "max_issues_repo_name": "cyyeh/exercises-in-programming-style", "max_issues_repo_head_hexsha": "70c4c3a346c77eb207c247b8c717d490099cce4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2015-02-01T00:06:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-30T11:53:43.000Z", "max_forks_repo_path": "39-sliding-window/tf-39.py", "max_forks_repo_name": "cyyeh/exercises-in-programming-style", "max_forks_repo_head_hexsha": "70c4c3a346c77eb207c247b8c717d490099cce4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 512, "max_forks_repo_forks_event_min_datetime": "2015-01-04T09:56:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T17:11:33.000Z", "avg_line_length": 31.5, "max_line_length": 94, "alphanum_fraction": 0.6507936508, "include": true, "reason": "import numpy", "num_tokens": 711}
|
library(grid)
library(extrafont)
library(extrafontdb)
library(ggthemes)
font_import()  # slow and prompts for confirmation; only needed once per machine
loadfonts(device = "win")  # registers fonts for the Windows device; adjust on other platforms
theme_Publication <- function(base_size=14, base_family="Arial") {
(theme_foundation(base_size=base_size, base_family=base_family)
+ theme(plot.title = element_text(face = "bold",
size = rel(1.2), hjust = 0.5),
text = element_text(),
panel.background = element_rect(colour = NA),
plot.background = element_rect(colour = NA),
panel.border = element_rect(colour = NA),
axis.title = element_text(face = "bold",size = rel(1)),
axis.title.y = element_text(angle=90,vjust =2, size = rel(1.3)),
axis.title.x = element_text(vjust = -0.2, size = rel(1.3)),
axis.text = element_text(size = rel(1)),
axis.line = element_line(colour="black"),
axis.ticks = element_line(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key = element_rect(colour = NA),
legend.key.size= unit(0.2, "cm"),
legend.spacing = unit(0, "cm"),
legend.title = element_text(face="italic", size = rel(1.3)),
legend.text = element_text(size=rel(1)),
plot.margin=unit(c(10,5,5,5),"mm"),
strip.background=element_rect(colour="#f0f0f0",fill="#f0f0f0"),
strip.text = element_text(face="bold")
))
}
scale_fill_Publication <- function(...){
library(scales)
discrete_scale("fill","Publication",manual_pal(values = c("#386cb0","#fdb462","#7fc97f","#ef3b2c","#662506","#a6cee3","#fb9a99","#984ea3","#ffff33")), ...)
}
scale_colour_Publication <- function(...){
library(scales)
discrete_scale("colour","Publication",manual_pal(values = c("#386cb0","#fdb462","#7fc97f","#ef3b2c","#662506","#a6cee3","#fb9a99","#984ea3","#ffff33")), ...)
}
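# Minimal usage sketch (not part of the original theme file): assumes ggplot2
# is installed; mtcars ships with R. Wrapped in a function so sourcing this
# file does not draw anything.
example_publication_plot <- function() {
  library(ggplot2)
  ggplot(mtcars, aes(x = wt, y = mpg, colour = factor(cyl))) +
    geom_point() +
    labs(title = "Fuel efficiency vs weight", colour = "Cylinders") +
    theme_Publication() +
    scale_colour_Publication()
}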
|
{"hexsha": "f54266cdc64eb284bb10e536842591308dcfaf3c", "size": 1984, "ext": "r", "lang": "R", "max_stars_repo_path": "Raw data/publication_theme.r", "max_stars_repo_name": "tshalev/WRC-genome-paper", "max_stars_repo_head_hexsha": "8fbd7d389ee75ea08a0a978f74c4910cd51a4904", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Raw data/publication_theme.r", "max_issues_repo_name": "tshalev/WRC-genome-paper", "max_issues_repo_head_hexsha": "8fbd7d389ee75ea08a0a978f74c4910cd51a4904", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Raw data/publication_theme.r", "max_forks_repo_name": "tshalev/WRC-genome-paper", "max_forks_repo_head_hexsha": "8fbd7d389ee75ea08a0a978f74c4910cd51a4904", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2127659574, "max_line_length": 160, "alphanum_fraction": 0.5786290323, "num_tokens": 501}
|
(*
Copyright 2014 Cornell University
Copyright 2015 Cornell University
Copyright 2016 Cornell University
This file is part of VPrl (the Verified Nuprl project).
VPrl is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VPrl is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VPrl. If not, see <http://www.gnu.org/licenses/>.
Websites: http://nuprl.org/html/verification/
http://nuprl.org/html/Nuprl2Coq
https://github.com/vrahli/NuprlInCoq
Authors: Abhishek Anand & Vincent Rahli
*)
Require Export approx.
Require Export atom_ren.
(** printing # $\times$ #×# *)
(** printing <=> $\Leftrightarrow$ #⇔# *)
(** printing $ $\times$ #×# *)
(** printing & $\times$ #×# *)
Lemma respects_alpha_r2 {o} :
forall (r1 r2 : bin_rel (@NTerm o)),
respects_alpha_r r1
-> respects_alpha_r r2
-> respects_alpha_r (r1 \2/ r2).
Proof.
introv resp1 resp2; introv aeq r.
allsimpl; repndors.
- eapply resp1 in aeq; apply aeq in r; auto.
- eapply resp2 in aeq; apply aeq in r; auto.
Qed.
Hint Resolve respects_alpha_r2 : slow.
Lemma respects_alpha_l2 {o} :
forall (r1 r2 : bin_rel (@NTerm o)),
respects_alpha_l r1
-> respects_alpha_l r2
-> respects_alpha_l (r1 \2/ r2).
Proof.
introv resp1 resp2; introv aeq r.
allsimpl; repndors.
- eapply resp1 in aeq; apply aeq in r; auto.
- eapply resp2 in aeq; apply aeq in r; auto.
Qed.
Hint Resolve respects_alpha_l2 : slow.
Lemma respects_alpha_r_bot2 {o} :
respects_alpha_r (@bot2 o).
Proof.
introv aeq x; tcsp.
Qed.
Hint Resolve respects_alpha_r_bot2 : slow.
Lemma respects_alpha_l_bot2 {o} :
respects_alpha_l (@bot2 o).
Proof.
introv aeq x; tcsp.
Qed.
Hint Resolve respects_alpha_l_bot2 : slow.
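(* [respects_alpha_r R] (resp. [respects_alpha_l R]) says that R is invariant
   under alpha-renaming of its right (resp. left) argument. The lemmas above
   show that this property is closed under disjunction and holds vacuously
   for [bot2], the always-false relation. *)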
Lemma approx_bad_implies_approx {o} :
forall lib (t1 t2 : @NTerm o),
approx_bad lib t1 t2 -> approx lib t1 t2.
Proof.
intro lib.
pose proof
(approx_acc
lib
(fun a b => approx_bad lib a b)
(@bot2 o)) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] => assert B as h;[|introv ap; eapply h; eauto]
end.
introv ap.
apply HH; auto; clear HH ap.
introv hb hr ap.
inversion ap as [? ? ? cl]; subst; clear ap.
constructor.
allunfold @close_comput; repnd; dands; auto.
- introv comp.
clear cl3 cl.
apply cl2 in comp; exrepnd.
eexists; dands; eauto.
allunfold @lblift; dands; repnd; auto.
introv i.
apply comp0 in i.
allunfold @blift; exrepnd.
eexists; eexists; eexists; dands; eauto.
allunfold @olift; repnd; dands; auto.
- introv comp.
clear cl2 cl.
apply cl3 in comp; exrepnd.
eexists; eexists; dands; eauto.
- introv comp.
apply cl4 in comp; exrepnd.
eexists; dands; eauto.
Qed.
Lemma approx_implies_approx_bad {o} :
forall lib (t1 t2 : @NTerm o),
approx lib t1 t2 -> approx_bad lib t1 t2.
Proof.
intro lib.
cofix IND.
introv apr.
inversion apr as [cl].
constructor.
allunfold @close_comput; repnd; dands; auto.
- introv comp.
clear cl3 cl.
apply cl2 in comp; exrepnd.
eexists; dands; eauto.
allunfold @lblift; dands; repnd; auto.
introv i.
apply comp0 in i; clear comp0.
allunfold @blift; exrepnd.
eexists; eexists; eexists; dands; eauto.
allunfold @olift; repnd; dands; auto.
introv wf isp1 isp2.
pose proof (i1 sub wf isp1 isp2) as h; clear i1.
repndors.
+ apply IND; auto.
+ unfold bot2 in h; tcsp.
- introv comp.
clear cl2 cl.
apply cl3 in comp; exrepnd.
repndors; try (complete (allunfold @bot2; sp)).
eexists; eexists; dands; eauto.
- introv comp.
apply cl4 in comp; exrepnd.
eexists; dands; eauto.
introv.
pose proof (comp0 n) as h; repndors; tcsp.
unfold bot2 in h; tcsp.
Qed.
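(* Together, [approx_bad_implies_approx] and [approx_implies_approx_bad] show
   that [approx] (i.e., [approx_aux] with [bot2] plugged in) and the directly
   coinductive [approx_bad] coincide. *)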
Lemma approx_open_simpler_equiv_r {o} :
forall lib (a c : @NTerm o) r,
respects_alpha_r (approx_aux lib r \2/ r)
-> respects_alpha_l (approx_aux lib r \2/ r)
-> (simpl_olift (approx_aux lib r \2/ r) a c <=> olift (approx_aux lib r \2/ r) a c).
Proof.
introv rr rl.
split.
- intro Hos.
repnud Hos.
unfold olift.
dands;auto.
introv Hwfs Hispa Hispc.
pose proof (lsubst_trim2_alpha1 _ _ _ Hispc Hispa) as Xtrim.
pose proof (lsubst_trim2_alpha2 _ _ _ Hwfs Hispc Hispa) as Xprog.
allsimpl. repnd. rename Xtrim into Xtrima.
rename Xtrim0 into Xtrimc.
revert Hispa Hispc. alpharw Xtrima. alpharw Xtrimc.
introv Hispa Hispc.
pose proof (Hos (sub_keep_first sub (free_vars c ++ free_vars a))) as h.
repeat (autodimp h hyp).
unfold respects2_r in rr.
unfold respects2_l in rl.
pose proof (rr (lsubst a (sub_keep_first sub (free_vars c ++ free_vars a)))
(lsubst c (sub_keep_first sub (free_vars c ++ free_vars a)))
(lsubst c sub))
as h1.
autodimp h1 hyp; eauto 2 with slow.
apply h1 in h; clear h1.
pose proof (rl (lsubst a (sub_keep_first sub (free_vars c ++ free_vars a)))
(lsubst c sub)
(lsubst a sub))
as h1.
autodimp h1 hyp; eauto 2 with slow.
- intro Hos.
repnud Hos.
unfold olift in Hos; unfold simpl_olift; repnd; dands; auto.
introv ps isp1 isp2.
pose proof (Hos sub) as h.
repeat (autodimp h hyp); eauto with slow.
Qed.
(*
Definition rens_utokens {o} (rens : list (@utok_ren o)) (t : NTerm) :=
ren_utokens (flatten rens) t.
Inductive correct_rens {o} : list (@utok_ren o) -> list (get_patom_set o) -> Type :=
| correct_rens_nil : forall atoms, correct_rens [] atoms
| correct_rens_cons :
forall ren rens atoms,
disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) atoms)
-> no_repeats (range_utok_ren ren)
-> no_repeats (dom_utok_ren ren)
-> disjoint (dom_utok_ren ren) (range_utok_ren ren)
-> correct_rens rens (map (ren_atom ren) atoms)
-> correct_rens (ren :: rens) atoms.
Lemma approx_change_utoks {o} :
forall lib (t1 t2 : @NTerm o) rens,
correct_rens rens (get_utokens t1 ++ get_utokens t2)
-> approx lib t1 t2
-> approx lib (rens_utokens rens t1) (rens_utokens rens t2).
Proof.
intro lib.
(*
cofix IND.
introv nr1 nr2 disj1 disj2 apr.
*)
pose proof
(approx_acc
lib
(fun a b => {t1,t2 : NTerm
$ {ren : utok_ren
$ approx lib t1 t2
# no_repeats (range_utok_ren ren)
# no_repeats (dom_utok_ren ren)
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
# a = ren_utokens ren t1
# b = ren_utokens ren t2}})
(@bot2 o)) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] =>
assert B as h;
[|introv nr1 nr2 d1 d2 k; eapply h;
eexists;eexists;eexists;dands;eauto;fail]
end.
apply HH; clear HH.
introv hb hr h; exrepnd; subst.
rename h1 into apr.
constructor.
(*inversion apr as [? ? ? cl]; subst; clear apr.*)
inversion apr as [cl]; clear apr.
allunfold @close_comput; repnd; dands; tcsp; eauto with slow; introv comp.
- clear cl3 cl.
dup comp as comp1.
apply (computes_to_value_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
auto;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
rw @ren_utokens_can in comp1.
dup comp1 as comp2.
apply cl2 in comp2; exrepnd.
apply (computes_to_value_ren_utokens _ _ _ ren) in comp2; auto.
rw @ren_utokens_can in comp2.
assert (match
get_utok_c
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
with
| Some a => NUTok (ren_atom ren a)
| None =>
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
end = c) as e.
{ destruct c; allsimpl; tcsp.
rw @inv_ren_atom2; auto.
apply computes_to_value_preserves_utokens in comp; allsimpl.
rw subset_cons_l in comp; repnd.
intro i.
rw @get_utokens_ren_utokens in comp3.
rw in_map_iff in comp3; exrepnd; subst.
rw in_diff in i; repnd.
destruct (ren_atom_or ren a) as [d|d]; tcsp.
rw d in i0.
apply in_dom_in_range in i0; auto.
}
rw e in comp2; clear e.
eexists; dands;[exact comp2|].
unfold lblift; unfold lblift in comp0.
allrw map_length; repnd; dands; auto.
introv i.
applydup comp0 in i.
unfold blift; unfold blift in i0.
exrepnd.
repeat (onerw @selectbt_map; auto; try omega).
remember (selectbt tl_subterms n) as b1.
remember (selectbt tr_subterms n) as b2.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i1.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i2.
assert (disjoint (dom_utok_ren ren)
(diff (get_patom_deq o)
(range_utok_ren ren)
(get_utokens_b b1))) as d.
{
(* clear IND.*)
admit.
}
rw @inv_ren_utokens_b2 in i2; auto.
allsimpl.
exists lv (ren_utokens ren nt1) (ren_utokens ren nt2); dands; auto.
unfold olift; unfold olift in i0; repnd.
dands.
{ apply nt_wf_ren_utokens; auto. }
{ apply nt_wf_ren_utokens; auto. }
introv wfs isp1 isp2.
pose proof (ex_new_utok_ren
(dom_utok_ren ren)
(dom_utok_ren ren
++ range_utok_ren ren
++ get_utokens_sub sub
++ get_utokens nt1
++ get_utokens nt2)) as h.
destruct h as [ren' h]; repnd.
allrw disjoint_app_l; repnd.
pose proof (lsubst_ren_utokens2 nt1 ren ren' sub) as e1.
repeat (autodimp e1 hyp); eauto 3 with slow.
pose proof (lsubst_ren_utokens2 nt2 ren ren' sub) as e2.
repeat (autodimp e2 hyp); eauto 3 with slow.
pose proof (ren_utokens_ren_utokens
(lsubst nt1 (ren_utokens_sub ren' sub))
(inv_utok_ren ren')
ren) as f1.
rw @compose_ren_utokens_trivial in f1;
[|rw @dom_utok_ren_inv_utok_ren; eauto 2 with slow].
pose proof (ren_utokens_ren_utokens
(lsubst nt2 (ren_utokens_sub ren' sub))
(inv_utok_ren ren')
ren) as f2.
rw @compose_ren_utokens_trivial in f2;
[|rw @dom_utok_ren_inv_utok_ren; eauto 2 with slow].
rw <- f1 in e1; rw <- f2 in e2; clear f1 f2.
rewrite e1, e2; clear e1 e2.
pose proof (i0 (ren_utokens_sub ren' sub)) as q; clear i0.
repeat (autodimp q hyp); eauto 2 with slow.
{ apply isprogram_lsubst_iff in isp1; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp1.
apply isp1 in j; exrepnd.
rw @sub_find_ren_utokens_sub; rw j1.
eexists; dands; eauto.
- apply nt_wf_ren_utokens; auto.
- unfold closed; rw @free_vars_ren_utokens; auto. }
{ apply isprogram_lsubst_iff in isp2; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp2.
apply isp2 in j; exrepnd.
rw @sub_find_ren_utokens_sub; rw j1.
eexists; dands; eauto.
- apply nt_wf_ren_utokens; auto.
- unfold closed; rw @free_vars_ren_utokens; auto. }
repndors; tcsp; try (complete (allunfold @bot2; sp)).
(*
pose proof
(hr
(ren_utokens ren (lsubst nt1 (ren_utokens_sub ren' sub)))
(ren_utokens ren (lsubst nt2 (ren_utokens_sub ren' sub))))
as ind1.
autodimp ind1 hyp.
{ exists
(lsubst nt1 (ren_utokens_sub ren' sub))
(lsubst nt2 (ren_utokens_sub ren' sub))
ren; dands; auto.
- admit.
- admit.
}
*)
apply IND; tcsp.
{ rw @range_utok_ren_inv_utok_ren; rw h0; auto. }
{ rw @dom_utok_ren_inv_utok_ren; auto. }
{ clear IND; admit. }
{ clear IND; admit. }
apply IND; tcsp.
{ clear IND; admit. }
{ clear IND; admit. }
- clear IND; admit.
- clear IND; admit.
Qed.
(*
apply hr.
exists (lsubst nt1 (ren_utokens_sub ren' sub))
(lsubst nt2 (ren_utokens_sub ren' sub))
ren;
dands; eauto 3 with slow.
* rw @range_utok_ren_app.
rw @range_utok_ren_inv_utok_ren.
apply no_repeats_app; dands; eauto 3 with slow.
{ rw h7; auto. }
{ eauto 3 with slow. }
pose proof (hr (ren_utokens (ren ++ inv_utok_ren ren')
(lsubst nt1 (ren_utokens_sub ren' sub)))
(ren_utokens (ren ++ inv_utok_ren ren')
(lsubst nt2 (ren_utokens_sub ren' sub)))
*)
Qed.
XXXXXXXXX
*)
(*
Lemma alpha_eq_swap_lsubst_aux_var_ren {o} :
forall (t : @NTerm o) vs1 vs2,
no_repeats vs2
-> disjoint vs2 vs1
-> disjoint vs2 (free_vars t)
-> disjoint vs2 (bound_vars t)
-> alpha_eq (swap (mk_swapping vs1 vs2) t)
(lsubst_aux t (var_ren vs1 vs2)).
Proof.
nterm_ind1s t as [v|op bs ind] Case; introv norep disj1 disj2 disj3.
- Case "vterm".
allsimpl.
allrw disjoint_singleton_r.
rw @sub_find_var_ren_as_option_map.
rw swapvar_eq; eauto 2 with slow.
remember (renFind (mk_swapping vs1 vs2) v) as rf; destruct rf; allsimpl; auto.
- Case "oterm"; allsimpl.
apply alpha_eq_oterm_combine; allrw map_length; dands; auto.
introv i.
allrw <- @map_combine; allrw in_map_iff; exrepnd; cpx.
destruct a0 as [l1 t1].
destruct a as [l2 t2]; allsimpl.
applydup in_combine in i1; repnd.
disj_flat_map; allsimpl.
allrw disjoint_app_r; repnd.
Qed.
*)
Definition approx_or_bts {o} lib (r : bin_rel (@NTerm o)) :=
lblift (olift (approx_aux lib r \2/ r)).
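(* [approx_or_bts] lifts the (possibly bottomed-out) approximation relation to
   lists of bound terms: [olift] closes it under substitution and [lblift]
   applies it pointwise to corresponding bound terms. *)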
Lemma approx_or_bts_alpha_eq_bterms_l {o} :
forall lib (bs1 bs2 bs3 : list (@BTerm o)) r,
alpha_eq_bterms bs1 bs2
-> approx_or_bts lib r bs1 bs3
-> approx_or_bts lib r bs2 bs3.
Proof.
introv aeq apr.
allunfold @approx_or_bts.
allunfold @lblift; repnd.
allunfold @alpha_eq_bterms; repnd.
dands; tcsp.
introv i.
rw <- aeq0 in i; applydup apr in i.
assert (alpha_eq_bterm (selectbt bs1 n) (selectbt bs2 n)) as a.
{ apply aeq.
unfold selectbt.
apply in_nth_combine; auto. }
eapply blift_alpha_fun_l; eauto with slow.
Qed.
Lemma approx_or_bts_alpha_eq_bterms_r {o} :
forall lib (bs1 bs2 bs3 : list (@BTerm o)) r,
alpha_eq_bterms bs2 bs3
-> approx_or_bts lib r bs1 bs3
-> approx_or_bts lib r bs1 bs2.
Proof.
introv aeq apr.
allunfold @approx_or_bts.
allunfold @lblift; repnd.
allunfold @alpha_eq_bterms; repnd.
dands; tcsp; try omega.
introv i.
applydup apr in i.
assert (alpha_eq_bterm (selectbt bs2 n) (selectbt bs3 n)) as a.
{ apply aeq.
unfold selectbt.
apply in_nth_combine; auto; try omega. }
eapply blift_alpha_fun_r; eauto with slow.
Qed.
Lemma respects_alpha_r_approx_aux {o} :
forall lib (r : bin_rel (@NTerm o)),
respects_alpha_r r
-> respects_alpha_r (approx_aux lib r).
Proof.
introv resp; introv aeq apr.
revert resp a b b' aeq apr.
pose proof
(approx_acc
lib
(fun a b => {c : NTerm
$ respects_alpha_r r
# alpha_eq c b
# approx_aux lib r a c})
r) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] =>
assert B as h;
[|introv resp aeq apr; eapply h; exists b; dands; auto; fail]
end.
apply HH; clear HH.
introv hb hr h; exrepnd; subst.
rename h1 into resp.
rename h2 into aeq.
rename h0 into apr.
inversion apr as [cl].
constructor.
rename x0 into a.
rename c into b.
rename x1 into c.
allunfold @close_comput; repnd; dands; tcsp.
- apply alphaeq_preserves_program in aeq; apply aeq; auto.
- clear cl3 cl.
introv comp.
apply cl2 in comp.
exrepnd.
eapply compute_to_value_alpha in comp1; eauto 3 with slow; exrepnd.
applydup @alpha_eq_oterm_implies_combine in comp2; exrepnd; subst.
exists bs'; dands; auto.
allunfold @lblift; repnd; dands; auto; try omega.
introv i.
applydup comp0 in i.
allunfold @blift; exrepnd.
pose proof (comp4 (selectbt tr_subterms n) (selectbt bs' n)) as h.
autodimp h hyp.
{ unfold selectbt; apply in_nth_combine; auto; try omega. }
exists lv nt1 nt2; dands; eauto 3 with slow.
allunfold @olift; repnd; dands; auto.
introv wfs isp1 isp2.
pose proof (i0 sub wfs isp1 isp2) as k; clear i0.
repndors; tcsp.
right; apply hr.
exists (lsubst nt2 sub); dands; auto.
- clear cl2 cl.
introv comp.
apply cl3 in comp.
exrepnd.
eapply compute_to_exception_alpha in comp0; eauto 3 with slow; exrepnd.
exists a'0 t2'; dands; auto.
+ clear comp1.
repndors; tcsp.
* right.
apply hr.
exists a'; dands; auto.
* right.
apply hb; auto.
eapply resp; eauto.
+ clear comp2.
repndors; tcsp.
* right.
apply hr.
exists e'; dands; auto.
* right.
apply hb; auto.
eapply resp; eauto.
(*
- clear cl2 cl3.
introv comp.
apply cl in comp.
eapply compute_to_marker_alpha in comp; eauto.
*)
- introv comp.
apply cl4 in comp; exrepnd.
eapply computes_to_seq_alpha in comp1; eauto 3 with slow; exrepnd.
eexists; dands; eauto.
introv.
pose proof (comp0 n) as h; clear comp0.
pose proof (comp2 n) as q; clear comp2.
repndors; tcsp; right.
+ apply hr.
eexists; dands; eauto.
+ apply hb.
eapply resp; eauto.
Qed.
Hint Resolve respects_alpha_r_approx_aux : slow.
Lemma alpha_eq_respects_nt_wf {o} :
forall (a b : @NTerm o),
alpha_eq a b
-> nt_wf a
-> nt_wf b.
Proof.
introv aeq wf.
apply alphaeq_preserves_wf in aeq; apply aeq; auto.
Qed.
Hint Resolve alpha_eq_respects_nt_wf : slow.
Lemma alpha_eq_respects_nt_wf_inv {o} :
forall (a b : @NTerm o),
alpha_eq a b
-> nt_wf b
-> nt_wf a.
Proof.
introv aeq wf; apply alpha_eq_sym in aeq; eauto 3 with slow.
Qed.
Hint Resolve alpha_eq_respects_nt_wf_inv : slow.
Lemma respects_alpha_l_approx_aux {o} :
forall lib (r : bin_rel (@NTerm o)),
respects_alpha_l r
-> respects_alpha_l (approx_aux lib r).
Proof.
introv resp; introv aeq apr.
apply alpha_eq_sym in aeq.
revert resp a b a' aeq apr.
pose proof
(approx_acc
lib
(fun a b => {c : NTerm
$ respects_alpha_l r
# alpha_eq a c
# approx_aux lib r c b})
r) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] =>
assert B as h;
[|introv resp aeq apr; eapply h; exists a; dands; auto; fail]
end.
apply HH; clear HH.
introv hb hr h; exrepnd; subst.
rename h1 into resp.
rename h2 into aeq.
rename h0 into apr.
inversion apr as [cl].
constructor.
rename x0 into a.
rename c into b.
rename x1 into c.
allunfold @close_comput; repnd; dands; tcsp.
- apply alphaeq_preserves_program in aeq; apply aeq; auto.
- clear cl3 cl.
introv comp.
eapply compute_to_value_alpha in comp;[| |exact aeq]; eauto 3 with slow.
exrepnd.
apply @alpha_eq_oterm_implies_combine in comp0; exrepnd; subst.
apply cl2 in comp1; clear cl2.
exrepnd.
exists tr_subterms; dands; auto.
allunfold @lblift; repnd; dands; auto; try omega.
introv i.
rw comp3 in i.
applydup comp0 in i; clear comp0.
allunfold @blift; exrepnd.
pose proof (comp2 (selectbt tl_subterms n) (selectbt bs' n)) as h.
autodimp h hyp.
{ unfold selectbt; apply in_nth_combine; auto; try omega. }
exists lv nt1 nt2; dands; eauto 3 with slow.
allunfold @olift; repnd; dands; auto.
introv wfs isp1 isp2.
pose proof (i0 sub wfs isp1 isp2) as k; clear i0.
repndors; tcsp.
right; apply hr.
exists (lsubst nt1 sub); dands; auto.
- clear cl2 cl.
introv comp.
eapply compute_to_exception_alpha in comp; eauto 3 with slow; exrepnd.
apply cl3 in comp0.
exrepnd.
exists a'0 e'; dands; auto.
+ clear comp0.
repndors; tcsp.
* right.
apply hr.
exists a'; dands; auto.
* right.
apply hb; auto.
eapply resp;[apply alpha_eq_sym; eauto|]; auto.
+ clear comp4.
repndors; tcsp.
* right.
apply hr.
exists t2'; dands; auto.
* right.
apply hb; auto.
eapply resp;[apply alpha_eq_sym; eauto|]; auto.
(*
- clear cl2 cl3.
introv comp.
apply (compute_to_marker_alpha _ _ b) in comp; auto.
*)
- introv comp.
eapply computes_to_seq_alpha in comp;[| | eauto]; eauto 3 with slow; exrepnd.
apply cl4 in comp1; exrepnd.
eexists; dands; eauto.
introv.
pose proof (comp0 n) as h; clear comp0.
pose proof (comp2 n) as q; clear comp2.
repndors; tcsp; right.
+ apply hr.
eexists; dands; eauto.
+ apply hb.
apply alpha_eq_sym in h.
eapply resp; eauto.
Qed.
Hint Resolve respects_alpha_l_approx_aux : slow.
Theorem approx_acc_resp {p} :
forall (lib : library)
(l r0 : bin_rel (@NTerm p))
(resp_l_l : respects_alpha_l l)
(resp_r_l : respects_alpha_r l)
(resp_l_r0 : respects_alpha_l r0)
(resp_r_r0 : respects_alpha_r r0)
(OBG : forall (r: bin_rel NTerm)
(INC: r0 =2> r)
(CIH: l =2> r)
(resp_r : respects_alpha_r r)
(resp_l : respects_alpha_l r),
l =2> approx_aux lib r),
l =2> approx_aux lib r0.
Proof.
intros.
assert (SIM: approx_aux lib (r0 \2/ l) x0 x1) by eauto 6 with slow.
clear PR; revert x0 x1 SIM; cofix CIH.
intros; destruct SIM; econstructor; eauto.
invertsna c Hcl. repnd.
unfold close_comput.
dands; eauto.
- introv Hcomp.
apply Hcl2 in Hcomp.
exrepnd. exists tr_subterms. split; eauto.
eapply le_lblift2; eauto.
apply le_olift.
unfold le_bin_rel.
introv Hap.
repndors; tcsp.
left.
apply CIH; apply OBG; eauto 3 with slow.
- introv Hcomp.
apply Hcl3 in Hcomp; exrepnd.
exists a' e'; dands; auto; repndors; auto; tcsp;
try (complete (left; apply CIH; apply OBG; tcsp; eauto 3 with slow)).
- introv comp.
apply Hcl4 in comp; exrepnd.
eexists; dands; eauto.
introv.
pose proof (comp0 n) as h; clear comp0; repndors; tcsp.
left.
apply CIH; apply OBG; tcsp; eauto 3 with slow.
Qed.
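(* [approx_acc_resp] strengthens [approx_acc]: when both the candidate [l] and
   the parameter [r0] respect alpha-equality on each side, the coinduction
   hypothesis [r] handed to [OBG] may additionally be assumed to respect
   alpha-equality on both sides. *)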
(*
Lemma approx_change_utoks_lsubst_aux {o} :
forall lib (t1 t2 : @NTerm o) sub1 sub2 l,
nrut_sub l sub1
-> nrut_sub l sub2
-> dom_sub sub1 = dom_sub sub2
-> no_repeats (dom_sub sub1)
-> subset (get_utokens t1) l
-> subset (get_utokens t2) l
-> approx lib (lsubst_aux t1 sub1) (lsubst_aux t2 sub1)
-> approx lib (lsubst_aux t1 sub2) (lsubst_aux t2 sub2).
Proof.
intro lib.
pose proof
(approx_acc_resp
lib
(fun a b => {t1,t2 : NTerm
$ {sub1,sub2 : Sub
$ {l : list (get_patom_set o)
$ nrut_sub l sub1
# nrut_sub l sub2
# dom_sub sub1 = dom_sub sub2
# no_repeats (dom_sub sub1)
# subset (get_utokens t1) l
# subset (get_utokens t2) l
# approx lib (lsubst_aux t1 sub1) (lsubst_aux t2 sub1)
# alpha_eq a (lsubst_aux t1 sub2)
# alpha_eq b (lsubst_aux t2 sub2)}}})
(@bot2 o)) as HH.
allsimpl.
match goal with
[ HH : _ -> _ -> _ -> _ -> _ -> ?B |- _ ] =>
assert B as h;
[|introv nr1 nr2 e nr ss1 ss2 apr; eapply h;
exists t1 t2 sub1 sub2 l; dands; auto; fail]
end.
apply HH; clear HH; eauto 2 with slow.
{ introv aeq h; allsimpl; exrepnd; subst.
exists t1 t2 sub1 sub2 l; dands; eauto with slow. }
{ introv aeq h; allsimpl; exrepnd; subst.
exists t1 t2 sub1 sub2 l; dands; eauto with slow. }
introv hb hr rar ral h; exrepnd; subst.
rename h1 into nrut1.
rename h2 into nrut2.
rename h3 into eqdoms.
rename h4 into norep.
rename h5 into ss1.
rename h6 into ss2.
rename h7 into apr.
rename h8 into aeqls1.
rename h0 into aeqls2.
pose proof (respects_alpha_r_approx_aux lib r rar) as rar_aa.
pose proof (respects_alpha_l_approx_aux lib r ral) as ral_aa.
eapply rar_aa;[apply alpha_eq_sym;exact aeqls2|].
eapply ral_aa;[apply alpha_eq_sym;exact aeqls1|].
constructor.
inversion apr as [cl]; clear apr; subst.
allunfold @close_comput; repnd.
prove_and isp1.
{
rw <- @cl_lsubst_lsubst_aux in cl0; eauto 2 with slow.
rw @isprogram_lsubst_iff in cl0.
repnd.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow;
apply isprogram_lsubst_iff; dands; eauto; introv i.
apply cl0 in i; exrepnd.
pose proof (sub_find_some_eq_doms_nrut_sub sub1 sub2 v l nrut2 eqdoms) as h.
rw i1 in h; exrepnd.
rw h0; eexists; dands; eauto 2 with slow.
}
prove_and isp2.
{
rw <- @cl_lsubst_lsubst_aux in cl1; eauto 2 with slow.
rw @isprogram_lsubst_iff in cl1.
repnd.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow;
apply isprogram_lsubst_iff; dands; eauto; introv i.
apply cl1 in i; exrepnd.
pose proof (sub_find_some_eq_doms_nrut_sub sub1 sub2 v l nrut2 eqdoms) as h.
rw i1 in h; exrepnd.
rw h0; eexists; dands; eauto 2 with slow.
}
dands.
- clear cl3 cl.
introv comp.
rw <- @cl_lsubst_lsubst_aux in comp; eauto with slow.
pose proof (computes_to_value_change_utok_sub
lib t1 (oterm (Can c) tl_subterms) sub2 sub1
comp) as h.
repeat (autodimp h hyp); eauto 2 with slow.
{ unfold nrut_sub in nrut2; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut2; exact nrut2|]; auto. }
{ unfold nrut_sub in nrut1; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut1; exact nrut1|]; auto. }
exrepnd.
dup h5 as comp1.
repeat (rw <- @cl_lsubst_lsubst_aux in cl2; eauto with slow).
unfold close_compute_val in cl2.
remember (get_utok_c c) as guo; symmetry in Heqguo; destruct guo.
{
apply get_utok_c_some in Heqguo; subst; allsimpl.
dup comp as isv; unfold computes_to_value in isv; repnd.
apply compute_max_steps_eauto2 in isv.
apply isprogram_implies_wf in isv; auto.
apply wf_term_utok in isv; subst; allsimpl; fold_terms.
apply alpha_eq_mk_utoken in h0.
rw @cl_lsubst_lsubst_aux in h0; eauto 2 with slow.
destruct w as [v|op bs]; allsimpl.
- allrw subvars_singleton_l.
remember (sub_find sub2 v) as sf; symmetry in Heqsf; destruct sf; ginv; subst.
pose proof (sub_find_some_eq_doms_nrut_sub sub2 sub1 v l nrut1) as e.
autodimp e hyp; rw Heqsf in e; exrepnd.
rw @cl_lsubst_lsubst_aux in h1; allsimpl; eauto 2 with slow.
rw e0 in h1.
apply alpha_eq_sym in h1.
apply alpha_eq_mk_utoken in h1; subst.
apply cl2 in comp1; exrepnd.
unfold lblift in comp0; allsimpl; repnd; cpx; clear comp0; fold_terms.
pose proof (computes_to_value_change_utok_sub
lib t2 (mk_utoken a) sub1 sub2 comp1) as q.
repeat (autodimp q hyp); eauto 3 with slow.
{ unfold nrut_sub in nrut1; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut1; exact nrut1|]; auto. }
{ unfold nrut_sub in nrut2; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut2; exact nrut2|]; auto. }
exrepnd.
destruct w as [v'|op' bs']; allsimpl; dgc.
+ allrw subvars_singleton_l.
rw @cl_lsubst_lsubst_aux in q1; eauto 2 with slow.
rw @cl_lsubst_lsubst_aux in q0; eauto 2 with slow.
allsimpl.
pose proof (sub_find_some_eq_doms_nrut_sub sub1 sub2 v' l nrut2) as e'.
autodimp e' hyp.
remember (sub_find sub1 v') as sf'; symmetry in Heqsf'; destruct sf'.
* exrepnd.
rw e'0 in q1.
apply alpha_eq_mk_utoken in q0; subst.
apply alpha_eq_sym in q1.
apply alpha_eq_mk_utoken in q1; subst.
pose proof (nrut_sub_sub_find_same sub1 v v' (mk_utoken a) l) as e.
repeat (autodimp e hyp); exrepnd; ginv; GC.
rw e'0 in Heqsf; ginv.
exists ([] : list (@BTerm o)); fold_terms.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow; dands; auto.
unfold lblift; simpl; sp.
* inversion q0.
+ allrw disjoint_app_r; repnd.
allrw subset_app; repnd.
rw @cl_lsubst_lsubst_aux in q0; eauto 2 with slow; allsimpl.
apply alpha_eq_mk_utoken in q0; subst.
inversion q0; subst.
destruct bs'; allsimpl; cpx; fold_terms; GC; dgc.
allrw disjoint_singleton_r.
allrw singleton_subset.
rw @cl_lsubst_lsubst_aux in q1; eauto 2 with slow; allsimpl; fold_terms.
apply alpha_eq_sym in q1; apply alpha_eq_mk_utoken in q1; subst.
apply sub_find_some in e0; apply in_sub_eta in e0; repnd.
destruct q6.
rw lin_flat_map; exists (mk_utoken a); simpl; sp.
- allrw disjoint_app_r; allrw subset_app; repnd.
inversion h0; subst; allsimpl; cpx.
destruct bs; allsimpl; cpx; fold_terms; GC; dgc.
allrw disjoint_singleton_r; allrw singleton_subset.
rw @cl_lsubst_lsubst_aux in h1; eauto 2 with slow; allsimpl; fold_terms.
apply alpha_eq_sym in h1; apply alpha_eq_mk_utoken in h1; subst.
apply cl2 in h5; exrepnd.
unfold lblift in h0; allsimpl; repnd; cpx; clear h0; fold_terms.
pose proof (computes_to_value_change_utok_sub
lib t2 (mk_utoken g) sub1 sub2 h1) as q.
repeat (autodimp q hyp); eauto 3 with slow.
{ unfold nrut_sub in nrut1; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut1; exact nrut1|]; auto. }
{ unfold nrut_sub in nrut2; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut2; exact nrut2|]; auto. }
exrepnd.
destruct w as [v|op bs]; allsimpl; dgc.
+ allrw subvars_singleton_l.
rw @cl_lsubst_lsubst_aux in q1; eauto 2 with slow.
rw @cl_lsubst_lsubst_aux in q0; eauto 2 with slow.
allsimpl.
pose proof (sub_find_some_eq_doms_nrut_sub sub1 sub2 v l nrut2) as e.
autodimp e hyp.
remember (sub_find sub1 v) as sf; symmetry in Heqsf; destruct sf.
* exrepnd; rw e0 in q1.
apply alpha_eq_sym in q1.
allapply @alpha_eq_mk_utoken; subst; allsimpl.
unfold nrut_sub in nrut1; repnd.
apply ss1 in h6; apply nrut1 in h6; destruct h6.
apply sub_find_some in Heqsf; apply in_sub_eta in Heqsf; repnd.
rw lin_flat_map; exists (mk_utoken g); simpl; sp.
* inversion q0.
+ allrw disjoint_app_r; allrw subset_app; repnd.
rw @cl_lsubst_lsubst_aux in q0; eauto 2 with slow; allsimpl.
apply alpha_eq_mk_utoken in q0.
inversion q0; subst; destruct bs; allsimpl; cpx; fold_terms; GC; dgc.
allrw disjoint_singleton_r; allrw singleton_subset.
rw @cl_lsubst_lsubst_aux in q1; eauto 2 with slow; allsimpl.
apply alpha_eq_sym in q1; apply alpha_eq_mk_utoken in q1; subst.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow.
exists ([] : list (@BTerm o)); dands; auto.
unfold lblift; simpl; sp.
}
destruct w as [v|op bs].
{
rw @cl_lsubst_lsubst_aux in h0; allsimpl; eauto 2 with slow.
remember (sub_find sub2 v) as sf; symmetry in Heqsf; destruct sf.
- apply sub_find_some in Heqsf.
eapply in_nrut_sub in Heqsf; eauto; exrepnd; subst.
apply alpha_eq_sym in h0; apply alpha_eq_mk_utoken in h0; subst.
inversion h0; subst; allsimpl; ginv.
- inversion h0.
}
rw @cl_lsubst_lsubst_aux in h0; eauto 2 with slow; allsimpl.
apply alpha_eq_oterm_combine2 in h0; repnd; subst.
allrw map_length; allsimpl; GC.
rw @cl_lsubst_lsubst_aux in h1; eauto 2 with slow; allsimpl.
apply alpha_eq_sym in h1; apply alpha_eq_oterm_implies_combine in h1; exrepnd; subst.
allrw map_length; GC.
allrw disjoint_app_r; allrw subset_app; repnd.
apply cl2 in h5; exrepnd.
pose proof (computes_to_value_change_utok_sub
lib t2 (oterm (Can c) tr_subterms) sub1 sub2
h5) as q.
repeat (autodimp q hyp); eauto 3 with slow.
{ unfold nrut_sub in nrut1; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut1; exact nrut1|]; auto. }
{ unfold nrut_sub in nrut2; repnd.
eapply subset_disjoint_r;[apply disjoint_sym in nrut2; exact nrut2|]; auto. }
exrepnd.
destruct w as [v|op'' bs''].
{
rw @cl_lsubst_lsubst_aux in q0; allsimpl; eauto 2 with slow.
remember (sub_find sub1 v) as sf; symmetry in Heqsf; destruct sf.
- apply sub_find_some in Heqsf.
eapply in_nrut_sub in Heqsf; eauto; exrepnd; subst.
apply alpha_eq_sym in q0; apply alpha_eq_mk_utoken in q0; subst.
inversion q0; subst; allsimpl; ginv.
- inversion q0.
}
rw @cl_lsubst_lsubst_aux in q0; eauto 2 with slow; allsimpl.
apply alpha_eq_oterm_combine2 in q0; repnd; subst.
allrw map_length; allsimpl; GC.
rw @cl_lsubst_lsubst_aux in q1; eauto 2 with slow; allsimpl.
apply alpha_eq_sym in q1; apply alpha_eq_oterm_implies_combine in q1; exrepnd; subst.
allrw map_length; GC.
allrw disjoint_app_r; allrw subset_app; repnd.
exists bs'0.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow; dands; auto.
assert (alpha_eq_bterms (lsubst_bterms_aux bs'' sub2) bs'0) as aebs1.
{ unfold alpha_eq_bterms, lsubst_bterms_aux; allrw map_length; dands; auto. }
assert (alpha_eq_bterms tl_subterms (lsubst_bterms_aux bs sub2)) as aebs2.
{ unfold alpha_eq_bterms, lsubst_bterms_aux; allrw map_length; dands; auto. }
assert (alpha_eq_bterms (lsubst_bterms_aux bs sub1) bs') as aebs3.
{ unfold alpha_eq_bterms, lsubst_bterms_aux; allrw map_length; dands; auto. }
assert (alpha_eq_bterms tr_subterms (lsubst_bterms_aux bs'' sub1)) as aebs4.
{ unfold alpha_eq_bterms, lsubst_bterms_aux; allrw map_length; dands; auto. }
assert (approx_or_bts lib bot2 bs' tr_subterms) as apr1 by sp.
pose proof (approx_or_bts_alpha_eq_bterms_l
lib bs' (lsubst_bterms_aux bs sub1) tr_subterms bot2) as apr2.
repeat (autodimp apr2 hyp); eauto 2 with slow.
pose proof (approx_or_bts_alpha_eq_bterms_r
lib (lsubst_bterms_aux bs sub1) (lsubst_bterms_aux bs'' sub1) tr_subterms bot2) as apr3.
repeat (autodimp apr3 hyp); eauto 2 with slow.
fold (approx_or_bts lib r tl_subterms bs'0).
eapply approx_or_bts_alpha_eq_bterms_l;[apply alpha_eq_bterms_sym;exact aebs2|].
eapply approx_or_bts_alpha_eq_bterms_r;[apply alpha_eq_bterms_sym;exact aebs1|].
clear h6 h0 q0 q6.
unfold approx_or_bts in apr3; unfold approx_or_bts.
unfold lblift in apr3; unfold lblift.
allrw @length_lsubst_bterms_aux; repnd; dands; auto.
introv i.
pose proof (apr3 n i) as bl; clear apr3.
repeat (rw @selectbt_lsubst_bterms_aux; auto; try omega).
repeat (rw @selectbt_lsubst_bterms_aux in bl; auto; try omega).
remember (selectbt bs n) as b1.
remember (selectbt bs'' n) as b2.
unfold blift in bl; exrepnd.
unfold blift.
pose proof (length_dom sub1) as el; rw eqdoms in el; rw @length_dom in el.
assert (disjoint (get_utokens_sub sub1) (get_utokens_b b1)) as d11.
{ (* using h4 *)
assert (subset (get_utokens_b b1) (get_utokens_bs bs)) as ss.
{ introv k; unfold get_utokens_bs; rw lin_flat_map.
exists (selectbt bs n); rw <- Heqb1; dands; auto.
rw Heqb1; apply selectbt_in; auto. }
eapply subset_disjoint_r;[|exact ss].
eapply subset_disjoint_r;[|exact h4].
eapply subset_disjoint_r;[|exact ss1].
unfold nrut_sub in nrut1; repnd; eauto with slow.
}
assert (disjoint (get_utokens_sub sub1) (get_utokens_b b2)) as d12.
{ (* using q4 *)
assert (subset (get_utokens_b b2) (get_utokens_bs bs'')) as ss.
{ introv k; unfold get_utokens_bs; rw lin_flat_map.
exists (selectbt bs'' n); rw <- Heqb2; dands; auto.
rw Heqb2; apply selectbt_in; auto; try omega. }
eapply subset_disjoint_r;[|exact ss].
eapply subset_disjoint_r;[|exact q4].
eapply subset_disjoint_r;[|exact ss2].
unfold nrut_sub in nrut1; repnd; eauto with slow.
}
(* XXXXXXXXXXXX *)
pose proof (alpha_eq_bterm_ren_utokens_b
(lsubst_bterm_aux b1 sub1)
(bterm lv nt1)
(nrut_subs_to_utok_ren sub1 sub2)
bl2)
as aeqr1.
rw @lsubst_aux_bterm_ren_utokens_b in aeqr1.
rw @ren_utokens_b_trivial in aeqr1;
[|erewrite @dom_utok_ren_nrut_subs_to_utok_ren; complete eauto].
pose proof (alpha_eq_bterm_ren_utokens_b
(lsubst_bterm_aux b2 sub1)
(bterm lv nt2)
(nrut_subs_to_utok_ren sub1 sub2)
bl0)
as aeqr2.
rw @lsubst_aux_bterm_ren_utokens_b in aeqr2.
rw @ren_utokens_b_trivial in aeqr2;
[|erewrite @dom_utok_ren_nrut_subs_to_utok_ren; complete eauto].
erewrite @ren_utokens_sub_nrut_subs_to_utok_ren in aeqr1; eauto.
erewrite @ren_utokens_sub_nrut_subs_to_utok_ren in aeqr2; eauto.
remember (nrut_subs_to_utok_ren sub1 sub2) as ren.
allsimpl.
exists lv (ren_utokens ren nt1) (ren_utokens ren nt2); dands; auto.
apply approx_open_simpler_equiv; eauto 3 with slow.
unfold simpl_olift; unfold olift in bl1; repnd.
prove_and ntwfsu1; eauto 2 with slow.
prove_and ntwfsu2; eauto 2 with slow.
introv wfs ispl1 ispl2.
(* rename the tokens of sub to fresh tokens *)
pose proof (ex_new_utok_ren
(remove_repeats (get_patom_deq o) (get_utokens_sub sub1 ++ get_utokens_sub sub2))
(get_utokens_sub sub1
++ get_utokens_sub sub2
++ get_utokens_sub sub
++ get_utokens nt1
++ get_utokens nt2))
as newut; exrepnd.
pose proof (bl1 (ren_utokens_sub ren0 sub)) as rr.
repeat (autodimp rr hyp); eauto 2 with slow.
{ apply wf_sub_ren_utokens_sub; eauto with slow. }
{ apply isprogram_lsubst_iff.
apply isprogram_lsubst_iff in ispl1; repnd.
apply nt_wf_ren_utokens_iff in ispl0; dands; auto.
introv j.
pose proof (ispl1 v) as k; rw @free_vars_ren_utokens in k; autodimp k hyp.
exrepnd.
rw @sub_find_ren_utokens_sub; rw k1; eexists; dands; eauto 2 with slow.
unfold closed; rw @free_vars_ren_utokens; auto.
}
{ apply isprogram_lsubst_iff.
apply isprogram_lsubst_iff in ispl2; repnd.
apply nt_wf_ren_utokens_iff in ispl0; dands; auto.
introv j.
pose proof (ispl2 v) as k; rw @free_vars_ren_utokens in k; autodimp k hyp.
exrepnd.
rw @sub_find_ren_utokens_sub; rw k1; eexists; dands; eauto 2 with slow.
unfold closed; rw @free_vars_ren_utokens; auto.
}
repndors; tcsp.
pose proof (pull_out_atoms
nt1
(get_utokens_sub sub1)
(free_vars nt2
++ (sub_free_vars (ren_utokens_sub ren0 sub))
++ dom_sub sub)) as pullout.
autodimp pullout hyp; eauto 2 with slow.
exrepnd.
allrw disjoint_app_l; repnd.
rw remove_repeats_if_no_repeats in pullout1; eauto 2 with slow.
pose proof (pull_out_nrut_sub nt2 sub0 (get_utokens u)) as pullout'.
repeat (autodimp pullout' hyp); eauto 2 with slow.
exrepnd.
pose proof (pull_out_atoms_sub
(ren_utokens_sub ren0 sub)
(range_utok_ren ren0)
(allvars u ++ allvars u0 ++ dom_sub sub0)) as pullout''.
autodimp pullout'' hyp.
{ apply wf_sub_ren_utokens_sub; eauto with slow. }
exrepnd.
allrw disjoint_app_l; repnd.
rw remove_repeats_if_no_repeats in pullout''1; auto.
pose proof (respects_alpha_r_approx_aux_bot2 lib) as respr.
unfold respects2_r in respr.
pose proof (respr
(lsubst nt1 (ren_utokens_sub ren0 sub))
(lsubst nt2 (ren_utokens_sub ren0 sub))
(lsubst (lsubst u0 sub0) (lsubst_sub s' sub3)))
as rer; clear respr.
repeat (autodimp rer hyp).
{ apply lsubst_alpha_congr3; auto. }
pose proof (respects_alpha_l_approx_aux_bot2 lib) as respl.
unfold respects2_l in respl.
pose proof (respl
(lsubst nt1 (ren_utokens_sub ren0 sub))
(lsubst (lsubst u0 sub0) (lsubst_sub s' sub3))
(lsubst (lsubst u sub0) (lsubst_sub s' sub3)))
as rel; clear respl.
repeat (autodimp rel hyp).
{ apply lsubst_alpha_congr3; auto. }
clear rer.
assert (subset (sub_free_vars s') (dom_sub sub3)) as ss.
{ introv is.
rw @cl_lsubst_sub_eq_lsubst_aux_sub in pullout''5; eauto 3 with slow.
applydup @alphaeq_sub_preserves_free_vars in pullout''5 as fvs.
rw @sub_free_vars_ren_utokens_sub in fvs.
rw @cl_sub_free_vars_lsubst_aux_sub in fvs; eauto 2 with slow.
rw (sub_free_vars_if_cl_sub sub) in fvs; eauto 2 with slow.
symmetry in fvs; apply null_iff_nil in fvs.
pose proof (in_deq _ deq_nvar x (dom_sub sub3)) as [d|d]; auto.
provefalse.
pose proof (fvs x) as h; rw in_remove_nvars in h; sp. }
applydup @alphaeq_sub_implies_eq_doms in pullout''5 as eqd.
rw @dom_sub_ren_utokens_sub in eqd.
rw @dom_sub_lsubst_sub in eqd.
applydup @alphaeq_sub_preserves_cl_sub in pullout''5; eauto 3 with slow.
pose proof (cl_lsubst_lsubst_lsubst_sub u s' sub0 sub3) as a1.
rw <- eqd in a1.
repeat (autodimp a1 hyp); eauto 4 with slow.
pose proof (cl_lsubst_lsubst_lsubst_sub u0 s' sub0 sub3) as a2.
rw <- eqd in a2.
repeat (autodimp a2 hyp); eauto 4 with slow.
pose proof (respects_alpha_r_approx_aux_bot2 lib) as respr.
unfold respects2_r in respr.
pose proof (respr
(lsubst (lsubst u sub0) (lsubst_sub s' sub3))
(lsubst (lsubst u0 sub0) (lsubst_sub s' sub3))
(lsubst (lsubst u0 s') (sub0 ++ sub3)))
as rer; clear respr.
repeat (autodimp rer hyp).
clear rel.
pose proof (respects_alpha_l_approx_aux_bot2 lib) as respl.
unfold respects2_l in respl.
pose proof (respl
(lsubst (lsubst u sub0) (lsubst_sub s' sub3))
(lsubst (lsubst u0 s') (sub0 ++ sub3))
(lsubst (lsubst u s') (sub0 ++ sub3)))
as rel; clear respl.
repeat (autodimp rel hyp).
clear rer.
right.
apply hr.
exists (lsubst u s') (lsubst u0 s') (sub0 ++ sub3).
Print nrut_sub.
XXXXXXXXXXXXX
pose proof (pull_out_nrut_sub nt1 sub1 l) as pont1.
repeat (autodimp pont1 hyp); eauto 3 with slow.
XXXXXXXXX
pose proof (simple_lsubst_lsubst_sub_aeq4 u1 sub2 sub) as swap1.
repeat (autodimp swap1 hyp); eauto 3 with slow.
pose proof (simple_lsubst_lsubst_sub_aeq4 u2 sub2 sub) as swap2.
repeat (autodimp swap2 hyp); eauto 3 with slow.
repeat (rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow).
rw swap1; rw swap2; clear swap1 swap2.
(* do I have to remove lv? *)
pose proof (bl1 (ren_utokens_sub (nrut_subs_to_utok_ren sub2 sub1) sub)) as rr.
repeat (autodimp rr hyp); eauto 2 with slow.
{ apply wf_sub_ren_utokens_sub; eauto with slow. }
{
}
XXXXXXXXXXXx
pose proof (fresh_vars (length lv)
(lv ++ free_vars nt1
++ free_vars nt2
++ bound_vars nt1
++ bound_vars nt2
++ dom_sub sub1
++ dom_sub sub2)) as fvs.
exrepnd.
allrw disjoint_app_r; repnd.
pose proof (pull_out_nrut_sub_b_aux (bterm lv nt1) sub1 l lvn) as po1.
allsimpl; repeat (autodimp po1 hyp); eauto 3 with slow.
{ apply alpha_eq_bterm_preserves_wf_bterm in bl2; auto.
apply wf_bterm_lsubst_bterm_aux; eauto 2 with slow.
rw Heqb1; apply wf_bterm_selectbt.
apply (wf_bterms_lsubst_bterms_aux_implies bs sub2).
apply alpha_eq_bterms_preserves_wf_bterms in aebs2; auto.
unfold computes_to_value in comp; repnd.
apply compute_max_steps_eauto2 in comp.
apply isprogram_implies_wf in comp.
apply wf_oterm_iff in comp; repnd; auto.
}
{ simpl.
apply alpha_eq_bterm_preserves_free_vars in bl2; allsimpl; rw <- bl2.
erewrite @free_vars_bterm_lsubst_bterm_aux_nrut_sub; eauto.
apply disjoint_remove_nvars_l.
assert (subset (free_vars_bterm b1) (free_vars_bterms bs)) as ss.
{ unfold free_vars_bterms.
apply subsetSingleFlatMap; rw Heqb1.
apply selectbt_in; auto. }
eapply subset_disjoint;[exact ss|].
apply disjoint_remove_nvars_l.
rw eqdoms.
erewrite <- free_vars_bterms_lsubst_bterms_aux_nrut_sub; eauto.
apply alpha_eq_bterms_preserves_free_vars in aebs2; rw <- aebs2.
unfold computes_to_value in comp; repnd.
apply compute_max_steps_eauto2 in comp.
destruct comp as [cl wf].
apply closed_oterm_iff1 in cl.
apply null_iff_nil in cl; rw cl; auto.
}
{ apply disjoint_app_r; dands; auto. }
exrepnd.
pose proof (pull_out_nrut_sub_b_aux (bterm lv nt2) sub1 l lvn) as qo1.
allsimpl; repeat (autodimp qo1 hyp); eauto 3 with slow.
{ apply alpha_eq_bterm_preserves_wf_bterm in bl0; auto.
apply wf_bterm_lsubst_bterm_aux; eauto 2 with slow.
rw Heqb2; apply wf_bterm_selectbt.
apply (wf_bterms_lsubst_bterms_aux_implies bs'' sub1).
apply alpha_eq_bterms_preserves_wf_bterms in aebs4; auto.
unfold computes_to_value in h5; repnd.
apply compute_max_steps_eauto2 in h5.
apply isprogram_implies_wf in h5.
apply wf_oterm_iff in h5; repnd; auto.
}
{ simpl.
apply alpha_eq_bterm_preserves_free_vars in bl0; allsimpl; rw <- bl0.
erewrite @free_vars_bterm_lsubst_bterm_aux_nrut_sub; eauto.
apply disjoint_remove_nvars_l.
assert (subset (free_vars_bterm b2) (free_vars_bterms bs'')) as ss.
{ unfold free_vars_bterms.
apply subsetSingleFlatMap; rw Heqb2.
apply selectbt_in; auto; try omega. }
eapply subset_disjoint;[exact ss|].
apply disjoint_remove_nvars_l.
erewrite <- free_vars_bterms_lsubst_bterms_aux_nrut_sub; eauto.
apply alpha_eq_bterms_preserves_free_vars in aebs4; rw <- aebs4.
unfold computes_to_value in h5; repnd.
apply compute_max_steps_eauto2 in h5.
destruct h5 as [cl wf].
apply closed_oterm_iff1 in cl.
apply null_iff_nil in cl; rw cl; auto.
}
{ apply disjoint_app_r; dands; auto. }
exrepnd.
assert (alpha_eq_bterm (lsubst_bterm_aux b1 sub1) (lsubst_bterm_aux u sub1)) as aeq1 by eauto 2 with slow.
assert (alpha_eq_bterm (lsubst_bterm_aux b2 sub1) (lsubst_bterm_aux u0 sub1)) as aeq2 by eauto 2 with slow.
apply (alpha_eq_bterm_ren_utokens_b _ _ (nrut_subs_to_utok_ren sub1 sub2)) in aeq1.
apply (alpha_eq_bterm_ren_utokens_b _ _ (nrut_subs_to_utok_ren sub1 sub2)) in aeq2.
repeat (rw @lsubst_aux_bterm_ren_utokens_b in aeq1).
repeat (rw @lsubst_aux_bterm_ren_utokens_b in aeq2).
repeat (rw @ren_utokens_b_trivial in aeq1;
[|erewrite @dom_utok_ren_nrut_subs_to_utok_ren; complete eauto]).
repeat (rw @ren_utokens_b_trivial in aeq2;
[|erewrite @dom_utok_ren_nrut_subs_to_utok_ren; complete eauto]).
erewrite @ren_utokens_sub_nrut_subs_to_utok_ren in aeq1; eauto.
erewrite @ren_utokens_sub_nrut_subs_to_utok_ren in aeq2; eauto.
destruct u as [vs u1].
destruct u0 as [vs' u2].
allsimpl.
subst vs vs'.
repeat (onerw (sub_filter_disjoint1 sub1 lvn); eauto 2 with slow).
repeat (onerw (sub_filter_disjoint1 sub2 lvn); eauto 2 with slow).
exists lvn (lsubst_aux u1 sub2) (lsubst_aux u2 sub2); dands; auto.
apply approx_open_simpler_equiv; eauto 3 with slow.
unfold simpl_olift; unfold olift in bl1; repnd.
prove_and ntwfsu1.
{
apply nt_wf_eq; apply lsubst_aux_preserves_wf_term2; eauto 2 with slow.
apply alphaeqbt_preserves_nt_wf in po1.
repeat (rw @nt_wf_eq in po1); rw @nt_wf_eq in bl3; apply po1 in bl3.
rw <- @cl_lsubst_lsubst_aux in bl3; eauto 2 with slow.
apply lsubst_wf_term in bl3; auto.
}
prove_and ntwfsu2.
{
apply nt_wf_eq; apply lsubst_aux_preserves_wf_term2; eauto 2 with slow.
apply alphaeqbt_preserves_nt_wf in qo1.
repeat (rw @nt_wf_eq in qo1); rw @nt_wf_eq in bl4; apply qo1 in bl4.
rw <- @cl_lsubst_lsubst_aux in bl4; eauto 2 with slow.
apply lsubst_wf_term in bl4; auto.
}
assert (isprogram_bt (bterm lv nt1)) as isplvnt1.
{ apply alpha_eq_bterm_preserves_isprogram_bt in bl2; auto.
apply preserve_program in comp; auto;[|complete unflsubst].
apply isprogram_ot_iff in comp; repnd.
unfold alpha_eq_bterms in aebs2; repnd.
pose proof (aebs2 (selectbt tl_subterms n) (selectbt (lsubst_bterms_aux bs sub2) n)) as h.
unfold selectbt, lsubst_bterms_aux in h.
autodimp h hyp.
{ apply in_nth_combine; allrw map_length; auto; try omega. }
rw (@map_nth2 (@BTerm o) (@BTerm o) (@default_bt o)) in h; tcsp.
unfold selectbt in Heqb1; rw <- Heqb1 in h.
pose proof (comp (nth n tl_subterms default_bt)) as k.
autodimp k hyp.
{ apply nth_in; auto; try omega. }
apply alpha_eq_bterm_preserves_isprogram_bt in h; auto.
}
assert (isprogram_bt (bterm lv nt2)) as isplvnt2.
{
}
introv wfs ispl1 ispl2.
pose proof (simple_lsubst_lsubst_sub_aeq4 u1 sub2 sub) as swap1.
repeat (autodimp swap1 hyp); eauto 3 with slow.
pose proof (simple_lsubst_lsubst_sub_aeq4 u2 sub2 sub) as swap2.
repeat (autodimp swap2 hyp); eauto 3 with slow.
repeat (rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow).
rw swap1; rw swap2; clear swap1 swap2.
(* rename lvn into lv in the domain of this substitution *)
pose proof (bl1 (ren_utokens_sub (nrut_subs_to_utok_ren sub2 sub1) sub)) as rr.
repeat (autodimp rr hyp); eauto 2 with slow.
{ apply wf_sub_ren_utokens_sub; eauto with slow. }
{
}
SearchAbout wf_sub ren_utokens_sub.
SearchAbout (lsubst (lsubst _ _) _).
(* replace sub2 by sub1 in sub --> sub'
instantiate bl1 using sub'
pull out sub1 from sub' and sub2 from sub
use IH
*)
Qed.
*)
Require Import sqle.
Lemma approx_change_utoks {o} :
forall lib (t1 t2 : @NTerm o) ren,
no_repeats (range_utok_ren ren)
-> no_repeats (dom_utok_ren ren)
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
-> approx lib t1 t2
-> approx lib (ren_utokens ren t1) (ren_utokens ren t2).
Proof.
introv nr1 nr2 d1 d2 apr.
allrw @approx_sqle.
allunfold @sqle.
intro m.
pose proof (apr m) as h; clear apr.
revert t1 t2 ren nr1 nr2 d1 d2 h.
induction m; introv norep1 norep2 disj1 disj2 apr.
{
inversion apr; subst.
constructor; eauto with slow.
}
constructor.
(*inversion apr as [? ? ? cl]; subst; clear apr.*)
inversion apr as [|? ? ? cl]; clear apr; subst.
allunfold @close_comput; repnd; dands; tcsp; eauto with slow; introv comp.
- clear cl3 cl.
dup comp as comp1.
apply (computes_to_value_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
eauto 3 with slow;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
rw @ren_utokens_can in comp1.
dup comp1 as comp2.
apply cl2 in comp2; exrepnd.
dup comp2 as comp22.
apply (computes_to_value_ren_utokens _ _ _ ren) in comp2; eauto 3 with slow;[].
rw @ren_utokens_can in comp2.
assert (match
get_utok_c
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
with
| Some a => NUTok (ren_atom ren a)
| None =>
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
end = c) as e.
{ destruct c; allsimpl; tcsp.
rw @inv_ren_atom2; auto.
apply computes_to_value_preserves_utokens in comp; allsimpl; eauto 3 with slow.
rw subset_cons_l in comp; repnd.
intro i.
rw @get_utokens_ren_utokens in comp3.
rw in_map_iff in comp3; exrepnd; subst.
rw in_diff in i; repnd.
destruct (ren_atom_or ren a) as [d|d]; tcsp.
rw d in i0.
apply in_dom_in_range in i0; auto.
}
rw e in comp2; clear e.
eexists; dands;[exact comp2|].
unfold lblift; unfold lblift in comp0.
allrw map_length; repnd; dands; auto.
introv i.
applydup comp0 in i.
unfold blift; unfold blift in i0.
exrepnd.
repeat (onerw @selectbt_map; auto; try omega).
remember (selectbt tl_subterms n) as b1.
remember (selectbt tr_subterms n) as b2.
applydup @computes_to_value_preserves_utokens in comp as ss1; eauto 3 with slow.
eapply (subset_trans _ (get_utokens_b b1)) in ss1;
[|simpl; apply subset_app_l;
introv k; rw lin_flat_map; exists b1;
dands; auto; subst; apply selectbt_in;
complete auto].
apply (subset_map_map (ren_atom (inv_utok_ren ren))) in ss1.
rw <- @get_utokens_b_ren_utokens_b in ss1.
rw <- @get_utokens_ren_utokens in ss1.
rw @inv_ren_utokens in ss1; auto.
applydup @alpha_eq_bterm_preserves_utokens in i2 as put1; allsimpl.
rw put1 in ss1; clear put1.
applydup @computes_to_value_preserves_utokens in comp22 as ss2; allsimpl; eauto 3 with slow.
eapply (subset_trans _ (get_utokens_b b2)) in ss2;
[|simpl; apply subset_app_l;
introv k; rw lin_flat_map; exists b2;
dands; auto; subst; apply selectbt_in; complete omega].
applydup @alpha_eq_bterm_preserves_utokens in i1 as put2; allsimpl.
rw put2 in ss2; clear put2.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i1.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i2.
assert (disjoint (dom_utok_ren ren)
(diff (get_patom_deq o)
(range_utok_ren ren)
(get_utokens_b b1))) as d.
{
apply computes_to_value_preserves_utokens in comp; allsimpl; eauto 3 with slow.
allrw subset_app; repnd.
assert (LIn b1 tl_subterms) as itl.
{ subst b1; unfold selectbt; apply nth_in; auto. }
rw subset_flat_map in comp; apply comp in itl; clear comp.
rw <- disjoint_diff_l.
eapply subset_disjoint_r;[|exact itl].
rw @get_utokens_ren_utokens.
rw disjoint_diff_l.
apply disjoint_dom_diff_range_map_ren_atom.
}
rw @inv_ren_utokens_b2 in i2; auto.
allsimpl.
exists lv (ren_utokens ren nt1) (ren_utokens ren nt2); dands; auto.
unfold olift; unfold olift in i0; repnd.
dands.
{ apply nt_wf_ren_utokens; auto. }
{ apply nt_wf_ren_utokens; auto. }
introv wfs isp1 isp2.
pose proof (ex_ren_utokens_sub
sub
ren
(get_utokens nt1 ++ get_utokens nt2)) as exren.
autodimp exren hyp; exrepnd.
pose proof (i0 sub') as h.
repeat (autodimp h hyp).
{ subst; apply wf_sub_ren_utokens_sub_iff in wfs; auto. }
{ subst; apply isprogram_lsubst_iff in isp1; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp1.
apply isp1 in j; exrepnd.
allrw @sub_find_ren_utokens_sub.
remember (sub_find sub' v) as sf; symmetry in Heqsf; destruct sf; ginv.
eexists; dands; eauto.
- apply nt_wf_ren_utokens_iff in j2; auto.
- unfold closed in j0; rw @free_vars_ren_utokens in j0; auto. }
{ subst; apply isprogram_lsubst_iff in isp2; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp2.
apply isp2 in j; exrepnd.
allrw @sub_find_ren_utokens_sub.
remember (sub_find sub' v) as sf; symmetry in Heqsf; destruct sf; ginv.
eexists; dands; eauto.
- apply nt_wf_ren_utokens_iff in j2; auto.
- unfold closed in j0; rw @free_vars_ren_utokens in j0; auto. }
pose proof (IHm (lsubst nt1 sub') (lsubst nt2 sub') (ren ++ ren')) as sqn.
allrw @range_utok_ren_app.
allrw @dom_utok_ren_app.
allrw no_repeats_app.
allrw disjoint_app_l.
allrw disjoint_app_r.
repnd.
repeat (autodimp sqn hyp); dands; eauto 3 with slow.
{ introv a b; applydup disj1 in a.
allrw in_diff; allrw in_app_iff; allrw not_over_or; repnd.
apply get_utokens_lsubst in b0; allrw in_app_iff; repndors; tcsp.
apply in_get_utokens_sub in b0; exrepnd.
apply in_sub_keep_first in b4; repnd.
pose proof (exren1 t) as hh.
repeat (autodimp hh hyp).
rw lin_flat_map; apply sub_find_some in b5; apply in_sub_eta in b5; repnd.
eexists; dands; eauto.
}
{ eapply subset_disjoint;[exact exren4|].
apply disjoint_app_l; dands.
{ apply disjoint_diff_l; rw diff_nil_if_subset; eauto with slow. }
{ apply disjoint_diff_l; rw diff_nil_if_subset; eauto with slow. }
}
{ introv a b; applydup disj2 in a.
allrw in_diff; allrw in_app_iff; allrw not_over_or; repnd.
apply get_utokens_lsubst in b0; allrw in_app_iff; repndors; tcsp.
apply in_get_utokens_sub in b0; exrepnd.
apply in_sub_keep_first in b4; repnd.
pose proof (exren1 t) as hh.
repeat (autodimp hh hyp).
rw lin_flat_map; apply sub_find_some in b5; apply in_sub_eta in b5; repnd.
eexists; dands; eauto.
}
{ eapply subset_disjoint;[exact exren4|].
apply disjoint_app_l; dands.
{ apply disjoint_diff_l; rw diff_nil_if_subset; eauto with slow. }
{ apply disjoint_diff_l; rw diff_nil_if_subset; eauto with slow. }
}
{ repeat (rw @lsubst_ren_utokens in sqn).
rw exren0 in sqn.
repeat (rw @ren_utokens_app_weak_l in sqn; eauto 2 with slow).
}
- clear cl2 cl.
dup comp as comp1.
apply (computes_to_exception_ren_utokens _ _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
eauto 3 with slow;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
dup comp1 as comp2.
apply cl3 in comp2; exrepnd.
dup comp0 as comp00.
apply (computes_to_exception_ren_utokens _ _ _ _ ren) in comp0; eauto 3 with slow.
eexists; eexists; dands;[exact comp0|idtac|].
{
pose proof (IHm (ren_utokens (inv_utok_ren ren) a) a' ren) as h.
repeat (autodimp h hyp).
{ apply computes_to_exception_preserves_utokens in comp1; repnd; eauto 3 with slow.
introv i j; allrw in_diff; repnd.
apply comp4 in j0.
apply disj1 in i; allrw in_diff; sp.
}
{ apply computes_to_exception_preserves_utokens in comp00; repnd; eauto 3 with slow.
introv i j; allrw in_diff; repnd.
apply comp01 in j0.
apply disj2 in i; allrw in_diff; sp.
}
rw @inv_ren_utokens2 in h; auto.
apply computes_to_exception_preserves_utokens in comp; repnd; eauto 3 with slow.
introv i j.
rw @get_utokens_ren_utokens in comp4.
apply (disjoint_dom_diff_range_map_ren_atom (get_utokens t1)) in i; destruct i.
allrw in_diff; repnd; dands; auto.
}
{
pose proof (IHm (ren_utokens (inv_utok_ren ren) e) e' ren) as h.
repeat (autodimp h hyp).
{ apply computes_to_exception_preserves_utokens in comp1; repnd; eauto 3 with slow.
introv i j; allrw in_diff; repnd.
apply comp1 in j0.
apply disj1 in i; allrw in_diff; sp.
}
{ apply computes_to_exception_preserves_utokens in comp00; repnd; eauto 3 with slow.
introv i j; allrw in_diff; repnd.
apply comp00 in j0.
apply disj2 in i; allrw in_diff; sp.
}
rw @inv_ren_utokens2 in h; auto.
apply computes_to_exception_preserves_utokens in comp; repnd; eauto 3 with slow.
introv i j.
rw @get_utokens_ren_utokens in comp.
apply (disjoint_dom_diff_range_map_ren_atom (get_utokens t1)) in i; destruct i.
allrw in_diff; repnd; dands; auto.
}
(*
- clear cl2 cl2.
dup comp as comp1.
apply (computes_to_marker_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
auto;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
dup comp1 as comp2.
apply cl in comp2; exrepnd.
dup comp2 as comp22.
apply (computes_to_marker_ren_utokens _ _ _ ren) in comp2; auto.
*)
- dup comp as comp1.
apply (reduces_to_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
eauto 3 with slow;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; allsimpl; auto.
dup comp1 as comp2.
apply cl4 in comp2; exrepnd.
dup comp0 as comp00.
apply (reduces_to_ren_utokens _ _ _ ren) in comp2; eauto 3 with slow; allsimpl.
eexists; dands; eauto.
Qed.
(*
XXXXXXXXXXXXXXX
Lemma approx_change_utoks {o} :
forall lib (t1 t2 : @NTerm o) ren,
no_repeats (range_utok_ren ren)
-> no_repeats (dom_utok_ren ren)
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
-> approx lib t1 t2
-> approx lib (ren_utokens ren t1) (ren_utokens ren t2).
Proof.
intro lib.
(*
cofix IND.
introv nr1 nr2 disj1 disj2 apr.
*)
pose proof
(approx_acc
lib
(fun a b => {t1,t2 : NTerm
$ {ren : utok_ren
$ approx lib t1 t2
# no_repeats (range_utok_ren ren)
# no_repeats (dom_utok_ren ren)
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
# a = ren_utokens ren t1
# b = ren_utokens ren t2}})
(@bot2 o)) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] =>
assert B as h;
[|introv nr1 nr2 d1 d2 k; eapply h;
eexists;eexists;eexists;dands;eauto;fail]
end.
apply HH; clear HH.
introv hb hr h; exrepnd; subst.
rename h1 into apr.
rename h2 into norep1.
rename h3 into norep2.
rename h4 into disj1.
rename h5 into disj2.
constructor.
(*inversion apr as [? ? ? cl]; subst; clear apr.*)
inversion apr as [cl]; clear apr.
allunfold @close_comput; repnd; dands; tcsp; eauto with slow; introv comp.
- clear cl3 cl.
dup comp as comp1.
apply (computes_to_value_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
auto;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
rw @ren_utokens_can in comp1.
dup comp1 as comp2.
apply cl2 in comp2; exrepnd.
apply (computes_to_value_ren_utokens _ _ _ ren) in comp2; auto.
rw @ren_utokens_can in comp2.
assert (match
get_utok_c
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
with
| Some a => NUTok (ren_atom ren a)
| None =>
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
end = c) as e.
{ destruct c; allsimpl; tcsp.
rw @inv_ren_atom2; auto.
apply computes_to_value_preserves_utokens in comp; allsimpl.
rw subset_cons_l in comp; repnd.
intro i.
rw @get_utokens_ren_utokens in comp3.
rw in_map_iff in comp3; exrepnd; subst.
rw in_diff in i; repnd.
destruct (ren_atom_or ren a) as [d|d]; tcsp.
rw d in i0.
apply in_dom_in_range in i0; auto.
}
rw e in comp2; clear e.
eexists; dands;[exact comp2|].
unfold lblift; unfold lblift in comp0.
allrw map_length; repnd; dands; auto.
introv i.
applydup comp0 in i.
unfold blift; unfold blift in i0.
exrepnd.
repeat (onerw @selectbt_map; auto; try omega).
remember (selectbt tl_subterms n) as b1.
remember (selectbt tr_subterms n) as b2.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i1.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i2.
assert (disjoint (dom_utok_ren ren)
(diff (get_patom_deq o)
(range_utok_ren ren)
(get_utokens_b b1))) as d.
{
(* clear IND.*)
admit.
}
rw @inv_ren_utokens_b2 in i2; auto.
allsimpl.
exists lv (ren_utokens ren nt1) (ren_utokens ren nt2); dands; auto.
unfold olift; unfold olift in i0; repnd.
dands.
{ apply nt_wf_ren_utokens; auto. }
{ apply nt_wf_ren_utokens; auto. }
introv wfs isp1 isp2.
(*
Lemma ren_utokens_lsubst_aux_approx {o} :
forall lib (t1 t2 : @NTerm o) ren sub,
prog_sub sub
-> no_repeats (range_utok_ren ren)
-> no_repeats (dom_utok_ren ren)
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
-> disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
-> (forall sub, prog_sub sub -> approx lib (lsubst_aux t1 sub) (lsubst_aux t2 sub))
-> approx lib (lsubst_aux (ren_utokens ren t1) sub) (lsubst_aux (ren_utokens ren t2) sub).
Proof.
intro lib.
pose proof
(approx_acc
lib
(fun a b => {t1,t2 : NTerm
$ {ren : utok_ren
$ {sub : Sub
$ prog_sub sub
# no_repeats (range_utok_ren ren)
# no_repeats (dom_utok_ren ren)
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t1))
# disjoint (range_utok_ren ren) (diff (get_patom_deq o) (dom_utok_ren ren) (get_utokens t2))
# (forall sub, prog_sub sub -> approx lib (lsubst_aux t1 sub) (lsubst_aux t2 sub))
# a = lsubst_aux (ren_utokens ren t1) sub
# b = lsubst_aux (ren_utokens ren t2) sub}}})
(@bot2 o)) as HH.
allsimpl.
match goal with
[ HH : _ -> ?B |- _ ] =>
assert B as h;
[|introv ps nr1 nr2 d1 d2 k; eapply h;exists t1 t2 ren sub;dands;eauto;fail]
end;[].
apply HH; clear HH.
introv hb hr h; exrepnd; subst.
rename h0 into ps.
rename h2 into nr1.
rename h3 into nr2.
rename h4 into disj1.
rename h5 into disj2.
rename h6 into imp.
constructor.
allunfold @close_comput; repnd; dands; tcsp.
- pose proof (imp sub ps) as h.
apply approx_relates_only_progs in h; repnd.
rw <- @cl_lsubst_lsubst_aux in h0; eauto 2 with slow.
applydup @lsubst_program_implies in h0.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow.
apply isprogram_lsubst_iff in h0; repnd.
apply isprogram_lsubst_if_isprog_sub; eauto 3 with slow.
rw @free_vars_ren_utokens; auto.
- pose proof (imp sub ps) as h.
apply approx_relates_only_progs in h; repnd.
rw <- @cl_lsubst_lsubst_aux in h; eauto 2 with slow.
applydup @lsubst_program_implies in h.
rw <- @cl_lsubst_lsubst_aux; eauto 2 with slow.
apply isprogram_lsubst_iff in h; repnd.
apply isprogram_lsubst_if_isprog_sub; eauto 3 with slow.
rw @free_vars_ren_utokens; auto.
- introv comp.
clear cl3 cl.
dup comp as comp1.
apply (computes_to_value_ren_utokens _ _ _ (inv_utok_ren ren)) in comp1;
allrw @range_utok_ren_inv_utok_ren;
allrw @dom_utok_ren_inv_utok_ren;
auto;[|rw @get_utokens_ren_utokens; apply disjoint_dom_diff_range_map_ren_atom].
rw @inv_ren_utokens in comp1; auto.
rw @ren_utokens_can in comp1.
dup comp1 as comp2.
apply cl2 in comp2; exrepnd.
apply (computes_to_value_ren_utokens _ _ _ ren) in comp2; auto.
rw @ren_utokens_can in comp2.
assert (match
get_utok_c
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
with
| Some a => NUTok (ren_atom ren a)
| None =>
match get_utok_c c with
| Some a => NUTok (ren_atom (inv_utok_ren ren) a)
| None => c
end
end = c) as e.
{ destruct c; allsimpl; tcsp.
rw @inv_ren_atom2; auto.
apply computes_to_value_preserves_utokens in comp; allsimpl.
rw subset_cons_l in comp; repnd.
intro i.
rw @get_utokens_ren_utokens in comp3.
rw in_map_iff in comp3; exrepnd; subst.
rw in_diff in i; repnd.
destruct (ren_atom_or ren a) as [d|d]; tcsp.
rw d in i0.
apply in_dom_in_range in i0; auto.
}
rw e in comp2; clear e.
eexists; dands;[exact comp2|].
unfold lblift; unfold lblift in comp0.
allrw map_length; repnd; dands; auto.
introv i.
applydup comp0 in i.
unfold blift; unfold blift in i0.
exrepnd.
repeat (onerw @selectbt_map; auto; try omega).
remember (selectbt tl_subterms n) as b1.
remember (selectbt tr_subterms n) as b2.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i1.
apply (alpha_eq_bterm_ren_utokens_b _ _ ren) in i2.
assert (disjoint (dom_utok_ren ren)
(diff (get_patom_deq o)
(range_utok_ren ren)
(get_utokens_b b1))) as d.
{
(* clear IND.*)
admit.
}
rw @inv_ren_utokens_b2 in i2; auto.
allsimpl.
exists lv (ren_utokens ren nt1) (ren_utokens ren nt2); dands; auto.
unfold olift; unfold olift in i0; repnd.
dands.
{ apply nt_wf_ren_utokens; auto. }
{ apply nt_wf_ren_utokens; auto. }
introv wfs isp1 isp2.
Qed.
*)
pose proof (ex_new_utok_ren
(dom_utok_ren ren)
(dom_utok_ren ren
++ range_utok_ren ren
++ get_utokens_sub sub
++ get_utokens nt1
++ get_utokens nt2)) as h.
destruct h as [ren' h]; repnd.
allrw disjoint_app_l; repnd.
pose proof (lsubst_ren_utokens2 nt1 ren ren' sub) as e1.
repeat (autodimp e1 hyp); eauto 3 with slow.
pose proof (lsubst_ren_utokens2 nt2 ren ren' sub) as e2.
repeat (autodimp e2 hyp); eauto 3 with slow.
pose proof (ren_utokens_ren_utokens
(lsubst nt1 (ren_utokens_sub ren' sub))
(inv_utok_ren ren')
ren) as f1.
rw @compose_ren_utokens_trivial in f1;
[|rw @dom_utok_ren_inv_utok_ren; eauto 2 with slow].
pose proof (ren_utokens_ren_utokens
(lsubst nt2 (ren_utokens_sub ren' sub))
(inv_utok_ren ren')
ren) as f2.
rw @compose_ren_utokens_trivial in f2;
[|rw @dom_utok_ren_inv_utok_ren; eauto 2 with slow].
rw <- f1 in e1; rw <- f2 in e2; clear f1 f2.
rewrite e1, e2; clear e1 e2.
pose proof (i0 (ren_utokens_sub ren' sub)) as q; clear i0.
repeat (autodimp q hyp); eauto 2 with slow.
{ apply isprogram_lsubst_iff in isp1; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp1.
apply isp1 in j; exrepnd.
rw @sub_find_ren_utokens_sub; rw j1.
eexists; dands; eauto.
- apply nt_wf_ren_utokens; auto.
- unfold closed; rw @free_vars_ren_utokens; auto. }
{ apply isprogram_lsubst_iff in isp2; repnd.
apply isprogram_lsubst_iff.
rw @nt_wf_ren_utokens_iff in isp0; dands; auto.
introv j.
rw @free_vars_ren_utokens in isp2.
apply isp2 in j; exrepnd.
rw @sub_find_ren_utokens_sub; rw j1.
eexists; dands; eauto.
- apply nt_wf_ren_utokens; auto.
- unfold closed; rw @free_vars_ren_utokens; auto. }
repndors; tcsp; try (complete (allunfold @bot2; sp)).
Theorem approx_acc {p} :
forall (lib : library)
(l r0 : bin_rel (@NTerm p))
(OBG: forall (r: bin_rel NTerm)
(INC: r0 =2> r)
(CIH: l =2> r),
l =2> approx_aux lib r),
l =2> approx_aux lib r0.
Proof.
intros.
assert (SIM: approx_aux lib (r0 \2/ l) x0 x1) by auto.
clear PR; revert x0 x1 SIM; cofix CIH.
intros; destruct SIM; econstructor; eauto.
invertsna c Hcl. repnd.
unfold close_comput.
dands; eauto.
- introv Hcomp.
apply Hcl2 in Hcomp.
exrepnd. exists tr_subterms. split; eauto.
eapply le_lblift2; eauto.
apply le_olift.
unfold le_bin_rel.
introv Hap.
dorn Hap; spc.
- introv Hcomp.
apply Hcl3 in Hcomp; exrepnd.
exists a' e'; dands; auto; repdors; auto.
Qed.
(*
pose proof
(hr
(ren_utokens ren (lsubst nt1 (ren_utokens_sub ren' sub)))
(ren_utokens ren (lsubst nt2 (ren_utokens_sub ren' sub))))
as ind1.
autodimp ind1 hyp.
{ exists
(lsubst nt1 (ren_utokens_sub ren' sub))
(lsubst nt2 (ren_utokens_sub ren' sub))
ren; dands; auto.
- admit.
- admit.
}
*)
apply IND; tcsp.
{ rw @range_utok_ren_inv_utok_ren; rw h0; auto. }
{ rw @dom_utok_ren_inv_utok_ren; auto. }
{ clear IND; admit. }
{ clear IND; admit. }
apply IND; tcsp.
{ clear IND; admit. }
{ clear IND; admit. }
- clear IND; admit.
- clear IND; admit.
Qed.
(*
apply hr.
exists (lsubst nt1 (ren_utokens_sub ren' sub))
(lsubst nt2 (ren_utokens_sub ren' sub))
ren;
dands; eauto 3 with slow.
* rw @range_utok_ren_app.
rw @range_utok_ren_inv_utok_ren.
apply no_repeats_app; dands; eauto 3 with slow.
{ rw h7; auto. }
{ eauto 3 with slow. }
pose proof (hr (ren_utokens (ren ++ inv_utok_ren ren')
(lsubst nt1 (ren_utokens_sub ren' sub)))
(ren_utokens (ren ++ inv_utok_ren ren')
(lsubst nt2 (ren_utokens_sub ren' sub)))
*)
Qed.
*)
(*
*** Local Variables:
*** coq-load-path: ("." "../util/" "../terms/" "../computation/")
*** End:
*)
|
{"author": "vrahli", "repo": "NuprlInCoq", "sha": "0c3d7723836d3f615ea47f56e58b2ea6173e7d98", "save_path": "github-repos/coq/vrahli-NuprlInCoq", "path": "github-repos/coq/vrahli-NuprlInCoq/NuprlInCoq-0c3d7723836d3f615ea47f56e58b2ea6173e7d98/cequiv/approx_props1.v"}
|
// smooth_feedback: Control theory on Lie groups
// https://github.com/pettni/smooth_feedback
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
//
// Copyright (c) 2021 Petter Nilsson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#include <gtest/gtest.h>
#include <Eigen/Core>
#include <smooth/compat/autodiff.hpp>
#include "smooth/feedback/ocp_flatten.hpp"
constexpr auto DT = smooth::diff::Type::Autodiff;
// constexpr auto DT = smooth::diff::Type::Numerical;
#include "ocp.hpp"
TEST(OcpFlatten, Basic)
{
std::srand(10);
const auto t1 = smooth::feedback::test_ocp_derivatives<DT>(ocp_test, 5);
ASSERT_TRUE(t1);
const auto xl = []<typename T>(const T & t) -> smooth::CastT<T, OcpTest::X> {
const Eigen::Vector<T, Nx> vel{1, 2, 3};
return smooth::exp<smooth::CastT<T, OcpTest::X>>(t * vel);
};
const auto ul = []<typename T>(const T & t) -> smooth::CastT<T, OcpTest::U> {
const Eigen::Vector<T, Nu> vel{1, 2};
return smooth::exp<smooth::CastT<T, OcpTest::U>>(t * vel);
};
// test twice to catch allocation/compression issues (first call allocates)
auto ocp_flat = smooth::feedback::flatten_ocp(ocp_test, xl, ul);
const auto t2a = smooth::feedback::test_ocp_derivatives<DT>(ocp_flat, 5);
ASSERT_TRUE(t2a);
const auto t2b = smooth::feedback::test_ocp_derivatives<DT>(ocp_flat, 5);
ASSERT_TRUE(t2b);
}
|
{"hexsha": "24427fd83c01f1ed5332bda669e3f719205a0e2b", "size": 2408, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_ocp_flatten.cpp", "max_stars_repo_name": "tgurriet/smooth_feedback", "max_stars_repo_head_hexsha": "1f926cb4269741ddc09ba048af5bea5e0390a053", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_ocp_flatten.cpp", "max_issues_repo_name": "tgurriet/smooth_feedback", "max_issues_repo_head_hexsha": "1f926cb4269741ddc09ba048af5bea5e0390a053", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_ocp_flatten.cpp", "max_forks_repo_name": "tgurriet/smooth_feedback", "max_forks_repo_head_hexsha": "1f926cb4269741ddc09ba048af5bea5e0390a053", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8387096774, "max_line_length": 81, "alphanum_fraction": 0.7230066445, "num_tokens": 637}
|
      double precision susycoup
      common/susycoup/susycoup
|
{"hexsha": "65b221ec1d7baa5c4a31c48b8135558c44547a7f", "size": 63, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/Inc/susycoup.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/Inc/susycoup.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/Inc/susycoup.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 21.0, "max_line_length": 31, "alphanum_fraction": 0.7142857143, "num_tokens": 18}
|
# libraries
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib as mpl
from src import graphs
def run1():
N = 8
K = 2
# Build your graph
G = nx.DiGraph()
for i in range(1, N + 1):
G.add_node(i)
A = graphs.generate_n_cycle_d_regular_graph_by_degree(N, K)
for i in range(N):
for j in range(N):
if A[i][j] == 1:
G.add_edge(i + 1, j + 1)
# Plot it
pos = nx.circular_layout(G)
plt.figure(figsize=(4.2, 4))
nx.draw(G, pos,
with_labels=True,
node_size=800,
node_color='white',
edgecolors='black',
arrows=True,
arrowsize=20,
            edge_color='black',
width=1,
font_family='Times New Roman',
font_size=14
)
plt.show()
def run2():
N = 8
K = 1
# Build your graph
G = nx.DiGraph()
for i in range(1, N + 1):
G.add_node(i)
    A = graphs.generate_n_cycle_d_regular_graph_by_degree(N, K)
for i in range(N):
for j in range(N):
if A[i][j] == 1:
G.add_edge(i + 1, j + 1)
# Plot it
pos = nx.circular_layout(G)
plt.figure(figsize=(4.2, 4))
nx.draw(G, pos,
with_labels=True,
node_size=800,
node_color='white',
edgecolors='black',
arrows=True,
arrowsize=20,
            edge_color='black',
width=1,
font_family='Times New Roman',
font_size=14
)
plt.show()
def run3():
N = 4
# Build your graph
G = nx.DiGraph()
for i in range(1, N + 1):
G.add_node(i)
A = graphs.generate_graph_by_edges(N, ['i->i+1', 'i->i+2'])
for i in range(N):
for j in range(N):
if A[i][j] == 1:
G.add_edge(i + 1, j + 1)
# Plot it
pos = nx.circular_layout(G)
plt.figure(figsize=(4.2, 4))
nx.draw(G, pos,
with_labels=True,
node_size=800,
node_color='white',
edgecolors='black',
arrows=True,
arrowsize=20,
            edge_color='black',
width=1,
font_family='Times New Roman',
font_size=14
)
plt.show()
def run4():
N = 8
K = 2
# Build your graph
G = nx.Graph()
for i in range(1, N + 1):
G.add_node(i)
A = graphs.generate_undirected_n_cycle_d_regular_graph_by_degree(N, K)
for i in range(N):
for j in range(N):
if A[i][j] == 1:
G.add_edge(i + 1, j + 1)
# Plot it
pos = nx.circular_layout(G)
plt.figure(figsize=(4.2, 4))
nx.draw(G, pos,
with_labels=True,
node_size=800,
node_color='white',
edgecolors='black',
arrows=True,
arrowsize=20,
            edge_color='black',
width=1,
font_family='Times New Roman',
font_size=14
)
plt.show()
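# The four run* functions above differ only in N and in how the adjacency
# matrix A is generated; a shared helper (a sketch, not part of the original
# script) could factor out the node/edge construction and plotting:
#
# def plot_adjacency(A, N, directed=True):
#     G = nx.DiGraph() if directed else nx.Graph()
#     G.add_nodes_from(range(1, N + 1))
#     for i in range(N):
#         for j in range(N):
#             if A[i][j] == 1:
#                 G.add_edge(i + 1, j + 1)
#     plt.figure(figsize=(4.2, 4))
#     nx.draw(G, nx.circular_layout(G), with_labels=True, node_size=800,
#             node_color='white', edgecolors='black', edge_color='black',
#             arrows=directed, font_family='Times New Roman', font_size=14)
#     plt.show()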
if __name__ == '__main__':
run4()
|
{"hexsha": "98cde4740295972213f44fb5a0ad6a9167badf98", "size": 2923, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_plot_graphs.py", "max_stars_repo_name": "gianmarcocalbi/asyncdsm-sym", "max_stars_repo_head_hexsha": "21a82c5fafada360faef340db805a772cec61530", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_plot_graphs.py", "max_issues_repo_name": "gianmarcocalbi/asyncdsm-sym", "max_issues_repo_head_hexsha": "21a82c5fafada360faef340db805a772cec61530", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_plot_graphs.py", "max_forks_repo_name": "gianmarcocalbi/asyncdsm-sym", "max_forks_repo_head_hexsha": "21a82c5fafada360faef340db805a772cec61530", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.7371794872, "max_line_length": 74, "alphanum_fraction": 0.5148819706, "include": true, "reason": "import networkx", "num_tokens": 840}
|
#!/usr/bin/env python
import mango
import mango.io
import mango.image
import mango.application.mdss as mdss
import mango.mpi as mpi
import scipy as sp
import scipy.io
import logging
import re
import os
import sys
haveArgParse = False
try:
import argparse
haveArgParse = True
except:
import optparse
logger, rootLogger = mpi.getLoggers(__name__)
def ddsFileMetaDataIsConsistent(ddsPath, mpiComm=mpi.world):
rootRank = 0
if (mpiComm != None):
rank = mpiComm.Get_rank()
size = mpiComm.Get_size()
else:
rank = rootRank
size = 1
fileList = None
totalNumZ = None
if (rank == rootRank):
if (os.path.isdir(ddsPath)):
fileList = [os.path.join(ddsPath, fileName) for fileName in os.listdir(ddsPath)]
else:
fileList = [ddsPath,]
fileList.sort()
ncdfFile = sp.io.netcdf_file(fileList[0], 'r')
totalNumZ = ncdfFile.zdim_total
if (mpiComm != None):
fileList,totalNumZ = mpiComm.bcast((fileList, totalNumZ), rootRank)
numZ = 0
for fIdx in range(rank, len(fileList), size):
ncdfFile = sp.io.netcdf_file(fileList[fIdx], 'r')
ncdfZRange = ncdfFile.zdim_range
numZ += ncdfZRange[1] - ncdfZRange[0] + 1
if (mpiComm != None):
numZ = mpiComm.allreduce(numZ, op=mpi.SUM)
message = None
if (numZ != totalNumZ):
message = ("%s NetCDF inconsistency, expected %s zdim_total, got %s from files" % (ddsPath, totalNumZ, numZ))
return message
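# Usage sketch (illustrative; it mirrors the call in DownsampleJob.__call__
# below): the check returns None when the per-file zdim_range extents sum to
# zdim_total, and an error-message string otherwise.
#
#   msg = ddsFileMetaDataIsConsistent("/scratch/tomoA_nc")  # hypothetical path
#   if msg is not None:
#       rootLogger.error(msg)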
class DownsampleJob(object):
def __init__(
self,
mpiComm = None,
mdssSrcImagePath = None,
mdssDstDirPath = None,
mdssPrj = None,
loclSrcImagePath = None,
loclDstDirPath = None,
voxelSizeTuples = None
):
self.srcDds = None
self.mpiComm = mpiComm
if (self.mpiComm == None) and (mpi.haveMpi4py):
self.mpiComm = mpi.world
self.rootRank = 0
self.mdssSrcImagePath = mdssSrcImagePath
self.mdssDstDirPath = mdssDstDirPath
self.mdssDstImagePaths = []
self.mdssPrj = mdssPrj
self.loclSrcImagePath = loclSrcImagePath
self.loclSrcDirPath = os.path.split(self.loclSrcImagePath)[0]
self.loclDstDirPath = loclDstDirPath
self.loclDstImagePaths = []
self.voxelSizeTuples = voxelSizeTuples
def mdssStage(self):
if (self.mpiComm != None):
self.mpiComm.barrier()
if ((self.mpiComm == None) or (self.mpiComm.Get_rank() == self.rootRank)):
if (self.mdssSrcImagePath != None):
rootLogger.info("MDSS staging %s" % self.mdssSrcImagePath)
mdss.stage(self.mdssSrcImagePath, recursive=True, project=self.mdssPrj)
if (self.mpiComm != None):
self.mpiComm.barrier()
def mdssGet(self):
if (self.mpiComm != None):
self.mpiComm.barrier()
if ((self.mpiComm == None) or (self.mpiComm.Get_rank() == self.rootRank)):
if (not os.path.exists(self.loclSrcDirPath)):
rootLogger.info("Creating directory %s" % self.loclSrcDirPath)
                os.makedirs(self.loclSrcDirPath, mode=0o700)  # u+rwX for the owner; os.makedirs needs a numeric mode
if (self.mdssSrcImagePath != None):
rootLogger.info("MDSS getting %s" % self.mdssSrcImagePath)
mdss.get(self.mdssSrcImagePath, self.loclSrcDirPath, recursive=True, project=self.mdssPrj)
if (self.mpiComm != None):
self.mpiComm.barrier()
def loclSrcImagePathUncompress(self):
uncompressedFiles = \
mango.io.uncompressDdsData(
self.loclSrcImagePath,
preserve=False,
mpiComm=self.mpiComm
)
if (not os.path.isdir(self.loclSrcImagePath)):
self.loclSrcImagePath = uncompressedFiles[0]
def loclSrcImagePathReadData(self):
self.srcDds = mango.io.readDds(self.loclSrcImagePath)
def createLoclDstDirPath(self):
if (self.mpiComm != None):
self.mpiComm.barrier()
if ((self.mpiComm == None) or (self.mpiComm.Get_rank() == self.rootRank)):
if (not os.path.exists(self.loclDstDirPath)):
rootLogger.info("Creating directory %s" % self.loclDstDirPath)
                os.makedirs(self.loclDstDirPath, mode=0o700)  # u+rwX for the owner; os.makedirs needs a numeric mode
if (self.mpiComm != None):
self.mpiComm.barrier()
def downsampleAndWriteData(self):
self.createLoclDstDirPath()
dstImageBasename, dstImageExt = mango.io.splitext(os.path.split(self.loclSrcImagePath)[1])
dstImageBasename = os.path.join(self.loclDstDirPath, dstImageBasename + "_GDS")
dstImageExt = ".nc"
for voxSzTuple in self.voxelSizeTuples:
rootLogger.info(
"Downsampling %s image (voxel size=%s%s) to voxel size = %s"
%
(
os.path.split(self.loclSrcImagePath)[1],
tuple(self.srcDds.md.getVoxelSize()),
self.srcDds.md.getVoxelSizeUnit(),
voxSzTuple[2]
)
)
dspDds = \
mango.image.gaussian_downsample(
self.srcDds,
voxsz=[voxSzTuple[0],]*len(self.srcDds.shape),
voxunit=voxSzTuple[1]
)
loclDstImagePath = dstImageBasename + "x" + voxSzTuple[2] + dstImageExt
rootLogger.info("Writing downsampled image to %s" % loclDstImagePath)
mango.io.writeDds(loclDstImagePath, dspDds)
self.loclDstImagePaths.append(loclDstImagePath)
self.srcDds = None
def loclDstImagePathsCompress(self):
compLoclDstImagePaths = []
for loclDstImagePath in self.loclDstImagePaths:
compressFileList = mango.io.compressDdsData(loclDstImagePath, preserve=False, mpiComm=self.mpiComm)
if (os.path.isdir(loclDstImagePath)):
compLoclDstImagePaths.append(loclDstImagePath)
else:
compLoclDstImagePaths += compressFileList
self.loclDstImagePaths = compLoclDstImagePaths
def mdssPut(self):
if (self.mpiComm != None):
self.mpiComm.barrier()
if (self.mdssDstDirPath != None):
self.loclDstImagePathsCompress()
if ((self.mpiComm == None) or (self.mpiComm.Get_rank() == self.rootRank)):
if (self.mdssDstDirPath != None):
if (not mdss.exists(self.mdssDstDirPath, project=self.mdssPrj)):
rootLogger.info("MDSS creating directory %s" % self.mdssDstDirPath)
mdss.makedirs(self.mdssDstDirPath, mode="u+rwX", project=self.mdssPrj)
elif (not mdss.isdir(self.mdssDstDirPath, project=self.mdssPrj)):
raise Exception("MDSS destination %s exists but is not a directory." % self.mdssDstDirPath)
rootLogger.info("MDSS putting %s" % self.loclDstImagePaths)
mdss.put(self.loclDstImagePaths, self.mdssDstDirPath, recursive=True, project=self.mdssPrj)
mdssDstImagePaths = \
[
os.path.join(self.mdssDstDirPath, os.path.split(loclDstImagePath)[1])
for loclDstImagePath in self.loclDstImagePaths
]
self.mdssDstImagePaths = [p for p in mdssDstImagePaths if mdss.exists(p, project=self.mdssPrj)]
if (self.mpiComm != None):
self.mpiComm.barrier()
def __call__(self):
self.mdssGet()
self.loclSrcImagePathUncompress()
rootLogger.info("Checking consistency of image data %s..." % self.loclSrcImagePath)
msg = ddsFileMetaDataIsConsistent(self.loclSrcImagePath, self.mpiComm)
if (msg == None):
self.loclSrcImagePathReadData()
self.downsampleAndWriteData()
self.mdssPut()
else:
rootLogger.error(msg)
self.mdssDstImagePaths, self.loclDstImagePaths = [],[]
return self.mdssDstImagePaths, self.loclDstImagePaths
def __str__(self):
return \
(
(
"mdssSrcImagePath = %s\n mdssDstDirPath = %s"
+
"\n mdssPrj = %s"
+
"\n loclSrcImagePath = %s\n loclDstDirPath = %s"
+
"\n voxelSizeTuples = %s"
)
%
(
self.mdssSrcImagePath,
self.mdssDstDirPath,
self.mdssPrj,
self.loclSrcImagePath,
self.loclDstDirPath,
self.voxelSizeTuples
)
)
def parseVoxelSizes(voxelSizes):
regEx = re.compile("\\s*([+-]?[0-9]+\\.?[0-9]*(?:[eE][+-]?[0-9]+)?)\\s*([a-z]*)\\s*")
voxelSizeTuples = []
for s in voxelSizes:
s = s.lower()
m = regEx.match(s)
if (m != None):
voxelSizeTuples.append((float(m.group(1)), m.group(2), s.strip()))
else:
raise Exception("Could not parse float and unit-string from '%s' in %s list." % (s, voxelSizes))
return voxelSizeTuples
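# Illustration (not part of the original script) of what parseVoxelSizes
# yields, given the regex above: each entry is (value, unit, normalized string).
#   parseVoxelSizes(["150um", "200um"])
#   # -> [(150.0, 'um', '150um'), (200.0, 'um', '200um')]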
class BatchDownsampler:
"""
"""
def __init__(
self,
mdssSrcRootDir,
mdssDstRootDir,
mdssPrj,
loclSrcRootDir,
loclDstRootDir,
voxelSizeTuples
):
self.mdssSrcRootDir = mdssSrcRootDir
self.mdssDstRootDir = mdssDstRootDir
self.loclSrcRootDir = loclSrcRootDir
self.loclDstRootDir = loclDstRootDir
self.mdssPrj = mdssPrj
self.voxelSizeTuples = voxelSizeTuples
rootLogger.debug("Parsed voxels sizes: %s" % (self.voxelSizeTuples,))
def getRelativeImagePaths(self, sampleDirNames):
regEx = re.compile("SrSkull2009_([0-9]*)_[0-9]")
return [os.path.join(dirName, "tomo%s_nc" % dirName) for dirName in sampleDirNames if (regEx.match(dirName))]
def createJobList(self):
if (mpi.haveMpi4py):
mpi.world.barrier()
if (self.mdssSrcRootDir != None):
sampleDirNames = mdss.listdir(self.mdssSrcRootDir, project=self.mdssPrj)
else:
sampleDirNames = os.listdir(self.loclSrcRootDir)
sampleDirNames.sort()
relImagePaths = self.getRelativeImagePaths(sampleDirNames)
sampleDirNames = [os.path.split(relImagePath)[0] for relImagePath in relImagePaths]
rootLogger.debug("relImagePaths = %s" % (relImagePaths, ))
if (self.mdssSrcRootDir != None):
mdssSrcImagePaths = [os.path.join(self.mdssSrcRootDir, relImagePath) for relImagePath in relImagePaths]
else:
mdssSrcImagePaths = [None,]*len(relImagePaths)
if (self.mdssDstRootDir == None):
mdssDstDirPaths = [None,]*len(relImagePaths)
else:
mdssDstDirPaths = [os.path.join(self.mdssDstRootDir, dirName) for dirName in sampleDirNames]
loclSrcImagePaths = [os.path.join(self.loclSrcRootDir, relImagePath) for relImagePath in relImagePaths]
if (self.loclDstRootDir == None):
loclDstDirPaths = [os.path.split(imagePath)[0] for imagePath in loclSrcImagePaths]
else:
loclDstDirPaths = [os.path.join(self.loclDstRootDir, dirName) for dirName in sampleDirNames]
jobList = []
for i in range(0, len(sampleDirNames)):
jobList.append(
DownsampleJob(
mdssSrcImagePath = mdssSrcImagePaths[i],
mdssDstDirPath = mdssDstDirPaths[i],
mdssPrj = self.mdssPrj,
loclSrcImagePath = loclSrcImagePaths[i],
loclDstDirPath = loclDstDirPaths[i],
voxelSizeTuples = self.voxelSizeTuples
)
)
if (mpi.haveMpi4py):
mpi.world.barrier()
return jobList
def executeJobs(self, jobList):
rootLogger.debug("\n\n".join(map(str, jobList)))
for job in jobList:
job.mdssStage()
mdssCreatedImagePathList = []
loclCreatedImagePathList = []
for job in jobList:
createdPathPair = job()
mdssCreatedImagePathList += createdPathPair[0]
loclCreatedImagePathList += createdPathPair[1]
return mdssCreatedImagePathList, loclCreatedImagePathList
def __call__(self):
mdss.setDefaultProject(self.mdssPrj)
jobList = self.createJobList()
return self.executeJobs(jobList)
def getArgumentParser():
"""
Returns object for parsing command line options.
:rtype: argparse.ArgumentParser
:return: Object to parse command line options.
"""
descStr = \
(
"Creates downsampled images from netCDF files on MDSS."
)
argList = []
argList.append(
{
'cmdLine':['-P', '--mdss-project',],
'dest':'mdssPrj',
'type':str,
'metavar':'P',
'default':None,
'action':'store',
'help': "MDSS project ID string."
}
)
argList.append(
{
'cmdLine':['--mdss-dst',],
'dest':'mdssDst',
'type':str,
'metavar':'D',
'default':None,
'action':'store',
'help': "MDSS top level destination directory."
}
)
argList.append(
{
'cmdLine':['--mdss-src',],
'dest':'mdssSrc',
'type':str,
'metavar':'D',
'default':None,
'action':'store',
'help': "MDSS top level source directory."
}
)
argList.append(
{
'cmdLine':['--local-dst',],
'dest':'loclDst',
'type':str,
'metavar':'D',
'default':None,
'action':'store',
'help': "Local filesystem top level destination directory."
}
)
argList.append(
{
'cmdLine':['--local-src',],
'dest':'loclSrc',
'type':str,
'metavar':'D',
'default':None,
'action':'store',
'help': "Local filesystem top level source directory."
}
)
argList.append(
{
'cmdLine':['--voxel-sizes',],
'dest':'voxelSizes',
'type':str,
'metavar':'F',
'default':"150um,200um",
'action':'store',
'help': "Voxel size for downsampled images."
}
)
argList.append(
{
'cmdLine':['-l','--logging-level'],
'dest':'loggingLevel',
'type':str,
'metavar':'LVL',
'default':"INFO",
'action':'store',
'help':"Level of logging output (one of 'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG')."
}
)
if (haveArgParse):
parser = argparse.ArgumentParser(description=descStr)
for arg in argList:
addArgumentDict = dict(arg)
del addArgumentDict['cmdLine']
parser.add_argument(*arg['cmdLine'], **addArgumentDict)
else:
parser = optparse.OptionParser(description=descStr)
for arg in argList:
addOptionDict = dict(arg)
del addOptionDict['cmdLine']
parser.add_option(*arg['cmdLine'], **addOptionDict)
return parser
if (__name__ == "__main__"):
argParser = getArgumentParser()
if haveArgParse:
args = argParser.parse_args()
else:
(args, argv) = argParser.parse_args()
mpi.initialiseLoggers(
[__name__, "mango.io", "mango.application"],
logLevel=getattr(logging, args.loggingLevel)
)
bdsp = \
BatchDownsampler(
mdssSrcRootDir=args.mdssSrc,
mdssDstRootDir=args.mdssDst,
mdssPrj=args.mdssPrj,
loclSrcRootDir=args.loclSrc,
loclDstRootDir=args.loclDst,
voxelSizeTuples=(parseVoxelSizes(args.voxelSizes.split(",")))
)
bdsp()
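# Example invocation (hypothetical paths and project code; the flags match
# getArgumentParser above, and the script is typically launched under MPI):
#   mpirun -np 4 python batchdownsample.py \
#       --mdss-project xy1 \
#       --mdss-src SrSkull2009 --mdss-dst SrSkull2009_GDS \
#       --local-src /scratch/xy1/src --local-dst /scratch/xy1/gds \
#       --voxel-sizes 150um,200um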
|
{"hexsha": "3ce1b7f67ca7a4e4a7989554cd37f1cae8617240", "size": 16574, "ext": "py", "lang": "Python", "max_stars_repo_path": "misc/python/mango/application/srskull/batchdownsample.py", "max_stars_repo_name": "pymango/pymango", "max_stars_repo_head_hexsha": "b55f831f0194b214e746b2dfb4d9c6671a1abc38", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-11T03:23:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-16T09:01:48.000Z", "max_issues_repo_path": "misc/python/mango/application/srskull/batchdownsample.py", "max_issues_repo_name": "pymango/pymango", "max_issues_repo_head_hexsha": "b55f831f0194b214e746b2dfb4d9c6671a1abc38", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "misc/python/mango/application/srskull/batchdownsample.py", "max_forks_repo_name": "pymango/pymango", "max_forks_repo_head_hexsha": "b55f831f0194b214e746b2dfb4d9c6671a1abc38", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-03-04T11:03:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-01T10:01:36.000Z", "avg_line_length": 34.2438016529, "max_line_length": 117, "alphanum_fraction": 0.5560516472, "include": true, "reason": "import scipy", "num_tokens": 4091}
|
# Copyright (c) MMEditing Authors.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.vision.ops import DeformConv2D
from ...utils.download import get_path_from_url
from ...modules.init import kaiming_normal_, constant_
from .builder import GENERATORS
@paddle.no_grad()
def default_init_weights(layer_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
layer_list (list[nn.Layer] | nn.Layer): Layers to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(layer_list, list):
layer_list = [layer_list]
for m in layer_list:
if isinstance(m, nn.Conv2D):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
elif isinstance(m, nn.Linear):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
elif isinstance(m, nn.BatchNorm):
constant_(m.weight, 1)
class PixelShufflePack(nn.Layer):
""" Pixel Shuffle upsample layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Upsample ratio.
upsample_kernel (int): Kernel size of Conv layer to expand channels.
Returns:
Upsampled feature map.
"""
def __init__(self, in_channels, out_channels, scale_factor,
upsample_kernel):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.scale_factor = scale_factor
self.upsample_kernel = upsample_kernel
self.upsample_conv = nn.Conv2D(
self.in_channels,
self.out_channels * scale_factor * scale_factor,
self.upsample_kernel,
padding=(self.upsample_kernel - 1) // 2)
self.pixel_shuffle = nn.PixelShuffle(self.scale_factor)
self.init_weights()
def init_weights(self):
"""Initialize weights for PixelShufflePack.
"""
default_init_weights(self, 1)
def forward(self, x):
"""Forward function for PixelShufflePack.
Args:
x (Tensor): Input tensor with shape (in_channels, c, h, w).
Returns:
Tensor with shape (out_channels, c, scale_factor*h, scale_factor*w).
"""
x = self.upsample_conv(x)
x = self.pixel_shuffle(x)
return x
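# Minimal shape sketch for PixelShufflePack (illustrative only): the conv
# expands channels by scale_factor**2 and nn.PixelShuffle trades them back
# for spatial resolution, so with scale_factor=2:
#   up = PixelShufflePack(64, 64, scale_factor=2, upsample_kernel=3)
#   y = up(paddle.rand([1, 64, 8, 8]))  # y.shape == [1, 64, 16, 16]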
def MakeMultiBlocks(func, num_layers, nf=64):
"""Make layers by stacking the same blocks.
Args:
func (nn.Layer): nn.Layer class for basic block.
num_layers (int): number of blocks.
Returns:
nn.Sequential: Stacked blocks in nn.Sequential.
"""
Blocks = nn.Sequential()
for i in range(num_layers):
Blocks.add_sublayer('block%d' % i, func(nf))
return Blocks
class ResidualBlockNoBN(nn.Layer):
"""Residual block without BN.
It has a style of:
---Conv-ReLU-Conv-+-
|________________|
Args:
nf (int): Channel number of intermediate features.
Default: 64.
res_scale (float): Residual scale. Default: 1.0.
"""
def __init__(self, nf=64, res_scale=1.0):
super(ResidualBlockNoBN, self).__init__()
self.nf = nf
self.res_scale = res_scale
self.conv1 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.conv2 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.relu = nn.ReLU()
if self.res_scale == 1.0:
default_init_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor with shape (n, c, h, w).
"""
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out * self.res_scale
def flow_warp(x,
flow,
interpolation='bilinear',
padding_mode='zeros',
align_corners=True):
"""Warp an image or a feature map with optical flow.
Args:
x (Tensor): Tensor with size (n, c, h, w).
flow (Tensor): Tensor with size (n, h, w, 2). The last dimension is
            a two-channel map, denoting the width and height relative offsets.
Note that the values are not normalized to [-1, 1].
interpolation (str): Interpolation mode: 'nearest' or 'bilinear'.
Default: 'bilinear'.
padding_mode (str): Padding mode: 'zeros' or 'border' or 'reflection'.
Default: 'zeros'.
align_corners (bool): Whether align corners. Default: True.
Returns:
Tensor: Warped image or feature map.
"""
x_h, x_w = x.shape[-2:]
flow_h, flow_w = flow.shape[1:3]
if x_h != flow_h or x_w != flow_w:
raise ValueError(f'The spatial sizes of input ({x.shape[-2:]}) and '
f'flow ({flow.shape[1:3]}) are not the same.')
_, _, h, w = x.shape
# create mesh grid
grid_y, grid_x = paddle.meshgrid(paddle.arange(0, h), paddle.arange(0, w))
grid = paddle.stack((grid_x, grid_y), axis=2) # (w, h, 2)
grid = paddle.cast(grid, 'float32')
grid.stop_gradient = True
grid_flow = grid + flow
# scale grid_flow to [-1,1]
grid_flow_x = 2.0 * grid_flow[:, :, :, 0] / max(w - 1, 1) - 1.0
grid_flow_y = 2.0 * grid_flow[:, :, :, 1] / max(h - 1, 1) - 1.0
grid_flow = paddle.stack((grid_flow_x, grid_flow_y), axis=3)
output = F.grid_sample(
x,
grid_flow,
mode=interpolation,
padding_mode=padding_mode,
align_corners=align_corners)
return output
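# Sanity sketch for flow_warp (illustrative): a zero flow samples every
# pixel at its own location, so the warp reduces to the identity.
#   x = paddle.rand([1, 3, 16, 16])
#   y = flow_warp(x, paddle.zeros([1, 16, 16, 2]))  # y matches x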
class SPyNetBasicModule(nn.Layer):
"""Basic Module for SPyNet.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2D(
in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3)
self.conv2 = nn.Conv2D(
in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3)
self.conv3 = nn.Conv2D(
in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3)
self.conv4 = nn.Conv2D(
in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3)
self.conv5 = nn.Conv2D(
in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3)
self.relu = nn.ReLU()
def forward(self, tensor_input):
"""
Args:
tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
8 channels contain:
[reference image (3), neighbor image (3), initial flow (2)].
Returns:
Tensor: Refined flow with shape (b, 2, h, w)
"""
out = self.relu(self.conv1(tensor_input))
out = self.relu(self.conv2(out))
out = self.relu(self.conv3(out))
out = self.relu(self.conv4(out))
out = self.conv5(out)
return out
class SPyNet(nn.Layer):
"""SPyNet network structure.
    The differences from the SPyNet in the paper are that
    1. more SPyNetBasicModules are used in this version, and
    2. no batch normalization is used in this version.
Paper:
Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
"""
def __init__(self):
super().__init__()
self.basic_module0 = SPyNetBasicModule()
self.basic_module1 = SPyNetBasicModule()
self.basic_module2 = SPyNetBasicModule()
self.basic_module3 = SPyNetBasicModule()
self.basic_module4 = SPyNetBasicModule()
self.basic_module5 = SPyNetBasicModule()
self.register_buffer(
'mean',
paddle.to_tensor([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]))
self.register_buffer(
'std',
paddle.to_tensor([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]))
def compute_flow(self, ref, supp):
"""Compute flow from ref to supp.
Note that in this function, the images are already resized to a
multiple of 32.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
n, _, h, w = ref.shape
# normalize the input images
ref = [(ref - self.mean) / self.std]
supp = [(supp - self.mean) / self.std]
# generate downsampled frames
for level in range(5):
ref.append(F.avg_pool2d(ref[-1], kernel_size=2, stride=2))
supp.append(F.avg_pool2d(supp[-1], kernel_size=2, stride=2))
ref = ref[::-1]
supp = supp[::-1]
# flow computation
flow = paddle.zeros([n, 2, h // 32, w // 32])
# level=0
flow_up = flow
flow = flow_up + self.basic_module0(
paddle.concat(
[
ref[0], flow_warp(
supp[0],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
# level=1
flow_up = F.interpolate(
flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
flow = flow_up + self.basic_module1(
paddle.concat(
[
ref[1], flow_warp(
supp[1],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
# level=2
flow_up = F.interpolate(
flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
flow = flow_up + self.basic_module2(
paddle.concat(
[
ref[2], flow_warp(
supp[2],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
# level=3
flow_up = F.interpolate(
flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
flow = flow_up + self.basic_module3(
paddle.concat(
[
ref[3], flow_warp(
supp[3],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
# level=4
flow_up = F.interpolate(
flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
flow = flow_up + self.basic_module4(
paddle.concat(
[
ref[4], flow_warp(
supp[4],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
# level=5
flow_up = F.interpolate(
flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0
flow = flow_up + self.basic_module5(
paddle.concat(
[
ref[5], flow_warp(
supp[5],
flow_up.transpose([0, 2, 3, 1]),
padding_mode='border'), flow_up
],
1))
return flow
def forward(self, ref, supp):
"""Forward function of SPyNet.
This function computes the optical flow from ref to supp.
Args:
ref (Tensor): Reference image with shape of (n, 3, h, w).
supp (Tensor): Supporting image with shape of (n, 3, h, w).
Returns:
Tensor: Estimated optical flow: (n, 2, h, w).
"""
# upsize to a multiple of 32
h, w = ref.shape[2:4]
w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)
h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)
ref = F.interpolate(
ref, size=(h_up, w_up), mode='bilinear', align_corners=False)
supp = F.interpolate(
supp, size=(h_up, w_up), mode='bilinear', align_corners=False)
ref.stop_gradient = False
supp.stop_gradient = False
# compute flow, and resize back to the original resolution
flow_up = self.compute_flow(ref, supp)
flow = F.interpolate(
flow_up, size=(h, w), mode='bilinear', align_corners=False)
# adjust the flow values
# todo: grad bug
# flow[:, 0, :, :] *= (float(w) / float(w_up))
# flow[:, 1, :, :] *= (float(h) / float(h_up))
flow_x = flow[:, 0:1, :, :] * (float(w) / float(w_up))
flow_y = flow[:, 1:2, :, :] * (float(h) / float(h_up))
flow = paddle.concat([flow_x, flow_y], 1)
return flow
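# Worked example of the resizing above (assumed input size): for a 180x320
# frame, h_up and w_up become 192 and 320 (the next multiples of 32), the
# flow is estimated at 192x320, interpolated back to 180x320, and its y
# component is rescaled by 180/192 (the x component by 320/320, unchanged).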
class ResidualBlocksWithInputConv(nn.Layer):
"""Residual blocks with a convolution in front.
Args:
in_channels (int): Number of input channels of the first conv.
out_channels (int): Number of channels of the residual blocks.
Default: 64.
num_blocks (int): Number of residual blocks. Default: 30.
"""
def __init__(self, in_channels, out_channels=64, num_blocks=30):
super().__init__()
# a convolution used to match the channels of the residual blocks
        self.conv1 = nn.Conv2D(in_channels, out_channels, 3, 1, 1)
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
# residual blocks
self.ResidualBlocks = MakeMultiBlocks(
ResidualBlockNoBN, num_blocks, nf=out_channels)
def forward(self, feat):
"""
Forward function for ResidualBlocksWithInputConv.
Args:
feat (Tensor): Input feature with shape (n, in_channels, h, w)
Returns:
Tensor: Output feature with shape (n, out_channels, h, w)
"""
        out = self.Leaky_relu(self.conv1(feat))
out = self.ResidualBlocks(out)
return out
@GENERATORS.register()
class BasicVSRNet(nn.Layer):
"""BasicVSR network structure for video super-resolution.
Support only x4 upsampling.
Paper:
BasicVSR: The Search for Essential Components in Video Super-Resolution
and Beyond, CVPR, 2021
Args:
mid_channels (int): Channel number of the intermediate features.
Default: 64.
num_blocks (int): Number of residual blocks in each propagation branch.
Default: 30.
"""
def __init__(self, mid_channels=64, num_blocks=30):
super().__init__()
self.mid_channels = mid_channels
# optical flow network for feature alignment
self.spynet = SPyNet()
weight_path = get_path_from_url(
'https://paddlegan.bj.bcebos.com/models/spynet.pdparams')
self.spynet.set_state_dict(paddle.load(weight_path))
# propagation branches
self.backward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
self.forward_resblocks = ResidualBlocksWithInputConv(
mid_channels + 3, mid_channels, num_blocks)
# upsample
self.fusion = nn.Conv2D(mid_channels * 2, mid_channels, 1, 1, 0)
self.upsample1 = PixelShufflePack(
mid_channels, mid_channels, 2, upsample_kernel=3)
self.upsample2 = PixelShufflePack(
mid_channels, 64, 2, upsample_kernel=3)
self.conv_hr = nn.Conv2D(64, 64, 3, 1, 1)
self.conv_last = nn.Conv2D(64, 3, 3, 1, 1)
self.img_upsample = nn.Upsample(
scale_factor=4, mode='bilinear', align_corners=False)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1)
def check_if_mirror_extended(self, lrs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
"""
self.is_mirror_extended = False
if lrs.shape[1] % 2 == 0:
lrs_1, lrs_2 = paddle.chunk(lrs, 2, axis=1)
lrs_2 = paddle.flip(lrs_2, [1])
if paddle.norm(lrs_1 - lrs_2) == 0:
self.is_mirror_extended = True
def compute_flow(self, lrs):
"""Compute optical flow using SPyNet for feature warping.
Note that if the input is a mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
Returns:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lrs.shape
lrs_1 = lrs[:, :-1, :, :, :].reshape([-1, c, h, w])
lrs_2 = lrs[:, 1:, :, :, :].reshape([-1, c, h, w])
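        # Pair consecutive frames: lrs_1 holds frames 0..t-2 and lrs_2 holds
        # frames 1..t-1, flattened over the batch so SPyNet processes all
        # n * (t - 1) image pairs in a single call.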
flows_backward = self.spynet(lrs_1, lrs_2).reshape([n, t - 1, 2, h, w])
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lrs_2,
lrs_1).reshape([n, t - 1, 2, h, w])
return flows_forward, flows_backward
def forward(self, lrs):
"""Forward function for BasicVSR.
Args:
lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).
Returns:
Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
"""
n, t, c, h, w = lrs.shape
t = paddle.to_tensor(t)
assert h >= 64 and w >= 64, (
'The height and width of inputs should be at least 64, '
f'but got {h} and {w}.')
# check whether the input is an extended sequence
self.check_if_mirror_extended(lrs)
# compute optical flow
flows_forward, flows_backward = self.compute_flow(lrs)
# backward-time propagation
outputs = []
feat_prop = paddle.zeros([n, self.mid_channels, h, w])
for i in range(t - 1, -1, -1):
if i < t - 1: # no warping required for the last timestep
flow1 = flows_backward[:, i, :, :, :]
feat_prop = flow_warp(feat_prop, flow1.transpose([0, 2, 3, 1]))
feat_prop = paddle.concat([lrs[:, i, :, :, :], feat_prop], axis=1)
feat_prop = self.backward_resblocks(feat_prop)
outputs.append(feat_prop)
outputs = outputs[::-1]
# forward-time propagation and upsampling
feat_prop = paddle.zeros_like(feat_prop)
for i in range(0, t):
lr_curr = lrs[:, i, :, :, :]
if i > 0: # no warping required for the first timestep
if flows_forward is not None:
flow = flows_forward[:, i - 1, :, :, :]
else:
flow = flows_backward[:, -i, :, :, :]
feat_prop = flow_warp(feat_prop, flow.transpose([0, 2, 3, 1]))
feat_prop = paddle.concat([lr_curr, feat_prop], axis=1)
feat_prop = self.forward_resblocks(feat_prop)
# upsampling given the backward and forward features
out = paddle.concat([outputs[i], feat_prop], axis=1)
out = self.lrelu(self.fusion(out))
out = self.lrelu(self.upsample1(out))
out = self.lrelu(self.upsample2(out))
out = self.lrelu(self.conv_hr(out))
out = self.conv_last(out)
base = self.img_upsample(lr_curr)
out += base
outputs[i] = out
return paddle.stack(outputs, axis=1)
class SecondOrderDeformableAlignment(nn.Layer):
"""Second-order deformable alignment module.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
deformable_groups (int): Number of deformable groups. Default: 16.
"""
def __init__(self,
in_channels=128,
out_channels=64,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
groups=1,
deformable_groups=16):
super(SecondOrderDeformableAlignment, self).__init__()
self.conv_offset = nn.Sequential(
nn.Conv2D(3 * out_channels + 4, out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2D(out_channels, out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2D(out_channels, out_channels, 3, 1, 1),
nn.LeakyReLU(negative_slope=0.1),
nn.Conv2D(out_channels, 27 * deformable_groups, 3, 1, 1), )
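        # The final layer above emits 27 channels per deformable group: for
        # each of the 9 locations of the 3x3 kernel, an (x, y) offset pair
        # plus one modulation mask value (2 * 9 + 9 = 27). These are split
        # back apart with paddle.chunk in forward().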
self.dcn = DeformConv2D(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
deformable_groups=deformable_groups)
self.init_offset()
def init_offset(self):
constant_(self.conv_offset[-1].weight, 0)
constant_(self.conv_offset[-1].bias, 0)
def forward(self, x, extra_feat, flow_1, flow_2):
extra_feat = paddle.concat([extra_feat, flow_1, flow_2], axis=1)
out = self.conv_offset(extra_feat)
o1, o2, mask = paddle.chunk(out, 3, axis=1)
# offset
offset = 10 * paddle.tanh(paddle.concat((o1, o2), axis=1))
offset_1, offset_2 = paddle.chunk(offset, 2, axis=1)
offset_1 = offset_1 + flow_1.flip(1).tile(
[1, offset_1.shape[1] // 2, 1, 1])
offset_2 = offset_2 + flow_2.flip(1).tile(
[1, offset_2.shape[1] // 2, 1, 1])
offset = paddle.concat([offset_1, offset_2], axis=1)
# mask
mask = F.sigmoid(mask)
out = self.dcn(x, offset, mask)
return out
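# A minimal sketch (not part of the original file) of the mirror-extension
# test used by BasicVSRNet.check_if_mirror_extended: appending a temporally
# flipped copy of a clip gives a sequence whose second half equals the
# flipped first half, so forward flows can be recovered from the backward
# ones. It only assumes a working paddle install; shapes follow the
# (n, t, c, h, w) layout used throughout this file.
if __name__ == '__main__':
    clip = paddle.rand([1, 2, 3, 16, 16])
    mirrored = paddle.concat([clip, paddle.flip(clip, [1])], axis=1)
    first_half, second_half = paddle.chunk(mirrored, 2, axis=1)
    diff = paddle.norm(first_half - paddle.flip(second_half, [1]))
    print(float(diff))  # 0.0 -> mirror-extended, flows_forward not needed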
|
{"hexsha": "73567032d968d42844d9f0010be52248aa8a901b", "size": 22879, "ext": "py", "lang": "Python", "max_stars_repo_path": "paddlers/models/ppgan/models/generators/basicvsr.py", "max_stars_repo_name": "huilin16/PaddleRS", "max_stars_repo_head_hexsha": "ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2022-02-28T02:07:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:54:29.000Z", "max_issues_repo_path": "paddlers/models/ppgan/models/generators/basicvsr.py", "max_issues_repo_name": "huilin16/PaddleRS", "max_issues_repo_head_hexsha": "ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2022-03-15T12:13:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:54:08.000Z", "max_forks_repo_path": "paddlers/models/ppgan/models/generators/basicvsr.py", "max_forks_repo_name": "huilin16/PaddleRS", "max_forks_repo_head_hexsha": "ca0d6223d8e56cd3bd3cbd3a033c89f1718ce26a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2022-02-28T02:07:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T11:40:40.000Z", "avg_line_length": 34.0968703428, "max_line_length": 80, "alphanum_fraction": 0.5605577167, "include": true, "reason": "import numpy", "num_tokens": 5819}
|
"""
Dynamical Systems needed to create the data for the experiments.
Gabriele Abbati, Machine Learning Research Group, University of Oxford
February 2019
"""
# Libraries
from abc import ABC, abstractmethod
import numpy as np
from typing import Union, Tuple
from scipy.integrate import ode
from scipy.optimize import fsolve
class DynamicalSystem(ABC):
"""
Abstract class for a dynamical system. Includes an ODE solver based on
scipy.
"""
def __init__(self, dimensionality: int,
true_param: Union[list, np.array],
noise_variance: float = 0.0,
stn_ratio: float = None):
"""
General Constructor.
:param dimensionality: dimension of the state of the system;
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
self.dim = dimensionality
self.theta = np.array(true_param)
self.mean = 0.0
self.variance = noise_variance
self.system_ode = ode(self._system_ode).set_integrator('vode',
method='bdf')
self.stn_ratio = stn_ratio
return
@staticmethod
@abstractmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
return []
def simulate(self, initial_state: Union[list, np.array],
initial_time: float, final_time: float,
t_delta_integration: float) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system = np.copy(initial_state).reshape(self.dim, 1)
t = [initial_time]
self.system_ode.set_initial_value(initial_state,
initial_time).set_f_params(self.theta)
while self.system_ode.successful() and self.system_ode.t < final_time:
self.system_ode.integrate(self.system_ode.t + t_delta_integration)
system = np.c_[system, self.system_ode.y.reshape(self.dim, 1)]
t.append(self.system_ode.t)
return system, np.array(t)
def observe(self, initial_state: Union[list, np.array],
initial_time: float, final_time: float,
t_delta_integration: float,
t_delta_observation: float) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
[system, t] = self.simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
t_obs = np.arange(initial_time, final_time + t_delta_observation,
t_delta_observation)
observed_system = np.zeros([self.dim, t_obs.shape[0]])
for n in range(self.dim):
observed_system[n, :] = np.interp(t_obs, t, system[n, :])
if self.variance != 0.0:
noise = np.random.normal(loc=0.0, scale=np.sqrt(self.variance),
size=observed_system.shape)
observed_system += noise.reshape(observed_system.shape)
if self.stn_ratio:
std_devs_signal = np.std(observed_system, axis=1)
std_devs_noise = std_devs_signal / np.sqrt(self.stn_ratio)
noise = np.random.normal(loc=0.0, scale=1.0,
size=observed_system.shape)
for n in range(self.dim):
noise[n, :] = noise[n, :] * std_devs_noise[n]
observed_system += noise.reshape(observed_system.shape)
return observed_system, t_obs.reshape(-1, 1)
def observe_at_t(self, initial_state: Union[list, np.array],
initial_time: float, final_time: float,
t_delta_integration: float,
t_observations: np.array):
""""
Integrate the system using an scipy built-in ODE solver and extract the
noisy observations, computed at the time stamps specified in
t_observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_observations: time stamps at which observe the system.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
[system, t] = self.simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
t_obs = t_observations
observed_system = np.zeros([self.dim, t_obs.shape[0]])
for n in range(self.dim):
observed_system[n, :] = np.interp(t_obs, t, system[n, :])
if self.variance != 0.0:
noise = np.random.normal(loc=0.0, scale=np.sqrt(self.variance),
size=observed_system.shape)
observed_system += noise.reshape(observed_system.shape)
if self.stn_ratio:
std_devs_signal = np.std(observed_system, axis=1)
std_devs_noise = std_devs_signal / np.sqrt(self.stn_ratio)
noise = np.random.normal(loc=0.0, scale=1.0,
size=observed_system.shape)
for n in range(self.dim):
noise[n, :] = noise[n, :] * std_devs_noise[n]
observed_system += noise.reshape(observed_system.shape)
return observed_system, t_obs.reshape(-1, 1)
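# A short illustration (not part of the original module) of the
# signal-to-noise branch used by 'observe' and 'observe_at_t' above: when
# stn_ratio is set, each state n is corrupted with Gaussian noise whose
# standard deviation is std(signal_n) / sqrt(stn_ratio). The helper name
# is hypothetical and exists only for demonstration.
def _stn_noise_example(signal: np.array, stn_ratio: float) -> np.array:
    std_noise = np.std(signal, axis=1) / np.sqrt(stn_ratio)
    noise = np.random.normal(size=signal.shape) * std_noise[:, None]
    return signal + noise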
class LotkaVolterra(DynamicalSystem):
"""
2D Lotka-Volterra ODE.
"""
def __init__(self,
true_param: Union[list, np.array] = (2.0, 1.0, 4.0, 1.0),
noise_variance: float = 0.1 ** 2,
stn_ratio: float = None):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(LotkaVolterra, self).__init__(2,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 4,\
"Error: length of true_param should be 4"
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
f = [theta[0] * y[0] - theta[1] * y[0] * y[1],
- theta[2] * y[1] + theta[3] * y[0] * y[1]]
return f
def simulate(self,
initial_state: Union[list, np.array] = (5.0, 3.0),
initial_time: float = 0.0,
final_time: float = 2.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(LotkaVolterra, self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self, initial_state: Union[list, np.array] = (5.0, 3.0),
initial_time: float = 0.0,
final_time: float = 2.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.1) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(LotkaVolterra,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class FitzHughNagumo(DynamicalSystem):
"""
2D FitzHugh-Nagumo ODE.
"""
def __init__(self,
true_param: Union[list, np.array] = (0.2, 0.2, 3.0),
noise_variance: float = 0.0,
stn_ratio: float = None):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(FitzHughNagumo, self).__init__(2,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 3,\
"Error: length of true_param should be 3"
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
f = [theta[2] * (y[0] - y[0]**3 / 3.0 + y[1]),
- 1.0 / theta[2] * (y[0] - theta[0] + theta[1] * y[1])]
return f
def simulate(self,
initial_state: Union[list, np.array] = (-1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 20.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(FitzHughNagumo, self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self,
initial_state: Union[list, np.array] = (-1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 20.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.5) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(FitzHughNagumo,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class ProteinTransduction(DynamicalSystem):
"""
Protein transduction system, Vyshemirsky and Girolami, 2008. With coherent
notation, y = [S, dS, R, R_s, R_pp]
"""
def __init__(self,
true_param: np.array = np.array([0.07, 0.6, 0.05, 0.3,
0.017, 0.3]),
noise_variance: float = 0.001**2,
stn_ratio: float = 0.0):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(ProteinTransduction, self).__init__(5,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 6,\
"Error: length of true_param should be 6"
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
f = [- theta[0] * y[0] - theta[1] * y[0] * y[2] + theta[2] * y[3],
theta[0] * y[0],
- theta[1] * y[0] * y[2] + theta[2] * y[3] +
theta[4] * y[4] / (theta[5] + y[4]),
theta[1] * y[0] * y[2] - theta[2] * y[3] - theta[3] * y[3],
theta[3] * y[3] - theta[4] * y[4] / (theta[5] + y[4])]
return f
def simulate(self, initial_state: Union[list, np.array] = (1.0, 0.0, 1.0,
0.0, 0.0),
initial_time: float = 0.0,
final_time: float = 100.0,
t_delta_integration: float = 0.1) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(ProteinTransduction,
self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self, initial_state: Union[list, np.array] = (1.0, 0.0, 1.0,
0.0, 0.0),
initial_time: float = 0.0,
final_time: float = 100.0,
t_delta_integration: float = 0.1,
t_delta_observation: float = 10.0) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(ProteinTransduction,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class Lorenz96(DynamicalSystem):
"""
Lorenz 96 Attractor, n_states different ODEs.
"""
def __init__(self,
n_states: int = 40,
true_param: float = 8.0,
noise_variance: float = 1.0,
stn_ratio: float = None):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
self.n_states = n_states
super(Lorenz96, self).__init__(n_states,
true_param,
noise_variance,
stn_ratio)
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
n_states = y.shape[0]
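        # Cyclic Lorenz-96 coupling:
        #   dx_n/dt = (x_{n+1} - x_{n-2}) * x_{n-1} - x_n + F,  F = theta.
        # The first two and the last equation below unroll the wrap-around
        # indices of the ring explicitly.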
f = [(y[1] - y[n_states - 2]) * y[n_states - 1] - y[0] + theta,
(y[2] - y[n_states - 1]) * y[0] - y[1] + theta]
for n in range(2, n_states - 1):
state_derivative = (y[n + 1] - y[n - 2]) * y[n - 1] - y[n] + theta
f.append(state_derivative)
state_derivative = (y[0] - y[n_states - 3])\
* y[n_states - 2] - y[n_states - 1] + theta
f.append(state_derivative)
return f
def simulate(self, initial_state: Union[list, np.array] = 1.0,
initial_time: float = 0.0,
final_time: float = 4.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
initial_state_vector = initial_state * np.ones(self.n_states)\
+ np.random.normal(0.0, 0.01, self.n_states)
system, t = super(Lorenz96, self).simulate(initial_state_vector,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self, initial_state: Union[list, np.array] = 1.0,
initial_time: float = 0.0,
final_time: float = 20.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.2) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(Lorenz96,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class Lorenz63(DynamicalSystem):
"""
3D Lorenz '63 (Lorenz attractor) ODE.
"""
def __init__(self,
true_param: Union[list, np.array] = (10.0, 28.0, 8.0/3.0),
noise_variance: float = 0.0,
stn_ratio: float = None):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(Lorenz63, self).__init__(3,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 3,\
"Error: length of true_param should be 3"
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
f = [theta[0]*(y[1] - y[0]),
y[0]*(theta[1] - y[2]) - y[1],
y[0]*y[1] - theta[2]*y[2]]
return f
def simulate(self,
initial_state: Union[list, np.array] = (1.0, 1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 10.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(Lorenz63, self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self,
initial_state: Union[list, np.array] = (1.0, 1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 10.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.5) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(Lorenz63,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class Quadrocopter(DynamicalSystem):
"""
m = 0.1 #kg
Ixx = 0.00062 #kg-m^2
Iyy = 0.00113 #kg-m^2
Izz = 0.9*(Ixx + Iyy) #kg-m^2 (Assume nearly flat object, z=0)
dx = 0.114 #m
dy = 0.0825 #m
g = 9.81 #m/s/s
DTR = 1/57.3; RTD = 57.3
code and parameters based on https://github.com/charlestytler/QuadcopterSim
"""
def __init__(self,
true_param: Union[list, np.array] = \
(0.1, 0.62, 1.13, 0.9, 0.114, 8.25, 9.85),
noise_variance: float = 0.0,
stn_ratio: float = 100):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(Quadrocopter, self).__init__(12,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 7,\
"Error: length of true_param should be 7"
return
@staticmethod
def controlForces(x):
trim = 0.24525 # just enough force to keep the quadrocopter stable
# fixed open-loop commands; the state x is unused in this simple controller
yaw_cmd = 0
climb_cmd = 0.01
pitch_cmd = 0.0005
roll_cmd = 0.0005
u = np.zeros(4)
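        # A simple thrust mixer: each rotor force is the trim value plus
        # signed quarters of the pitch, roll, climb and yaw commands, so the
        # total lift is 4*trim + climb_cmd while the attitude commands cancel
        # in the sum and act only through the moments L, M, N in _system_ode.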
u[0] = trim + ( pitch_cmd + roll_cmd + climb_cmd - yaw_cmd) / 4
u[1] = trim + (-pitch_cmd - roll_cmd + climb_cmd - yaw_cmd) / 4
u[2] = trim + ( pitch_cmd - roll_cmd + climb_cmd + yaw_cmd) / 4
u[3] = trim + (-pitch_cmd + roll_cmd + climb_cmd + yaw_cmd) / 4
return u
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed to calculate inputs;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
ub = y[0]
vb = y[1]
wb = y[2]
p = y[3]
q = y[4]
r = y[5]
phi = y[6]
theta_sys = y[7]
psi = y[8]
xE = y[9]
yE = y[10]
hE = y[11]
m = theta[0] #kg
Ixx = theta[1]*1e-3 #kg-m^2
Iyy = theta[2]*1e-3 #kg-m^2
Izz = theta[3]*(Ixx + Iyy) #kg-m^2 (Assume nearly flat object, z=0)
dx = theta[4] #m
dy = theta[5]*1e-2 #m
g = theta[6] #m/s/s
# Directly get forces as inputs
[F1, F2, F3, F4] = Quadrocopter.controlForces(y)
Fz = F1 + F2 + F3 + F4
L = (F2 + F3) * dy - (F1 + F4) * dy
M = (F1 + F3) * dx - (F2 + F4) * dx
N = 0 #-T(F1,dx,dy) - T(F2,dx,dy) + T(F3,dx,dy) + T(F4,dx,dy)
# Pre-calculate trig values
cphi = np.cos(phi); sphi = np.sin(phi)
cthe = np.cos(theta_sys); sthe = np.sin(theta_sys)
cpsi = np.cos(psi); spsi = np.sin(psi)
# Calculate the derivative of the state matrix using EOM
xdot = np.zeros(12)
xdot[0] = -g * sthe + r * vb - q * wb # = udot
xdot[1] = g * sphi*cthe - r * ub + p * wb # = vdot
xdot[2] = 1/m * (-Fz) + g*cphi*cthe + q * ub - p * vb # = wdot
xdot[3] = 1/Ixx * (L + (Iyy - Izz) * q * r) # = pdot
xdot[4] = 1/Iyy * (M + (Izz - Ixx) * p * r) # = qdot
xdot[5] = 1/Izz * (N + (Ixx - Iyy) * p * q) # = rdot
xdot[6] = p + (q*sphi + r*cphi) * sthe / cthe # = phidot
xdot[7] = q * cphi - r * sphi # = thetadot
xdot[8] = (q * sphi + r * cphi) / cthe # = psidot
xdot[9] = cthe*cpsi*ub + (-cphi*spsi + sphi*sthe*cpsi) * vb + \
(sphi*spsi+cphi*sthe*cpsi) * wb # = xEdot
xdot[10] = cthe*spsi * ub + (cphi*cpsi+sphi*sthe*spsi) * vb + \
(-sphi*cpsi+cphi*sthe*spsi) * wb # = yEdot
xdot[11] = -1*(-sthe * ub + sphi*cthe * vb + cphi*cthe * wb) # = hEdot
f = xdot
return f
def simulate(self,
initial_state: Union[list, np.array] = (0,0,0,0,0,0,0,0,0,0,0,0),
initial_time: float = 0.0,
final_time: float = 30.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(Quadrocopter, self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self, initial_state: Union[list, np.array] = (0,0,0,0,0,0,0,0,0,0,0,0),
initial_time: float = 0.0,
final_time: float = 30.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.1) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(Quadrocopter,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
class Glucose(DynamicalSystem):
"""
9D glucose model ODE (9 states, 10 rate parameters).
"""
def __init__(self,
true_param: Union[list, np.array] = (0.1, 0.0, 0.4, 0.0, 0.3, 0.0, 0.7, 0.0, 0.1, 0.2),
noise_variance: float = 0.1 ** 2,
stn_ratio: float = None):
"""
Constructor.
:param true_param: true parameters of the system;
:param noise_variance: variance of the observation noise; if non-zero,
it overrides the signal-to-noise ratio;
:param stn_ratio: signal-to-noise ratio (noise_variance should be set
to zero if stn_ratio is not None).
"""
super(Glucose, self).__init__(9,
true_param,
noise_variance,
stn_ratio)
assert self.theta.shape[0] == 10,\
"Error: length of true_param should be 10"
return
@staticmethod
def _system_ode(t: float, y: np.array,
theta: np.array) -> list:
"""
Describes the overall evolution of the system in the form:
dy / dt = f( t, y, args)
Needed by scipy.
:param t: time, needed in arguments even if it's not directly used;
:param y: current state;
:param theta: arguments and parameters of the system.
:return: the f function so built.
"""
p = theta
x = y
f = np.zeros(9)
f[0]= p[1]*x[5] - p[0]*x[0]*x[7]
f[1]= p[3]*x[6] - p[2]*x[1]*x[8]
f[2]= -p[7]*x[2] + p[6]*x[4]*x[8]
f[3]= -p[5]*x[3] + p[4]*x[4]*x[6]
f[4]= p[5]*x[3] + p[7]*x[2] - p[4]*x[4]*x[6] - p[6]*x[4]*x[8]
f[5]= -p[1]*x[5] - p[8]*x[5] + p[8]*x[6] + p[0]*x[0]*x[7]
f[6]= p[5]*x[3] - p[3]*x[6] + p[8]*x[5] - p[8]*x[6] + p[2]*x[1]*x[8] - p[4]*x[4]*x[6]
f[7]= p[1]*x[5] - p[9]*x[7] + p[9]*x[8] - p[0]*x[0]*x[7]
f[8]= p[3]*x[6] + p[7]*x[2] + p[9]*x[7] - p[9]*x[8] - p[2]*x[1]*x[8] - p[6]*x[4]*x[8]
return f
def simulate(self,
initial_state: Union[list, np.array] = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 100.0,
t_delta_integration: float = 0.01)\
-> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals.
:return: a numpy array containing the integrated dynamical system (of
size [n_states, n_points]) and a numpy array containing the time stamps.
"""
system, t = super(Glucose, self).simulate(initial_state,
initial_time,
final_time,
t_delta_integration)
return system, t
def observe(self, initial_state: Union[list, np.array] = (1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0),
initial_time: float = 0.0,
final_time: float = 100.0,
t_delta_integration: float = 0.01,
t_delta_observation: float = 0.1) -> Tuple[np.array, np.array]:
"""
Integrate the system using a scipy built-in ODE solver and extract the
noisy observations.
:param initial_state: initial state of the system;
:param initial_time: initial time of the simulation;
:param final_time: final time of the simulation;
:param t_delta_integration: time between integration intervals;
:param t_delta_observation: time between observation intervals.
:return: a numpy array containing the noisy observations of the
integrated dynamical system (of size [n_states, n_points])
and a numpy array containing the time stamps.
"""
observed_system, t = super(Glucose,
self).observe(initial_state,
initial_time,
final_time,
t_delta_integration,
t_delta_observation)
return observed_system, t
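# Minimal usage sketch (not part of the original module; assumes numpy and
# scipy are installed). Each subclass ships sensible defaults, so the
# systems run without extra arguments:
if __name__ == '__main__':
    lv = LotkaVolterra(noise_variance=0.1 ** 2)
    trajectory, t = lv.simulate()       # dense integration on [0, 2]
    observations, t_obs = lv.observe()  # noisy samples every 0.1 time units
    print(trajectory.shape, observations.shape)  # (2, n_points) (2, 21)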
|
{"hexsha": "ac73c3616af6dc5ead777e4c1e6ffdf703dcf3a6", "size": 39922, "ext": "py", "lang": "Python", "max_stars_repo_path": "odin/utils/dynamical_systems.py", "max_stars_repo_name": "sdi1100041/SLEIPNIR", "max_stars_repo_head_hexsha": "02dd3eca8574899fd3f0e287b1a050e76e5ba0de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-09T08:25:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-15T03:34:37.000Z", "max_issues_repo_path": "odin/utils/dynamical_systems.py", "max_issues_repo_name": "sdi1100041/SLEIPNIR", "max_issues_repo_head_hexsha": "02dd3eca8574899fd3f0e287b1a050e76e5ba0de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "odin/utils/dynamical_systems.py", "max_forks_repo_name": "sdi1100041/SLEIPNIR", "max_forks_repo_head_hexsha": "02dd3eca8574899fd3f0e287b1a050e76e5ba0de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-07T12:15:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-07T12:15:39.000Z", "avg_line_length": 45.2117780294, "max_line_length": 107, "alphanum_fraction": 0.5318120335, "include": true, "reason": "import numpy,from scipy", "num_tokens": 9346}
|
import numpy as np
# The 'fit.py' module is unchanged from the previous tutorial.
class FitDataset:
# noinspection PyUnresolvedReferences
def __init__(self, masked_dataset, model_data):
"""Class to fit a dataset with model data.
Parameters
-----------
masked_dataset : np.ndarray
The observed masked dataset that is fitted.
model_data : np.ndarray
The model data the data is fitted with.
Attributes
-----------
residual_map : np.ndarray
The residual-map of the fit (data - model_data).
chi_squared_map : np.ndarray
The chi-squared-map of the fit, ((data - model_data) / noise_map) ** 2.0
chi_squared : float
The overall chi-squared of the model's fit to the dataset, summed over every data point.
reduced_chi_squared : float
The reduced chi-squared of the model's fit to the dataset
(chi_squared / number of data points).
noise_normalization : float
The overall normalization term of the noise_map, summed over every data point.
log_likelihood : float
The overall log likelihood of the model's fit to the dataset, summed over every data point.
"""
self.masked_dataset = masked_dataset
self.model_data = model_data
# This is a convenience method that makes the dataset's xvalues (used to generate the model data) directly
# accessible to an instance of the fit class. It is used in the 'plot.py' module.
@property
def xvalues(self):
return self.masked_dataset.dataset.xvalues
# Another convenience method for the mask.
@property
def mask(self):
return self.masked_dataset.mask
# Given this is a masked fit, its attributes (the data, noise-map, residual-map, etc.) will all be masked.
@property
def data(self):
return self.masked_dataset.data
@property
def noise_map(self):
return self.masked_dataset.noise_map
@property
def residual_map(self):
return residual_map_from_data_model_data_and_mask(
data=self.data, model_data=self.model_data, mask=self.mask
)
@property
def normalized_residual_map(self):
return normalized_residual_map_from_residual_map_noise_map_and_mask(
residual_map=self.residual_map, noise_map=self.noise_map, mask=self.mask
)
@property
def chi_squared_map(self):
return chi_squared_map_from_residual_map_noise_map_and_mask(
residual_map=self.residual_map, noise_map=self.noise_map, mask=self.mask
)
@property
def signal_to_noise_map(self):
"""The signal-to-noise_map of the dataset and noise-map which are fitted."""
signal_to_noise_map = np.divide(self.data, self.noise_map)
signal_to_noise_map[signal_to_noise_map < 0] = 0
return signal_to_noise_map
@property
def chi_squared(self):
return chi_squared_from_chi_squared_map_and_mask(
chi_squared_map=self.chi_squared_map, mask=self.mask
)
@property
def noise_normalization(self):
return noise_normalization_from_noise_map_and_mask(
noise_map=self.noise_map, mask=self.mask
)
@property
def log_likelihood(self):
return likelihood_from_chi_squared_and_noise_normalization(
chi_squared=self.chi_squared, noise_normalization=self.noise_normalization
)
def residual_map_from_data_model_data_and_mask(data, mask, model_data):
"""
Returns the residual-map between a masked dataset and model data, where:
Residuals = (Data - Model_Data).
Parameters
-----------
data : np.ndarray
The observed data that is fitted.
mask : np.ndarray
The mask applied to the dataset, where `False` entries are included in the calculation.
model_data : np.ndarray
The model data used to fit the observed data.
"""
return np.subtract(
data, model_data, out=np.zeros_like(data), where=np.asarray(mask) == 0
)
def normalized_residual_map_from_residual_map_noise_map_and_mask(
residual_map, noise_map, mask
):
"""
Returns the normalized residual-map between a masked dataset and model data, where:
Normalized_Residual = (Data - Model_Data) / Noise
Parameters
-----------
residual_map : np.ndarray
The residual-map of the model-simulator fit to the observed dataset.
noise_map : np.ndarray
The noise-map of the observed dataset.
mask : np.ndarray
The mask applied to the residual-map, where `False` entries are included in the calculation.
"""
return np.divide(
residual_map,
noise_map,
out=np.zeros_like(residual_map),
where=np.asarray(mask) == 0,
)
def chi_squared_map_from_residual_map_noise_map_and_mask(residual_map, noise_map, mask):
"""
Returns the chi-squared-map between a masked residual-map and noise-map, where:
Chi_Squared = ((Residuals) / (Noise)) ** 2.0 = ((Data - Model)**2.0)/(Variances)
Parameters
-----------
residual_map : np.ndarray
The residual-map of the model-simulator fit to the observed dataset.
noise_map : np.ndarray
The noise-map of the observed dataset.
mask : np.ndarray
The mask applied to the residual-map, where `False` entries are included in the calculation.
"""
return np.square(
np.divide(
residual_map,
noise_map,
out=np.zeros_like(residual_map),
where=np.asarray(mask) == 0,
)
)
def chi_squared_from_chi_squared_map_and_mask(chi_squared_map, mask):
"""
Returns the chi-squared terms of each model data's fit to an observed dataset, by summing the masked
chi-squared-map of the fit.
Parameters
----------
chi_squared_map : np.ndarray
The chi-squared-map of values of the model-simulator fit to the observed dataset.
mask : np.ndarray
The mask applied to the chi-squared-map, where `False` entries are included in the calculation.
"""
return np.sum(chi_squared_map[np.asarray(mask) == 0])
def noise_normalization_from_noise_map_and_mask(noise_map, mask):
"""
Returns the noise-map normalization terms of masked noise-map, summing the noise_map value in every pixel as:
[Noise_Term] = sum(log(2*pi*[Noise]**2.0))
Parameters
----------
noise_map : np.ndarray
The masked noise-map of the observed dataset.
mask : np.ndarray
The mask applied to the noise-map, where `False` entries are included in the calculation.
"""
return np.sum(np.log(2 * np.pi * noise_map[np.asarray(mask) == 0] ** 2.0))
def likelihood_from_chi_squared_and_noise_normalization(
chi_squared, noise_normalization
):
"""
Returns the log likelihood of each 1D model-data fit to the dataset, where:
Log Likelihood = -0.5*[Chi_Squared_Term + Noise_Term] (see functions above for these definitions)
Parameters
----------
chi_squared : float
The chi-squared term for the model-data fit to the observed dataset.
noise_normalization : float
The normalization noise_map-term for the observed dataset's noise-map.
"""
return -0.5 * (chi_squared + noise_normalization)
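# A tiny worked example (not part of the original module) exercising the
# free functions above on a 1D dataset; mask entries are False (== 0), so
# every point is included in the fit:
if __name__ == '__main__':
    data = np.array([1.0, 2.0, 3.0])
    model = np.array([1.1, 1.9, 3.2])
    noise = np.full(3, 0.1)
    mask = np.zeros(3, dtype=bool)
    residuals = residual_map_from_data_model_data_and_mask(data, mask, model)
    chi2_map = chi_squared_map_from_residual_map_noise_map_and_mask(
        residuals, noise, mask)
    chi2 = chi_squared_from_chi_squared_map_and_mask(chi2_map, mask)
    norm = noise_normalization_from_noise_map_and_mask(noise, mask)
    # chi2 == 1 + 1 + 4 == 6 for these numbers
    print(likelihood_from_chi_squared_and_noise_normalization(chi2, norm))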
|
{"hexsha": "a56f4bf5ed2e1c5614380340f39d1735b912c2a8", "size": 7640, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_autofit/integration/src/fit/fit.py", "max_stars_repo_name": "arfon/PyAutoFit", "max_stars_repo_head_hexsha": "5926b13eefd97e089ee468cbec33452766edbd22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-18T23:20:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T23:20:31.000Z", "max_issues_repo_path": "test_autofit/integration/src/fit/fit.py", "max_issues_repo_name": "arfon/PyAutoFit", "max_issues_repo_head_hexsha": "5926b13eefd97e089ee468cbec33452766edbd22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_autofit/integration/src/fit/fit.py", "max_forks_repo_name": "arfon/PyAutoFit", "max_forks_repo_head_hexsha": "5926b13eefd97e089ee468cbec33452766edbd22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4144144144, "max_line_length": 121, "alphanum_fraction": 0.6509162304, "include": true, "reason": "import numpy", "num_tokens": 1649}
|
(** * imp: a formalisation of the IMP programming language on top of KAT *)
(* We formalise the IMP language (whose programs are also known as
"while programs"). We give a big step semantics as an inductive
predicate, and using KAT, and we show that the two versions
actually coincide.
We then use the [kat] tactic to prove some simple program
equivalences, and to derive all rules of corresponding Hoare logic
for partial correctness. *)
From RelationAlgebra Require Import kat prop rel comparisons kat_tac.
Section s.
(** identifiers for memory locations *)
Variable loc: Set.
(** abstract state (or memory) *)
Variable state: Set.
(** updating the state *)
Variable update: loc -> nat -> state -> state.
(** * Definition of the language *)
(** programs *)
Inductive prog :=
| skp
| aff (x: loc) (e: state -> nat)
| seq (p q: prog)
| ite (t: dset state) (p q: prog)
| whl (t: dset state) (p: prog).
(** notations *)
Declare Scope imp_scope.
Bind Scope imp_scope with prog.
Delimit Scope imp_scope with imp.
Notation "x <- y" := (aff x y) (at level 90): imp_scope.
Notation "p ;; q" := (seq p%imp q%imp) (left associativity, at level 101): imp_scope.
Arguments ite _%ra _%imp _%imp.
Arguments whl _%ra _%imp.
(** * Big step semantics *)
(** corresponding functional relation *)
Notation upd x e := (frel (fun s => update x (e s) s)).
(** ** using KAT expressions in the model of relations:
the semantics can then be given by induction on the program, using
a simple fixpoint *)
Fixpoint bstep (p: prog): hrel state state :=
match p with
| skp => 1
| aff x e => upd x e
| seq p q => bstep p ⋅ bstep q
| ite b p q => [b] ⋅ bstep p + [!b] ⋅ bstep q
| whl b p => ([b] ⋅ bstep p)^* ⋅ [!b]
end.
(** ** using an inductive predicate, as in standard textbooks *)
Inductive bstep': prog -> hrel state state :=
| s_skp: forall s, bstep' skp s s
| s_aff: forall x e s, bstep' (x <- e) s (update x (e s) s)
| s_seq: forall p q s s' s'', bstep' p s s' -> bstep' q s' s'' -> bstep' (p ;; q) s s''
| s_ite_ff: forall (b: dset state) p q s s', b s = false -> bstep' q s s' -> bstep' (ite b p q) s s'
| s_ite_tt: forall (b: dset state) p q s s', b s = true -> bstep' p s s' -> bstep' (ite b p q) s s'
| s_whl_ff: forall (b: dset state) p s, b s = false -> bstep' (whl b p) s s
| s_whl_tt: forall (b: dset state) p s s', b s = true -> bstep' (p ;; whl b p) s s' -> bstep' (whl b p) s s'.
(** ** equivalence between the two definitions *)
Lemma bstep_eq p: bstep' p ≡ bstep p.
Proof.
apply antisym.
- intros s s'. induction 1.
reflexivity.
reflexivity.
eexists; eassumption.
right. eexists. split. reflexivity. simpl; now rewrite H. assumption.
left. eexists. split. reflexivity. assumption. assumption.
exists s. apply (str_refl ([b] ⋅ bstep p)). reflexivity.
simpl. unfold hrel_inj. simpl. now rewrite H.
destruct IHbstep' as [t ? [t' ? ?]]. exists t'. 2: assumption.
apply (str_cons ([b] ⋅ bstep p)). exists t. 2: assumption.
eexists; eauto. now split.
- induction p; unfold bstep; fold bstep.
intros ? ? <-. constructor.
intros ? ? ->. constructor.
intros ? ? [? H1 H2]. econstructor. apply IHp1, H1. apply IHp2, H2.
intros ? ? [[? [<- H] H']|[? [<- H] H']].
apply s_ite_tt. assumption. apply IHp1, H'.
apply s_ite_ff. now apply Bool.negb_true_iff. apply IHp2, H'.
apply str_ind_l'.
intros ? ? [<- H]. apply s_whl_ff. now apply Bool.negb_true_iff.
rewrite <-dotA. intros s s'' [? [<- H] [s' H' H'']]. apply s_whl_tt. assumption.
econstructor. apply IHp, H'. assumption.
Qed.
(** * Some program equivalences *)
(** two programs are said to be equivalent if they have the same semantics *)
Notation "p ~ q" := (bstep p ≡ bstep q) (at level 80).
(** ad-hoc simplification tactic *)
Ltac simp := unfold bstep; fold bstep.
(** ** denesting nested loops *)
Lemma two_loops b p:
whl b (whl b p) ~ whl b p.
Proof. simp. kat. Qed.
(** ** folding a loop *)
Lemma fold_loop b p:
whl b (p ;; ite b p skp) ~
whl b p.
Proof. simp. kat. Qed.
(** ** eliminating deadcode *)
Lemma dead_code b p q r:
(whl b p ;; ite b q r) ~
(whl b p ;; r).
Proof. simp. kat. Qed.
Lemma dead_code' a b p q r:
(whl (a ⊔ b) p ;; ite b q r) ~
(whl (a ⊔ b) p ;; r).
Proof. simp. kat. Qed.
(** * Reasoning about assignations *)
(** (higher-order style) substitution in formulas and expressions *)
Definition subst x v (A: dset state): dset state :=
fun s => A (update x (v s) s).
Definition esubst x v (e: state -> nat): state -> nat :=
fun s => e (update x (v s) s).
(** is [x] fresh in the expression e *)
Definition fresh x (e: state -> nat) := forall v s, e (update x v s) = e s.
Hypothesis update_twice: forall x i j s, update x j (update x i s) = update x j s.
Hypothesis update_comm: forall x y i j s, x<>y -> update x i (update y j s) = update y j (update x i s).
(** ** stacking assignations *)
Lemma aff_stack x e f:
(x <- e ;; x <- f) ~
(x <- esubst x e f).
Proof.
simp. rewrite frel_comp.
apply frel_weq; intro s.
apply update_twice.
Qed.
(** ** removing duplicates *)
Lemma aff_idem x e: fresh x e -> (x <- e ;; x <- e) ~ (x <- e).
Proof.
intro. rewrite aff_stack.
intros s s'. cbv. rewrite (H (e s)). tauto.
Qed.
(** ** commuting assignations *)
Lemma aff_comm x y e f: x<>y -> fresh y e ->
(x <- e ;; y <- f) ~ (y <- esubst x e f ;; x <- e).
Proof.
intros Hx Hy. simp. rewrite 2frel_comp. apply frel_weq; intro s.
rewrite update_comm by congruence.
now rewrite (Hy _).
Qed.
(** ** delaying choices *)
(** in the above example, we cannot exploit KAT since this is just
about assignations. In the following example, we show how to
perform a mixed proof: once we assert that the test [t] somehow
commutes with the assignation [x<-e], [hkat] can make use of this
assumption to close the goal *)
Lemma aff_ite x e t p q:
(x <- e ;; ite t p q)
~
(ite (subst x e t) (x <- e ;; p) (x <- e ;; q)).
Proof.
simp.
assert (H: upd x e ⋅ [t] ≡ [subst x e t] ⋅ upd x e)
by (cbv; firstorder; subst; eauto).
hkat.
Qed.
(** * Embedding Hoare logic for partial correctness *)
(** Hoare triples for partial correctness can be expressed really
easily using KAT: *)
Notation Hoare A p B := ([A] ⋅ bstep p ⋅ [!B] ≦ 0).
(** ** correspondence w.r.t. the standard interpretation of Hoare triples *)
Lemma Hoare_eq A p B:
Hoare A p B <->
forall s s', A s -> bstep p s s' -> B s'.
Proof.
split.
- intros H s s' HA Hp. case_eq (B s'). reflexivity. intro HB.
destruct (H s s'). exists s'. exists s.
now split. assumption. split. reflexivity. simpl. now rewrite HB.
- intros H s s' [? [? [<- HA] Hp] [-> HB]]. simpl in HB.
rewrite (H _ _ HA Hp) in HB. discriminate.
Qed.
(** ** deriving Hoare logic rules using the [hkat] tactic *)
(** Hoare triples are encoded as propositions of the shape [x ≦ 0] ;
therefore, they can always be eliminated by [hkat], so that all
rules of Hoare logic can be proved automatically (except for the
assignation rule, of course)
This idea comes from the following paper:
Dexter Kozen. On Hoare logic and Kleene algebra with tests.
Trans. Computational Logic, 1(1):60-76, July 2000.
The fact that we have an automatic tactic makes it trivial to
formalise it. *)
Lemma weakening (A A' B B': dset state) p:
A' ≦ A -> Hoare A p B -> B ≦ B' -> Hoare A' p B'.
Proof. hkat. Qed.
Lemma rule_skp A: Hoare A skp A.
Proof. simp. kat. Qed.
Lemma rule_seq A B C p q:
Hoare A p B ->
Hoare B q C ->
Hoare A (p;;q) C.
Proof. simp. hkat. Qed.
Lemma rule_ite A B t p q:
Hoare (A ⊓ t) p B ->
Hoare (A ⊓ !t) q B ->
Hoare A (ite t p q) B.
Proof. simp. hkat. Qed.
Lemma rule_whl A t p:
Hoare (A ⊓ t) p A ->
Hoare A (whl t p) (A ⊓ neg t).
Proof. simp. hkat. Qed.
Lemma rule_aff x v (A: dset state): Hoare (subst x v A) (x <- v) A.
Proof.
rewrite Hoare_eq. intros s s' HA H.
now inversion_clear H.
Qed.
Lemma wrong_rule_whl A t p:
Hoare (A ⊓ !t) p A ->
Hoare A (whl t p) (A ⊓ !t).
Proof. simp. Fail hkat. Abort.
Lemma rule_whl' (I A: dset state) t p:
Hoare (I ⊓ t) p I ->
I ⊓ !t ≦ A ->
Hoare I (whl t p) A.
Proof. eauto 3 using weakening, rule_whl. Qed.
End s.
|
{"author": "damien-pous", "repo": "relation-algebra", "sha": "13b99896782e449c7ca3910e48e18427517c8135", "save_path": "github-repos/coq/damien-pous-relation-algebra", "path": "github-repos/coq/damien-pous-relation-algebra/relation-algebra-13b99896782e449c7ca3910e48e18427517c8135/examples/imp.v"}
|
import torch
import numpy as np
import pyredner
import enum
import math
from typing import Optional
class Texture:
"""
Representing a texture and its mipmap.
Args
====
texels: torch.Tensor
a float32 tensor with size C or [height, width, C]
uv_scale: Optional[torch.Tensor]
scale the uv coordinates when mapping the texture
a float32 tensor with size 2
"""
def __init__(self,
texels: torch.Tensor,
uv_scale: Optional[torch.Tensor] = None,
mesh_colors_resolution = 0):
if uv_scale is None:
uv_scale = torch.tensor([1.0, 1.0], device = pyredner.get_device())
assert(texels.dtype == torch.float32)
assert(uv_scale.dtype == torch.float32)
assert(uv_scale.is_contiguous())
self._texels = texels
self.uv_scale = uv_scale
self.mesh_colors_resolution = mesh_colors_resolution
self.generate_mipmap()
def generate_mipmap(self):
texels = self._texels
if len(texels.shape) >= 2:
# Build a mipmap for texels
width = max(texels.shape[0], texels.shape[1])
num_levels = math.ceil(math.log(width, 2) + 1)
mipmap = texels.unsqueeze(0).expand(num_levels, *texels.shape)
if len(mipmap.shape) == 3:
mipmap.unsqueeze_(-1)
num_channels = mipmap.shape[-1]
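            # The expanded tensor above is only used to read off a robust
            # channel count (grayscale input gains a trailing channel axis);
            # the mipmap pyramid itself is rebuilt level by level from
            # 'texels' below.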
box_filter = torch.ones(num_channels, 1, 2, 2,
device = texels.device) / 4.0
# Convert from HWC to NCHW
base_level = texels.unsqueeze(0).permute(0, 3, 1, 2)
mipmap = [base_level]
prev_lvl = base_level
for l in range(1, num_levels):
dilation_size = 2 ** (l - 1)
# Pad for circular boundary condition
# This is slow. The hope is at some point PyTorch will support
# circular boundary condition for conv2d
desired_height = prev_lvl.shape[2] + dilation_size
while prev_lvl.shape[2] < desired_height:
prev_lvl = torch.cat([prev_lvl, prev_lvl[:,:,0:(desired_height - prev_lvl.shape[2])]], dim=2)
desired_width = prev_lvl.shape[3] + dilation_size
while prev_lvl.shape[3] < desired_width:
prev_lvl = torch.cat([prev_lvl, prev_lvl[:,:,:,0:dilation_size]], dim=3)
current_lvl = torch.nn.functional.conv2d(\
prev_lvl, box_filter,
dilation = dilation_size,
groups = num_channels)
mipmap.append(current_lvl)
prev_lvl = current_lvl
mipmap = torch.cat(mipmap, 0)
# Convert from NCHW to NHWC
mipmap = mipmap.permute(0, 2, 3, 1)
texels = mipmap.contiguous()
self.mipmap = texels
@property
def texels(self):
return self._texels
@texels.setter
def texels(self, value):
self._texels = value
self.generate_mipmap()
def state_dict(self):
return {
'texels': self.texels,
'mipmap': self.mipmap,
'uv_scale': self.uv_scale,
'mesh_colors_resolution': self.mesh_colors_resolution
}
@classmethod
def load_state_dict(cls, state_dict):
out = cls.__new__(cls)
# Assign the private field directly: the 'texels' setter would rebuild
# the mipmap, which is immediately overwritten from the state dict below.
out._texels = state_dict['texels']
out.mipmap = state_dict['mipmap']
out.uv_scale = state_dict['uv_scale'].to(torch.device('cpu'))
out.mesh_colors_resolution = state_dict['mesh_colors_resolution']
return out
|
{"hexsha": "3aa94441977c89e5c1e474abf9ea71f13527eabf", "size": 3780, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyredner/texture.py", "max_stars_repo_name": "brownvc/shapefromtracing", "max_stars_repo_head_hexsha": "f89226c8f20f1be9d6c8b7e46405872355accf2b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-12-09T09:54:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T07:53:55.000Z", "max_issues_repo_path": "pyredner/texture.py", "max_issues_repo_name": "brownvc/shapefromtracing", "max_issues_repo_head_hexsha": "f89226c8f20f1be9d6c8b7e46405872355accf2b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyredner/texture.py", "max_forks_repo_name": "brownvc/shapefromtracing", "max_forks_repo_head_hexsha": "f89226c8f20f1be9d6c8b7e46405872355accf2b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-16T13:36:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-11T13:48:36.000Z", "avg_line_length": 37.0588235294, "max_line_length": 114, "alphanum_fraction": 0.5605820106, "include": true, "reason": "import numpy", "num_tokens": 872}
|
[STATEMENT]
lemma reachable_snapshot_inv_black_heap_no_grey_refs_dequeue_Mutate:
assumes sb: "sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws"
assumes bh: "black_heap s"
assumes ngr: "no_grey_refs s"
assumes vri: "valid_refs_inv s"
shows "mut_m.reachable_snapshot_inv m (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)),
mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))" (is "mut_m.reachable_snapshot_inv m ?s'")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mut_m.reachable_snapshot_inv m (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (rule mut_m.reachable_snapshot_invI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. mut_m.reachable m y (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>)) \<Longrightarrow> in_snapshot y (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (rule in_snapshotI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. mut_m.reachable m y (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>)) \<Longrightarrow> black y (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (erule black_heap_reachable)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>y. black_heap (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
2. \<And>y. valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
using bh vri
[PROOF STATE]
proof (prove)
using this:
black_heap s
valid_refs_inv s
goal (2 subgoals):
1. \<And>y. black_heap (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
2. \<And>y. valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (simp add: black_heap_def fun_upd_apply; fail)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
using bh ngr sb vri
[PROOF STATE]
proof (prove)
using this:
black_heap s
no_grey_refs s
sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws
valid_refs_inv s
goal (1 subgoal):
1. \<And>y. valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (subst valid_refs_inv_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>black_heap s; no_grey_refs s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s\<rbrakk> \<Longrightarrow> \<forall>x xa. mut_m.reachable x xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>)) \<or> grey_reachable xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>)) \<longrightarrow> obj_at (\<lambda>s. True) xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
[PROOF STEP]
apply (clarsimp simp add: no_grey_refs_def grey_reachable_def fun_upd_apply)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s; mut_m.reachable x xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))\<rbrakk> \<Longrightarrow> obj_at (\<lambda>s. True) xa s
[PROOF STEP]
apply (drule black_heap_reachable)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s\<rbrakk> \<Longrightarrow> black_heap (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
2. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s\<rbrakk> \<Longrightarrow> valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
3. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s; black xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))\<rbrakk> \<Longrightarrow> obj_at (\<lambda>s. True) xa s
[PROOF STEP]
apply (simp add: black_heap_def fun_upd_apply; fail)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s\<rbrakk> \<Longrightarrow> valid_refs_inv (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))
2. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s; black xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))\<rbrakk> \<Longrightarrow> obj_at (\<lambda>s. True) xa s
[PROOF STEP]
apply (clarsimp simp: valid_refs_inv_dequeue_Mutate; fail)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x xa. \<lbrakk>black_heap s; \<forall>x. \<not> grey x s; sys_mem_store_buffers (mutator m') s = mw_Mutate r f opt_r' # ws; valid_refs_inv s; black xa (s(sys := s sys\<lparr>heap := (sys_heap s)(r := map_option (\<lambda>obj. obj\<lparr>obj_fields := (obj_fields obj)(f := opt_r')\<rparr>) (sys_heap s r)), mem_store_buffers := (mem_store_buffers (s sys))(mutator m' := ws)\<rparr>))\<rbrakk> \<Longrightarrow> obj_at (\<lambda>s. True) xa s
[PROOF STEP]
apply (clarsimp simp: in_snapshot_def in_snapshot_valid_ref fun_upd_apply)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 3883, "file": "ConcurrentGC_Global_Noninterference", "length": 13}
|
import time
#from pyftdi.ftdi import Ftdi
#ft = Ftdi()
#url = 'ftdi://ftdi:2232:FT2RTNYW/2'
#ft.open_bitbang_from_url(url, 0b00001111)
#print(ft.is_connected)
#print(ft.bitbang_enabled)
#print(ft.device_version)
#print(ft.fifo_sizes)
#print(ft.get_identifiers(url))
#print(ft.has_mpsse)
#print(ft.has_wide_port)
#print(ft.ic_name)
#print('{:08b}'.format(ft.read_pins()))
#time.sleep(5)
#exit(1)
from pyftdi.gpio import GpioController
import sys
sys.path.insert(1, '/home/kaveh/Development/eclipse/neos/script/')
from circuit import *
import vparse
import numpy as np
import os
import serial
SCA_CLK = 0
SCA_DATA = 1
SCA_RESET = 2
BITS = [1, 2, 4, 16, 32, 64, 128, 256]
NUM_INS = 51
WAIT = 0.01
class Controller:
def __init__(self):
self.gp = ''
self.state = 0
import serial.tools.list_ports
serports = list(serial.tools.list_ports.comports())
print([port.name for port in serports])
#sp = int(input('select com port for usb2fpga:'))
sp = -1
for i in range(len(serports)):
if 'ACM' in serports[i].name:
sp = i
if sp not in range(0, len(serports)):
print('invalid port {}'.format(sp))
exit(1)
self.ser = serial.Serial('/dev/' + serports[sp].name, 115200, timeout=0.1)
return
def reset_receiver(self):
return
def send_to_device(self, invals, flip_mask):
assert len(invals) == NUM_INS
assert len(flip_mask) == NUM_INS
# while True:
# string = input('input:')
# self.ser.write(string.encode())
# print(self.ser.read(10000).decode())
self.ser.write(b'rr')
time.sleep(0.005)
string = ''
#string = self.ser.readline().decode()
#string += self.ser.readline().decode()
#string += self.ser.readline().decode()
# print(string)
# if 'enter bit index' in string:
# print('Setup success!')
# else:
# print('bad response!')
# exit(1)
#self.ser.write('{}\n'.format(bit_index).encode())
#string = self.ser.read(100)
#print('got back: ', string)
# if 'enter input values' in string:
# print('index success')
# else:
# print('bad response!')
# exit(1)
#
#print('end')
self.ser.write(b'0')
#self.ser.reset_input_buffer()
message = b''
for b in invals:
print(b, end='')
message += '{}'.format(b).encode()
for b in flip_mask:
print(b, end='')
message += '{}'.format(b).encode()
self.ser.write(message)
# self.ser.flush()
# # time.sleep(1)
string = b''
i = 0
start = time.time()
while b'success' not in string:
string = self.ser.readline()
print('got:', string)
i += 1
if b'failed' in string or i > 4:
print('FPGA message not sent properly!')
self.send_to_device(invals, flip_mask)
print('Ack time: ', time.time() - start)
print('done ', string)
return
import copy
import pyvisa
import dill
import matplotlib.pyplot as plt
class Oscope():
def __init__(self):
self.rm = pyvisa.ResourceManager()
self.devices = self.rm.list_resources()
print(self.devices)
self.devnum = -1
for i in range(len(self.devices)):
if 'USB0' in self.devices[i]:
self.devnum = i
#self.devnum = int(input("which device to pick? "))
self.device_addr = self.devices[self.devnum]
#device_addr = 'USB0::10893::6039::CN57266229::0::INSTR'
print("trying to connect to ", self.device_addr)
self.scope = self.rm.open_resource(self.device_addr) # Connect to the scope using the VISA address (see below)
#print(scope)
print(self.scope.query('*IDN?'))
return
def __del__(self):
self.scope.close()
return
def collect_trace(self):
# Allow plots to appear inline with the IPython notebook
# scope.read_termination = '\n' # For ascii transfers the read is terminated with a newline character
start = time.time()
self.scope.write(':WAVeform:SOURce CHANnel2')
self.scope.write(':WAVeform:FORMat ASCII')
self.scope.write(':WAVeform:POINts 1000')
print('format time: ', time.time() - start)
start = time.time()
print('Reading SCOPE...')
wfm_ascii = self.scope.query(':WAVeform:DATA?')
print('Done')
print('query time is: ', time.time() - start)
#print(wfm_ascii)
#print(wfm_ascii)
#print('ascii table size: ', len(wfm_ascii))
x = []
y = []
i = 0
for st in wfm_ascii.split(','):
i += 1
if i > 5 or '#' not in st:
y.append(float(st))
print('data width: ', len(y))
#print(y)
#exit(1)
return y
def plot_trace(self, y):
x = range(0, len(y))
plt.plot(x, y)
plt.show()
return
class ScaUnlock():
def __init__(self, enc_cir_filename, sim_cir_filename):
self.enc_cir = Circuit(enc_cir_filename)
self.sim_cir = Circuit(sim_cir_filename)
return
def hamming_model(self, input_vec, flip_mask, hamm_function = 0):
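# Power-model variants selected by hamm_function (each is normalised by
# the total wire count before being returned):
#   0 -- Hamming distance over internal wires only (input/key pins excluded)
#   1 -- Hamming distance over all wires, inputs included
#   2 -- Hamming weight of the input flip mask itself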
i = 0
sim_map1 = dict()
sim_map2 = dict()
for xid in self.enc_cir.ins_and_keys():
sim_map1[xid] = input_vec[i]
i += 1
sim_map2 = copy.deepcopy(sim_map1)
i = 0
for xid in self.enc_cir.ins_and_keys():
if flip_mask[i] == 1:
sim_map2[xid] = ~sim_map1[xid] & 1
i += 1
self.enc_cir.simulate(sim_map1)
self.enc_cir.simulate(sim_map2)
hamming_distance = 0.0
if hamm_function == 0:
inset = set(self.enc_cir.ins_and_keys())
for xid, val in sim_map1.items():
if val != sim_map2[xid] and xid not in inset:
hamming_distance += 1.0
elif hamm_function == 1:
for xid, val in sim_map1.items():
if val != sim_map2[xid]:
hamming_distance += 1.0
elif hamm_function == 2:
for b in flip_mask:
if b == 1:
hamming_distance += 1.0
print('Hamm is : ', hamming_distance, hamming_distance / float(self.enc_cir.num_wires()))
return hamming_distance / float(self.enc_cir.num_wires())
def collect_trace(self, invals, flip_mask):
start = time.time()
self.cnt.send_to_device(invals, flip_mask)
print('device send time: ', time.time() - start)
time.sleep(0.05)
trace = self.osc.collect_trace()
print('trace collect time: ', time.time() - start)
return trace
def run_attack_v1(self):
self.cnt = Controller()
self.osc = Oscope()
self.num_ins = self.enc_cir.num_ins_and_keys()
print('NUM_INS:', self.num_ins)
while True:
instr = input('enter character to roll ')
if instr == 'q':
self.cnt.ser.close()
exit(1)
invals = np.random.randint(2, size=NUM_INS)
bit_index = np.random.randint(0, NUM_INS - 1)
flip_mask = np.zeros((NUM_INS,), dtype=int)
flip_mask[bit_index] = 1
print('invals: ', end='')
for b in invals:
print(b, end='')
print('\nflipms: ', end='')
for b in flip_mask:
print(b, end='')
print()
trace = self.collect_trace(invals, flip_mask)
self.osc.plot_trace(trace)
return
def run_attack_v2(self):
self.num_ins = self.enc_cir.num_ins_and_keys()
print('NUM_INS:', self.num_ins)
self.num_traces = 10000
dill_filename = 'traces.dill'
traces = []
vectors = []
pwrmodel_vals = []
if not os.path.isfile(dill_filename):
if input('Are you sure you want to collect traces? [yes]') != 'yes':
exit(1)
self.cnt = Controller()
self.osc = Oscope()
traces = []
vectors = []
for nt in range(self.num_traces):
invals = np.random.randint(2, size=self.num_ins)
bit_index = np.random.randint(0, self.num_ins - 1)
flip_mask = np.random.randint(2, size=self.num_ins)
#flip_mask = np.zeros((self.num_ins,), dtype=int)
#flip_mask[bit_index] = 1
vectors.append((invals, flip_mask))
print('invals: ', end='')
for b in invals:
print(b, end='')
print('\nflipms: ', end='')
for b in flip_mask:
print(b, end='')
print()
traces.append(self.collect_trace(invals, flip_mask))
#osc.plot_trace(trace)
self.attack_data = (vectors, traces)
with open('traces.dill', 'wb') as fn:
dill.dump(self.attack_data, fn)
else:
with open('traces.dill', 'rb') as fn:
self.attack_data = dill.load(fn)
# analysis step
traces = self.attack_data[1]
vectors = self.attack_data[0]
self.num_points = len(traces[0])
print('num_points is: ', self.num_points)
np_traces = np.asfarray(traces)
print(np_traces.shape)
pwrmodel_vals = np.zeros((3, self.num_traces))
i = 0
for invals, flip_mask in vectors:
#print(invals, flip_mask)
for j in range(3):
hamm = self.hamming_model(invals, flip_mask, j)
pwrmodel_vals[j][i] = float(hamm)
i += 1
print(pwrmodel_vals.shape, np_traces.shape)
time_x = np.arange(self.num_points)
# Per-trace signal power: sum of squared samples
sig_power = np.sum(np_traces**2, axis=1)
print(sig_power.shape)
maxp = np.argmax(pwrmodel_vals[0])
minp = np.argmin(pwrmodel_vals[0])
avg_dict = dict()
avg_count = dict()
for i in range(self.num_traces):
if pwrmodel_vals[0][i] not in avg_dict:
avg_dict[pwrmodel_vals[0][i]] = 0
avg_count[pwrmodel_vals[0][i]] = 0
avg_dict[pwrmodel_vals[0][i]] += sig_power[i]
avg_count[pwrmodel_vals[0][i]] += 1
x = []
y = []
for p in sorted(avg_dict):
x.append(p)
y.append(avg_dict[p]/avg_count[p])
plt.scatter(pwrmodel_vals[0], sig_power)
plt.plot(x, y)
plt.show()
for i in range(self.num_traces):
if i == minp:
plt.plot(time_x, np_traces[i], 'b')
elif i == maxp:
plt.plot(time_x, np_traces[i], 'r')
else:
plt.plot(time_x, np_traces[i], 'gray', linewidth=0.05)
#plt.plot(time_x, np_traces[maxp], 'b', time_x, np_traces[minp], 'r')
plt.show()
import scipy.stats
x = np.arange(self.num_points)
y = np.zeros((4, self.num_points), dtype=float)
randvec = np.random.uniform(-1, 1, self.num_traces)
np_traces = np_traces**2
for xp in x:
#print(np_traces[:, xp].shape, pwrmodel_vals.shape)
y[0][xp] = scipy.stats.pearsonr(np_traces[:, xp], pwrmodel_vals[0])[0]
y[1][xp] = scipy.stats.pearsonr(np_traces[:, xp], pwrmodel_vals[1])[0]
y[2][xp] = scipy.stats.pearsonr(np_traces[:, xp], pwrmodel_vals[2])[0]
y[3][xp] = scipy.stats.pearsonr(np_traces[:, xp], randvec)[0]
plt.plot(x, y[0], label='y0')
plt.plot(x, y[1], label='y1')
plt.plot(x, y[2], label='y2')
plt.plot(x, y[3], label='y3')
plt.legend()
plt.show()
return
if __name__ == '__main__':
if len(sys.argv) != 3:
print('usage: sca_unlock.py <enc_cir> <sim_cir>')
exit(1)
unl = ScaUnlock(sys.argv[1], sys.argv[2])
unl.run_attack_v2()
#unl.enc_cir.write_bench()
# TEST MODE
#while True:
# instr = input("enter command: ")
# cnt.bitbang_send(ord(instr[0]))
#
# if instr == 'r':
# cnt.reset_receiver()
# else:
# bit_index = int(input("enter input index: "))
# input_bits = ''
# while len(input_bits) < NUM_INS:
# input_bits += input('enter input bits ({0}/{1}): '.format(len(input_bits), NUM_INS))
# input_bits = list(input_bits[0:NUM_INS - 1])
#
# num_bytes = int(NUM_INS / 8)
# bytes = []
# for nb in range(0, num_bytes):
# byte2send = 0
# for i in range(0, 8):
# bit = int(input_bits[nb*8 + i] == '1')
# byte2send += (bit << i)
# print(bin(byte2send))
# bytes.append(byte2send)
# for byte in bytes:
# cnt.bitbang_send(byte)
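# ---------------------------------------------------------------------------
# Minimal standalone sketch of the correlation step in run_attack_v2
# (synthetic data only, no hardware; all names below are local to the
# sketch). A per-sample Pearson correlation between the traces and a
# hypothetical power model picks out the leaking time sample:
#
#     import numpy as np
#     import scipy.stats
#
#     rng = np.random.default_rng(0)
#     n_traces, n_points = 200, 50
#     model = rng.random(n_traces)                 # hypothetical power model
#     traces = rng.normal(size=(n_traces, n_points))
#     traces[:, 20] += 3.0 * model                 # inject a leak at sample 20
#     corr = [scipy.stats.pearsonr(traces[:, t], model)[0]
#             for t in range(n_points)]
#     print(int(np.argmax(np.abs(corr))))          # -> 20
# ---------------------------------------------------------------------------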
|
{"hexsha": "86fe186133624c909ca1eb6314f9699976682ebc", "size": 14017, "ext": "py", "lang": "Python", "max_stars_repo_path": "platform/ise/xl9_cw/sca_attack.py", "max_stars_repo_name": "kavehshamsi/scadec", "max_stars_repo_head_hexsha": "3cc1e0eba5db12be5b16aea7b7fd4909faf42714", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "platform/ise/xl9_cw/sca_attack.py", "max_issues_repo_name": "kavehshamsi/scadec", "max_issues_repo_head_hexsha": "3cc1e0eba5db12be5b16aea7b7fd4909faf42714", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "platform/ise/xl9_cw/sca_attack.py", "max_forks_repo_name": "kavehshamsi/scadec", "max_forks_repo_head_hexsha": "3cc1e0eba5db12be5b16aea7b7fd4909faf42714", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5381263617, "max_line_length": 118, "alphanum_fraction": 0.5102375687, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3513}
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype code-based object validation classes** (i.e.,
:mod:`beartype`-specific classes enabling callers to define PEP-compliant
validators from arbitrary caller-defined objects tested via explicitly
supported object introspectors efficiently generating stack-free code).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ TODO }....................
# All "FIXME:" comments for this submodule reside in this package's "__init__"
# submodule to improve maintainability and readability here.
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeValeSubscriptionException
from beartype.vale._valeisabc import _IsABC
from beartype._vale._valesub import _SubscriptedIs
from beartype._util.cache.utilcachecall import callable_cached
from beartype._util.data.utildatadict import update_mapping
from beartype._util.func.utilfuncscope import (
CallableScope,
add_func_scope_attr,
)
from beartype._util.text.utiltextmagic import (
CODE_INDENT_1,
# LINE_RSTRIP_INDEX_AND,
)
from beartype._util.text.utiltextrepr import represent_object
from beartype._util.utilobject import SENTINEL
from typing import Any, Tuple
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ CLASSES ~ subscriptable }....................
class IsAttr(_IsABC):
'''
**Beartype object attribute validator factory** (i.e., class that, when
subscripted (indexed) by both the name of any object attribute *and* any
:class:`_SubscriptedIs` object created by subscripting any
:mod:`beartype.vale` class for validating that attribute, creates another
:class:`_SubscriptedIs` object suitable for subscripting (indexing)
:attr:`typing.Annotated` type hints, which validates that
:mod:`beartype`-decorated callable parameters and returns annotated by
those hints define an attribute with that name satisfying that attribute
validator).
This class efficiently validates that callable parameters and returns
define arbitrary object attributes satisfying arbitrary validators
subscripting (indexing) this class. Any :mod:`beartype`-decorated callable
parameter or return annotated by a :attr:`typing.Annotated` type hint
subscripted (indexed) by this class subscripted (indexed) by any object
attribute name and validator (e.g., ``typing.Annotated[{cls},
beartype.vale.IsAttr[{attr_name}, {attr_validator}]]`` for any class
``{cls}``, object attribute name ``{attr_name}``, and object attribute
validator ``{attr_validator}``) validates that parameter or return value to
be an instance of that class defining an attribute with that name
satisfying that attribute validator.
**This class incurs no time performance penalties at call time.** Whereas
the general-purpose :class:`beartype.vale.Is` class necessarily calls the
caller-defined callable subscripting that class at call time and thus
incurs a minor time performance penalty, this class efficiently reduces to
one-line tests in :mod:`beartype`-generated wrapper functions *without*
calling any callables and thus incurs *no* time performance penalties.
Examples
----------
.. code-block:: python
# Import the requisite machinery.
>>> import numpy as np
>>> from beartype import beartype
>>> from beartype.vale import IsAttr, IsEqual
>>> from typing import Annotated
# Type hint matching only two-dimensional NumPy arrays of 64-bit floats,
# generating code resembling:
# (isinstance(array, np.ndarray) and
# array.ndim == 2 and
# array.dtype == np.dtype(np.float64))
>>> Numpy2DArrayOfFloats = Annotated[
... np.ndarray,
... IsAttr['ndim', IsEqual[2]],
... IsAttr['dtype', IsEqual[np.dtype(np.float64)]],
... ]
# Type hint matching only one-dimensional NumPy arrays of 64-bit floats,
# generating code resembling:
# (isinstance(array, np.ndarray) and
# array.ndim == 1 and
# array.dtype.type == np.float64)
>>> Numpy1DArrayOfFloats = Annotated[
... np.ndarray,
... IsAttr['ndim', IsEqual[1]],
... # Nested attribute validators test equality against a "."-delimited
... # attribute lookup (e.g., "dtype.type"), as expected.
... IsAttr['dtype', IsAttr['type', IsEqual[np.float64]]],
... ]
# NumPy arrays of well-known real number series.
>>> FAREY_2D_ARRAY_OF_FLOATS = np.array(
... [[0/1, 1/8,], [1/7, 1/6,], [1/5, 1/4], [2/7, 1/3], [3/8, 2/5]])
>>> FAREY_1D_ARRAY_OF_FLOATS = np.array(
... [3/7, 1/2, 4/7, 3/5, 5/8, 2/3, 5/7, 3/4, 4/5, 5/6, 6/7, 7/8])
# Annotate callables by those type hints.
>>> @beartype
... def sqrt_sum_2d(
... array: Numpy2DArrayOfFloats) -> Numpy1DArrayOfFloats:
... """
... One-dimensional NumPy array of 64-bit floats produced by first
... summing the passed two-dimensional NumPy array of 64-bit floats
... along its second dimension and then square-rooting those sums.
... """
... return np.sqrt(array.sum(axis=1))
# Call those callables with parameters satisfying those hints.
>>> sqrt_sum_2d(FAREY_2D_ARRAY_OF_FLOATS)
[0.35355339 0.55634864 0.67082039 0.78679579 0.88034084]
# Call those callables with parameters not satisfying those hints.
>>> sqrt_sum_2d(FAREY_1D_ARRAY_OF_FLOATS)
beartype.roar._roarexc.BeartypeCallHintPepParamException: @beartyped
sqrt_sum_2d() parameter array="array([0.42857143, 0.5, 0.57142857, 0.6,
0.625, ...])" violates type hint typing.Annotated[numpy.ndarray,
IsAttr['ndim', IsEqual[2]], IsAttr['dtype', IsEqual[dtype('float64')]]],
as value "array([0.42857143, 0.5, 0.57142857, 0.6, 0.625, ...])"
violates data constraint IsAttr['ndim', IsEqual[2]].
See Also
----------
:class:`beartype.vale.Is`
Further commentary.
'''
# ..................{ DUNDERS }..................
@callable_cached
def __class_getitem__(
cls, args: Tuple[str, _SubscriptedIs]) -> _SubscriptedIs:
'''
`PEP 560`_-compliant dunder method creating and returning a new
:class:`_SubscriptedIs` object validating object attributes with the
passed name satisfying the passed validator, suitable for subscripting
`PEP 593`_-compliant :attr:`typing.Annotated` type hints.
This method is memoized for efficiency.
Parameters
----------
args : Tuple[str, _SubscriptedIs]
2-tuple ``(attr_name, attr_validator)``, where:
* ``attr_name`` is the arbitrary attribute name to validate that
parameters and returns define satisfying the passed validator.
* ``attr_validator`` is the attribute validator to validate that
attributes with the passed name of parameters and returns
satisfy.
Returns
----------
_SubscriptedIs
New object encapsulating this validation.
Raises
----------
BeartypeValeSubscriptionException
If this class was subscripted by either:
* *No* arguments.
* One argument.
* Three or more arguments.
See Also
----------
:class:`IsAttr`
Usage instructions.
.. _PEP 560:
https://www.python.org/dev/peps/pep-0560
.. _PEP 593:
https://www.python.org/dev/peps/pep-0593
'''
# If this class was subscripted by one non-tuple argument, raise an
# exception.
if not isinstance(args, tuple):
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by one non-tuple argument:\n'
f'{represent_object(args)}'
)
# Else, this class was subscripted by either no *OR* two or more
# arguments (contained in this tuple).
#
# If this class was *NOT* subscripted by two arguments...
elif len(args) != 2:
# If this class was subscripted by one or more arguments, then by
# deduction this class was subscripted by three or more arguments.
# In this case, raise a human-readable exception.
if args:
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by three or more arguments:\n'
f'{represent_object(args)}'
)
# Else, this class was subscripted by *NO* arguments. In this case,
# raise a human-readable exception.
else:
raise BeartypeValeSubscriptionException(
f'{repr(cls)} subscripted by empty tuple.')
# Else, this class was subscripted by exactly two arguments.
# Localize these arguments to human-readable local variables.
attr_name, attr_validator = args
# Representer (i.e., callable accepting *NO* arguments returning a
# machine-readable representation of this validator), defined *AFTER*
# localizing these validator arguments.
get_repr = lambda: (
f'{cls.__name__}[{repr(attr_name)}, {repr(attr_validator)}]')
# If this name is *NOT* a string, raise an exception.
if not isinstance(attr_name, str):
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} not string.'
)
# Else, this name is a string.
#
# If this name is the empty string, raise an exception.
elif not attr_name:
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} empty.'
)
# Else, this name is a non-empty string.
#
# Note that this name has *NOT* yet been validated to be valid Python
# identifier. While we could do so here by calling our existing
# is_identifier() tester, doing so would inefficiently repeat
# the split on "." characters performed below. Instead, we iteratively
# validate each split substring to be a valid Python identifier below.
# Callable inefficiently validating object attributes with this name
# against this validator.
# is_valid: SubscriptedIsValidator = None # type: ignore[assignment]
# Code snippet efficiently validating object attributes with this name
# against this validator.
is_valid_code = ''
# Dictionary mapping from the name to value of each local attribute
# referenced in the "is_valid_code" snippet defined below.
is_valid_code_locals: CallableScope = {}
# If this attribute name is unqualified (i.e., contains no "."
# delimiters), prefer an efficient optimization avoiding iteration.
if '.' not in attr_name:
# If this name is *NOT* a valid Python identifier, raise an
# exception.
if not attr_name.isidentifier():
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} syntactically invalid '
f'(i.e., not valid Python identifier).'
)
# Else, this name is a valid Python identifier.
def is_valid(pith: Any) -> bool:
f'''
``True`` only if the passed object defines an attribute named
{repr(attr_name)} whose value satisfies the validator
{repr(attr_validator)}.
'''
# Attribute of this object with this name if this object
# defines such an attribute *OR* a sentinel placeholder
# otherwise (i.e., if this object defines *NO* such attribute).
pith_attr = getattr(pith, attr_name, SENTINEL)
# Return true only if...
return (
# This object defines an attribute with this name *AND*...
pith_attr is not SENTINEL and
# This attribute satisfies this validator.
attr_validator.is_valid(pith_attr)
)
# Names of new parameters added to the signature of wrapper
# functions enabling this validator to be tested in those functions
# *WITHOUT* additional stack frames whose values are:
# * The sentinel placeholder.
local_name_sentinel = add_func_scope_attr(
attr=SENTINEL, attr_scope=is_valid_code_locals)
# Generate locals safely merging the locals required by both the
# code generated below *AND* the code validating this attribute.
update_mapping(
is_valid_code_locals, attr_validator._is_valid_code_locals)
#FIXME: Unfortunately, this still isn't sufficiently unique,
#because "IsAttr['name', IsAttr['name', IsEqual[True]]]" is a
#trivial counter-example where the current approach breaks down.
#For true uniquification here, we're going to need to instead:
#* Define a global private counter:
# _local_name_obj_attr_value_counter = Counter(0)
#* Replace the assignment below with:
# local_name_obj_attr_value = (
# f'{{obj}}_isattr_'
# f'{next(_local_name_obj_attr_value_counter)}'
# )
# Name of a local variable in this code whose:
# * Name is sufficiently obfuscated as to be hopefully unique to
# the code generated by this validator.
# * Value is the value of this attribute of the arbitrary object
# being validated by this code.
local_name_obj_attr_value = f'{{obj}}_isattr_{attr_name}'
# Code validating this attribute's value, formatted so as to be
# safely embeddable in the larger code expression defined below.
obj_attr_value_is_valid_expr = (
attr_validator._is_valid_code.format(
# Replace the placeholder substring "{obj}" in this code
# with the local variable whose value is the value of the
# desired object attribute.
obj=local_name_obj_attr_value,
# Replace the placeholder substring "{index}" in this code
# with an indentation increased by one level.
indent=f'{{indent}}{CODE_INDENT_1}',
))
# Code snippet efficiently validating against this object.
is_valid_code = VALE_CODE_CHECK_ISATTR_format(
attr_name_expr=repr(attr_name),
local_name_obj_attr_value=local_name_obj_attr_value,
obj_attr_value_is_valid_expr=obj_attr_value_is_valid_expr,
local_name_sentinel=local_name_sentinel,
)
# Else, this attribute name is qualified (i.e., contains one or more
# "." delimiters), fallback to a general solution performing iteration.
else:
#FIXME: Implement us up when we find the time, please. We currently
#raise an exception simply because we ran out of time for this. :{
raise BeartypeValeSubscriptionException(
f'{get_repr()} subscripted first argument '
f'{repr(attr_name)} not unqualified Python identifier '
f'(i.e., contains one or more "." characters).'
)
# Create and return this subscription.
return _SubscriptedIs(
is_valid=is_valid,
is_valid_code=is_valid_code,
is_valid_code_locals=is_valid_code_locals,
get_repr=get_repr,
)
# ....................{ CONSTANTS }....................
#FIXME: Shift into a new "_valesnip" submodule, please.
VALE_CODE_CHECK_ISATTR = '''(
{{indent}} # True only if this pith defines an attribute with this name.
{{indent}} ({local_name_obj_attr_value} := getattr(
{{indent}} {{obj}}, {attr_name_expr}, {local_name_sentinel}))
{{indent}} is not {local_name_sentinel} and
{{indent}} {obj_attr_value_is_valid_expr}
{{indent}})'''
'''
:mod:`beartype.vale.IsAttr`-specific code snippet validating an arbitrary
object to define an attribute with an arbitrary name satisfying an arbitrary
expression evaluating to a boolean.
'''
# Format methods of the code snippets declared above as a microoptimization.
VALE_CODE_CHECK_ISATTR_format = VALE_CODE_CHECK_ISATTR.format
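# ---------------------------------------------------------------------------
# Illustrative sketch (not part of beartype's API; hypothetical names): the
# snippet above uses an assignment expression so the attribute is fetched
# exactly once, with a sentinel distinguishing "attribute missing" from
# "attribute is None". A plain runtime equivalent of the generated test:
#
#     _SENTINEL = object()
#
#     def check_attr(obj, attr_name, attr_is_valid):
#         value = getattr(obj, attr_name, _SENTINEL)
#         return value is not _SENTINEL and attr_is_valid(value)
#
#     # e.g. check_attr(array, 'ndim', lambda v: v == 2)
# ---------------------------------------------------------------------------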
|
{"hexsha": "85473ea15452b0a452755acc718d80df167e19bc", "size": 17318, "ext": "py", "lang": "Python", "max_stars_repo_path": "beartype/vale/_valeisobj.py", "max_stars_repo_name": "jonathanmorley/beartype", "max_stars_repo_head_hexsha": "0d1207210220807d5c5848033d13657afa307983", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beartype/vale/_valeisobj.py", "max_issues_repo_name": "jonathanmorley/beartype", "max_issues_repo_head_hexsha": "0d1207210220807d5c5848033d13657afa307983", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beartype/vale/_valeisobj.py", "max_forks_repo_name": "jonathanmorley/beartype", "max_forks_repo_head_hexsha": "0d1207210220807d5c5848033d13657afa307983", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5736842105, "max_line_length": 82, "alphanum_fraction": 0.6191823536, "include": true, "reason": "import numpy", "num_tokens": 3854}
|
#include <cctbx/boost_python/flex_fwd.h>
#include <cctbx/geometry_restraints/shared_wrapper_pickle.hpp>
#include <boost/python/class.hpp>
#include <boost/python/def.hpp>
#include <boost/python/return_value_policy.hpp>
#include <boost/python/copy_const_reference.hpp>
#include <boost/python/return_arg.hpp>
#include <boost/python/return_internal_reference.hpp>
#include <scitbx/array_family/boost_python/shared_wrapper.h>
#include <scitbx/array_family/selections.h>
#include <scitbx/stl/map_wrapper.h>
#include <scitbx/stl/vector_wrapper.h>
#include <cctbx/crystal/pair_tables.h>
#include <cctbx/crystal/workarounds_bpl.h>
namespace cctbx { namespace crystal {
namespace {
struct pair_sym_table_wrappers
{
static void
wrap()
{
using namespace boost::python;
typedef return_internal_reference<> rir;
scitbx::stl::boost_python::map_wrapper<pair_sym_dict, rir>::wrap(
"pair_sym_dict");
typedef scitbx::af::boost_python::shared_wrapper<pair_sym_dict, rir> shared_w_t;
shared_w_t::wrap("pair_sym_table")
.def("proxy_select",
(pair_sym_table(*)(
af::const_ref<pair_sym_dict> const&,
af::const_ref<std::size_t> const&))
scitbx::af::array_of_map_proxy_select)
.def_pickle(shared_wrapper_pickle_suite< shared_w_t::w_t >())
;
}
};
struct pair_asu_table_table_wrappers
{
static void
wrap()
{
using namespace boost::python;
typedef return_internal_reference<> rir;
scitbx::stl::boost_python::map_wrapper<pair_asu_dict, rir>::wrap(
"pair_asu_dict");
scitbx::af::boost_python::shared_wrapper<pair_asu_dict, rir>::wrap(
"pair_asu_table_table");
}
};
struct pair_asu_table_wrappers : boost::python::pickle_suite
{
typedef pair_asu_table<> w_t;
static boost::python::tuple
getinitargs(w_t const& self)
{
return boost::python::make_tuple(self.asu_mappings());
}
static boost::python::tuple
getstate(w_t const& self)
{
return boost::python::make_tuple(
boost::python::list(self.table_)
);
}
static void
setstate(w_t& self, boost::python::tuple state)
{
// table_ is of type pair_asu_table_table. It doesn't unpickle but its
// individual elements do. So create a new pair_asu_table_table and
// unpickle the individual elements one by one
self.table_ = pair_asu_table_table();
for (std::size_t index = 0; index < boost::python::len(state[0]); ++index)
{
self.table_.push_back(boost::python::extract<pair_asu_dict>(state[0][index]));
}
}
static void
wrap()
{
using namespace boost::python;
typedef return_value_policy<copy_const_reference> ccr;
typedef return_internal_reference<> rir;
class_<w_t, boost::shared_ptr<w_t> >("pair_asu_table", no_init)
.def(init<
boost::shared_ptr<direct_space_asu::asu_mappings<> > >(
(arg("asu_mappings"))))
.def("asu_mappings", &w_t::asu_mappings)
.def("table", &w_t::table, ccr())
.def("__contains__",
(bool(w_t::*)(direct_space_asu::asu_mapping_index_pair const&) const)
&w_t::contains, (
arg("pair")))
.def("contains",
(bool(w_t::*)(unsigned, unsigned, unsigned) const)
&w_t::contains, (
arg("i_seq"), arg("j_seq"), arg("j_sym")))
.def("__eq__", &w_t::operator==)
.def("__ne__", &w_t::operator!=)
.def("pair_counts", &w_t::pair_counts)
.def("cluster_pivot_selection", &w_t::cluster_pivot_selection, (
arg("general_positions_only")=false,
arg("max_clusters")=0,
arg("estimated_reduction_factor")=4))
.def("add_covalent_pairs", &w_t::add_covalent_pairs, (
arg("scattering_types"),
arg("exclude_scattering_types")=boost::python::object(),
arg("conformer_indices")=boost::python::object(),
arg("sym_excl_indices")=boost::python::object(),
arg("distance_cutoff")=3.5,
arg("min_cubicle_edge")=5,
arg("tolerance")=0.5,
arg("epsilon")=1e-6,
arg("radii")=std::map<std::string,double>()), return_self<>())
.def("add_all_pairs", &w_t::add_all_pairs, (
arg("distance_cutoff"),
arg("min_cubicle_edge")=5,
arg("epsilon")=1e-6), return_self<>())
.def("add_pair_sym_table", &w_t::add_pair_sym_table, (
arg("sym_table")), return_self<>())
.def("add_pair", (pair_asu_table<>&(w_t::*)(
direct_space_asu::asu_mapping_index_pair const&)) &w_t::add_pair,
(arg("pair")), return_self<>())
.def("add_pair", (pair_asu_table<>&(w_t::*)(
unsigned, unsigned, sgtbx::rt_mx const&)) &w_t::add_pair,
(arg("i_seq"), arg("j_seq"), arg("rt_mx_ji")), return_self<>())
.def("add_pair", (pair_asu_table<>&(w_t::*)(
af::tiny<unsigned, 2> const&)) &w_t::add_pair,
(arg("i_seqs")), return_self<>())
.def("extract_pair_sym_table", &w_t::extract_pair_sym_table, (
arg("skip_j_seq_less_than_i_seq")=true,
arg("all_interactions_from_inside_asu")=false))
.def("angle_pair_asu_table", &w_t::angle_pair_asu_table)
.def_pickle(pair_asu_table_wrappers())
;
}
};
struct adp_iso_local_sphere_restraints_energies_wrappers : boost::python::pickle_suite
{
typedef adp_iso_local_sphere_restraints_energies w_t;
static boost::python::tuple
getstate(w_t const& self)
{
return boost::python::make_tuple(
self.number_of_restraints,
self.residual_sum,
self.gradients,
self.u_i,
self.u_j,
self.r_ij
);
}
static void
setstate(w_t& self, boost::python::tuple state)
{
self.number_of_restraints = boost::python::extract< unsigned >(state[0]);
self.residual_sum = boost::python::extract< double >(state[1]);
self.gradients = boost::python::extract< af::shared<double> >(state[2]);
self.u_i = boost::python::extract< af::shared<double> >(state[3]);
// getstate stores u_i, u_j and r_ij at tuple indices 3, 4 and 5
self.u_j = boost::python::extract< af::shared<double> >(state[4]);
self.r_ij = boost::python::extract< af::shared<double> >(state[5]);
}
static void
wrap()
{
using namespace boost::python;
typedef boost::python::arg arg_;
typedef return_value_policy<return_by_value> rbv;
class_<w_t>("adp_iso_local_sphere_restraints_energies", no_init)
.def(init<
af::const_ref<pair_sym_dict> const&,
scitbx::mat3<double> const&,
af::const_ref<scitbx::vec3<double> > const&,
af::const_ref<double> const&,
af::const_ref<bool> const&,
af::const_ref<bool> const&,
af::const_ref<bool> const&,
double,
double,
double,
double,
bool,
bool>((
arg("pair_sym_table"),
arg("orthogonalization_matrix"),
arg("sites_frac"),
arg("u_isos"),
arg("selection"),
arg("use_u_iso"),
arg("grad_u_iso"),
arg("sphere_radius"),
arg("distance_power"),
arg("average_power"),
arg("min_u_sum"),
arg("compute_gradients"),
arg("collect"))))
.def_readonly("number_of_restraints", &w_t::number_of_restraints)
.def_readonly("residual_sum", &w_t::residual_sum)
.add_property("gradients", make_getter(&w_t::gradients, rbv()))
.add_property("u_i", make_getter(&w_t::u_i, rbv()))
.add_property("u_j", make_getter(&w_t::u_j, rbv()))
.add_property("r_ij", make_getter(&w_t::r_ij, rbv()))
.def_pickle(adp_iso_local_sphere_restraints_energies_wrappers())
;
}
};
void
wrap_all()
{
using namespace boost::python;
def("get_distances",
(af::shared<double>(*)(
af::const_ref<crystal::pair_sym_dict> const&,
scitbx::mat3<double> const&,
af::const_ref<scitbx::vec3<double> > const&)) get_distances, (
arg("pair_sym_table"),
arg("orthogonalization_matrix"),
arg("sites_frac")));
def("get_distances",
(af::shared<double>(*)(
af::const_ref<crystal::pair_sym_dict> const&,
af::const_ref<scitbx::vec3<double> > const&)) get_distances, (
arg("pair_sym_table"),
arg("sites_cart")));
pair_sym_table_wrappers::wrap();
pair_asu_table_table_wrappers::wrap();
pair_asu_table_wrappers::wrap();
adp_iso_local_sphere_restraints_energies_wrappers::wrap();
}
} // namespace <anonymous>
namespace boost_python {
void
wrap_pair_tables() { wrap_all(); }
}}} // namespace cctbx::crystal::boost_python
|
{"hexsha": "dbc3e58da4c48053618ee4aa63e7698aa1eacf25", "size": 8861, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cctbx/crystal/pair_tables_bpl.cpp", "max_stars_repo_name": "rimmartin/cctbx_project", "max_stars_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 155.0, "max_stars_repo_stars_event_min_datetime": "2016-11-23T12:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:35:44.000Z", "max_issues_repo_path": "cctbx/crystal/pair_tables_bpl.cpp", "max_issues_repo_name": "rimmartin/cctbx_project", "max_issues_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 590.0, "max_issues_repo_issues_event_min_datetime": "2016-12-10T11:31:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T23:10:09.000Z", "max_forks_repo_path": "cctbx/crystal/pair_tables_bpl.cpp", "max_forks_repo_name": "rimmartin/cctbx_project", "max_forks_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 115.0, "max_forks_repo_forks_event_min_datetime": "2016-11-15T08:17:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T15:30:14.000Z", "avg_line_length": 34.3449612403, "max_line_length": 88, "alphanum_fraction": 0.6186660648, "num_tokens": 2331}
|
"""
Exit signals based on a dollar value.
"""
import numpy as np
class DollarExit():
"""
Calculate dollar value based exit signals.
"""
@classmethod
def exit_dollar(
cls, prices, trigger_value, exit_level):
"""
Calculate exit based on a dollar amount.
Parameters
----------
prices : DataFrame
The OHLC data, including the trade bookkeeping columns
'raw_trade_number', 'raw_end_of_day_position',
'raw_trade_high_price' and 'raw_trade_low_price'.
trigger_value : Series
The series to trigger exit.
exit_level : Str
The type of exit strategy.
Returns
-------
prices : DataFrame
The OHLC data.
exit_ : Series
Exit signals.
"""
# Calculate exit signal based on a profit target
if exit_level == 'profit_target':
prices, exit_ = cls._exit_profit_target(
prices=prices,
trigger_value=trigger_value)
# Calculate exit signal based on a loss from entry price
elif exit_level == 'initial':
prices, exit_ = cls._exit_initial_dollar_loss(
prices=prices,
trigger_value=trigger_value)
# Calculate exit signal based on a breakeven level
elif exit_level == 'breakeven':
prices, exit_ = cls._exit_breakeven(
prices=prices,
trigger_value=trigger_value)
# Calculate exit signal based on a trailing stop referencing the close
elif exit_level == 'trail_close':
prices, exit_ = cls._exit_trailing(
prices=prices,
trigger_value=trigger_value)
# Calculate exit signal based on a trailing stop referencing the
# high/low; the close vs high/low distinction is carried entirely by
# the trigger_value series supplied by the caller, so both branches
# share _exit_trailing
elif exit_level == 'trail_high_low':
prices, exit_ = cls._exit_trailing(
prices=prices,
trigger_value=trigger_value)
return prices, exit_
@staticmethod
def _exit_profit_target(prices, trigger_value):
"""
Calculate exit based on a profit target.
Parameters
----------
prices : DataFrame
The OHLC data, including 'raw_trade_number' and
'raw_end_of_day_position' columns.
trigger_value : Series
The series to trigger exit.
Returns
-------
prices : DataFrame
The OHLC data.
profit_target_exit : Series
The exit signals.
"""
trade_number = prices['raw_trade_number']
end_of_day_position = prices['raw_end_of_day_position']
# Create an empty array to store the signals
profit_target_exit = np.array([0]*len(prices))
# For each row in the data
for row in range(1, len(prices)):
# If there is a trade on
if trade_number[row] != 0:
# If there is a long position
if end_of_day_position[row] > 0:
# If the close is greater than the trigger value
if prices['Close'][row] > trigger_value[row]:
# Set the exit signal to -1
profit_target_exit[row] = -1
# If there is a short position
elif end_of_day_position[row] < 0:
# If the close is less than the trigger value
if prices['Close'][row] < trigger_value[row]:
# Set the exit signal to 1
profit_target_exit[row] = 1
else:
# Set the exit signal to 0
profit_target_exit[row] = 0
return prices, profit_target_exit
@staticmethod
def _exit_initial_dollar_loss(prices, trigger_value):
"""
Calculate exit based on a given loss from the entry point.
Parameters
----------
prices : DataFrame
The OHLC data, including 'raw_trade_number' and
'raw_end_of_day_position' columns.
trigger_value : Series
The series to trigger exit.
Returns
-------
prices : DataFrame
The OHLC data.
initial_dollar_loss_exit : Series
The exit signals.
"""
trade_number = prices['raw_trade_number']
end_of_day_position = prices['raw_end_of_day_position']
# Create an empty array to store the signals
initial_dollar_loss_exit = np.array([0]*len(prices))
# For each row in the data
for row in range(1, len(prices)):
# If there is a trade on
if trade_number[row] != 0:
# If there is a long position
if end_of_day_position[row] > 0:
# If the close is less than the trigger value
if prices['Close'][row] < trigger_value[row]:
# Set the exit signal to -1
initial_dollar_loss_exit[row] = -1
# If there is a short position
elif end_of_day_position[row] < 0:
# If the close is greater than the trigger value
if prices['Close'][row] > trigger_value[row]:
# Set the exit signal to 1
initial_dollar_loss_exit[row] = 1
else:
# Set the exit signal to 0
initial_dollar_loss_exit[row] = 0
return prices, initial_dollar_loss_exit
@staticmethod
def _exit_breakeven(prices, trigger_value):
"""
Calculate exit based on passing a breakeven threshold.
Parameters
----------
prices : DataFrame
The OHLC data, including 'raw_trade_number',
'raw_end_of_day_position', 'raw_trade_high_price' and
'raw_trade_low_price' columns.
trigger_value : Series
The series to trigger exit.
Returns
-------
prices : DataFrame
The OHLC data.
breakeven_exit : Series
The exit signals.
"""
trade_number = prices['raw_trade_number']
end_of_day_position = prices['raw_end_of_day_position']
trade_high_price = prices['raw_trade_high_price']
trade_low_price = prices['raw_trade_low_price']
# Create an empty array to store the signals
breakeven_exit = np.array([0.0]*len(prices))
# For each row in the data
for row in range(1, len(prices)):
# If there is a trade on
if trade_number[row] != 0:
# If there is a long position
if end_of_day_position[row] > 0:
# If the high price of the trade is greater than the
# trigger value
if trade_high_price[row] > trigger_value[row]:
# If the close is less than the trigger value
if prices['Close'][row] < trigger_value[row]:
# Set the exit signal to -1
breakeven_exit[row] = -1
# If there is a short position
elif end_of_day_position[row] < 0:
# If the low price of the trade is less than the
# trigger value
if trade_low_price[row] < trigger_value[row]:
# If the close is greater than the trigger value
if prices['Close'][row] > trigger_value[row]:
# Set the exit signal to 1
breakeven_exit[row] = 1
else:
# Set the exit signal to 0
breakeven_exit[row] = 0
return prices, breakeven_exit
@staticmethod
def _exit_trailing(prices, trigger_value):
"""
Calculate exit based on a trailing stop.
Parameters
----------
prices : DataFrame
The OHLC data, including 'raw_trade_number' and
'raw_end_of_day_position' columns.
trigger_value : Series
The series to trigger exit.
Returns
-------
prices : DataFrame
The OHLC data.
trailing_exit : Series
The exit signals.
"""
trade_number = prices['raw_trade_number']
end_of_day_position = prices['raw_end_of_day_position']
# Create an empty array to store the signals
trailing_exit = np.array([0.0]*len(prices))
# For each row in the data
for row in range(1, len(prices)):
# If there is a trade on
if trade_number[row] != 0:
# If there is a long position
if end_of_day_position[row] > 0:
# If the close is less than the trigger value
if prices['Close'][row] < trigger_value[row]:
# Set the exit signal to -1
trailing_exit[row] = -1
# If there is a short position
elif end_of_day_position[row] < 0:
# If the close is greater than the trigger value
if prices['Close'][row] > trigger_value[row]:
# Set the exit signal to 1
trailing_exit[row] = 1
else:
# Set the exit signal to 0
trailing_exit[row] = 0
return prices, trailing_exit
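# ---------------------------------------------------------------------------
# Illustrative usage sketch (synthetic data; calls the private helper
# directly for brevity). The methods above read the trade bookkeeping
# columns 'raw_trade_number' and 'raw_end_of_day_position' (plus the
# 'raw_trade_high_price' / 'raw_trade_low_price' columns for breakeven)
# from `prices`:
#
#     import numpy as np
#     import pandas as pd
#
#     n = 6
#     prices = pd.DataFrame({
#         'Close': [100.0, 101.0, 102.0, 99.0, 98.0, 97.0],
#         'raw_trade_number': [1] * n,        # one long trade throughout
#         'raw_end_of_day_position': [1] * n,
#     })
#     trailing_stop = pd.Series([98.5] * n)   # hypothetical stop level
#     _, exits = DollarExit._exit_trailing(prices, trailing_stop)
#     print(exits)  # -1 at the rows where Close has fallen below the stop
# ---------------------------------------------------------------------------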
|
{"hexsha": "4ccc9cc4a179d95fc107be63cb1b5b53c67f3c5b", "size": 10216, "ext": "py", "lang": "Python", "max_stars_repo_path": "tradingsystems/dollar_exits.py", "max_stars_repo_name": "GBERESEARCH/tradingsystems", "max_stars_repo_head_hexsha": "5158d41d32b48d35db34a6e132c7fa2f259987c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-10T04:28:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T04:28:37.000Z", "max_issues_repo_path": "tradingsystems/dollar_exits.py", "max_issues_repo_name": "GBERESEARCH/tradingsystems", "max_issues_repo_head_hexsha": "5158d41d32b48d35db34a6e132c7fa2f259987c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tradingsystems/dollar_exits.py", "max_forks_repo_name": "GBERESEARCH/tradingsystems", "max_forks_repo_head_hexsha": "5158d41d32b48d35db34a6e132c7fa2f259987c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-10T04:28:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-10T04:28:38.000Z", "avg_line_length": 30.3145400593, "max_line_length": 78, "alphanum_fraction": 0.5296593579, "include": true, "reason": "import numpy", "num_tokens": 2073}
|
def PlaneWaveEM(Eo, kvec, r):
    """Evaluate the incident plane wave Eo * exp(1j * k.r) on a grid of
    points r with shape (L, M, N, 3)."""
    import numpy as np
    (L, M, N, _) = r.shape
    # Phase k.r at every grid point
    krx = kvec[0] * r[:, :, :, 0]
    kry = kvec[1] * r[:, :, :, 1]
    krz = kvec[2] * r[:, :, :, 2]
    kr = krx + kry + krz
    expKr = np.exp(1j * kr)
    # Each component is the constant polarisation amplitude modulated by
    # the common phase factor
    Einc = np.zeros((L, M, N, 3), dtype=np.complex128)
    Einc[:, :, :, 0] = Eo[0] * expKr
    Einc[:, :, :, 1] = Eo[1] * expKr
    Einc[:, :, :, 2] = Eo[2] * expKr
    return Einc
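# ---------------------------------------------------------------------------
# Illustrative usage sketch (hypothetical values): an x-polarised plane wave
# travelling along z, evaluated on a small voxel grid.
#
#     import numpy as np
#
#     x = np.linspace(-1.0, 1.0, 4)
#     X, Y, Z = np.meshgrid(x, x, x, indexing='ij')
#     r = np.stack((X, Y, Z), axis=-1)          # shape (4, 4, 4, 3)
#     Eo = np.array([1.0, 0.0, 0.0])            # x-polarised amplitude
#     kvec = np.array([0.0, 0.0, 2 * np.pi])    # unit wavelength along z
#     Einc = PlaneWaveEM(Eo, kvec, r)
#     print(Einc.shape)                         # (4, 4, 4, 3)
# ---------------------------------------------------------------------------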
|
{"hexsha": "24749bcdb21b551cd9744486262dd0e1420b026a", "size": 433, "ext": "py", "lang": "Python", "max_stars_repo_path": "vines/fields/plane_wave_em.py", "max_stars_repo_name": "AndrewGibbs/vines", "max_stars_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-07-05T19:01:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T18:27:21.000Z", "max_issues_repo_path": "vines/fields/plane_wave_em.py", "max_issues_repo_name": "AndrewGibbs/vines", "max_issues_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vines/fields/plane_wave_em.py", "max_forks_repo_name": "AndrewGibbs/vines", "max_forks_repo_head_hexsha": "4c4f75adc8f601f06e2ab12fbaa95a047ef4354e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-31T13:57:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T08:49:03.000Z", "avg_line_length": 22.7894736842, "max_line_length": 54, "alphanum_fraction": 0.4272517321, "include": true, "reason": "import numpy", "num_tokens": 190}
|
(* (c) Copyright Microsoft Corporation and Inria. All rights reserved. *)
Require Import ssreflect ssrbool ssrfun eqtype ssrnat seq path div choice.
Require Import fintype tuple finfun bigop prime ssralg poly finset.
Require Import fingroup morphism perm automorphism quotient finalg action.
Require Import gproduct zmodp commutator cyclic center pgroup sylow frobenius.
Require Import vector ssrnum ssrint intdiv algC algnum.
Require Import classfun character integral_char.
Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.
Import GroupScope GRing.Theory Num.Theory.
Local Open Scope ring_scope.
(******************************************************************************)
(* This file provides basic notions of virtual character theory: *)
(* 'Z[S, A] == collective predicate for the phi that are Z-linear *)
(* combinations of elements of S : seq 'CF(G) and have *)
(* support in A : {set gT}. *)
(* 'Z[S] == collective predicate for the Z-linear combinations of *)
(* elements of S. *)
(* 'Z[irr G] == the collective predicate for virtual characters. *)
(* dirr G == the collective predicate for normal virtual characters, *)
(* i.e., virtual characters of norm 1: *)
(* mu \in dirr G <=> mu \in 'Z[irr G] and '[mu] = 1 *)
(* <=> mu or - mu \in irr G. *)
(* --> orthonormal subsets of 'Z[irr G] are contained in dirr G. *)
(* dIirr G == an index type for normal virtual characters. *)
(* dchi i == the normal virtual character of index i. *)
(* of_irr i == the (unique) irreducible constituent of dchi i: *)
(* dchi i = 'chi_(of_irr i) or - 'chi_(of_irr i). *)
(* ndirr i == the index of - dchi i. *)
(* dirr1 G == the normal virtual character index of 1 : 'CF(G), the *)
(* principal character. *)
(* dirr_dIirr j f == the index i (or dirr1 G if it does not exist) such that *)
(* dchi i = f j. *)
(* dirr_constt phi == the normal virtual character constituents of phi: *)
(* i \in dirr_constt phi <=> [dchi i, phi] > 0. *)
(* to_dirr phi i == the normal virtual character constituent of phi with an *)
(* irreducible constituent i, when i \in irr_constt phi. *)
(******************************************************************************)
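(* For example, a difference 'chi_i - 'chi_j of two irreducible characters   *)
(* lies in 'Z[irr G], and both 'chi_i and - 'chi_i belong to dirr G.         *)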
Section Basics.
Variables (gT : finGroupType) (B : {set gT}) (S : seq 'CF(B)) (A : {set gT}).
Definition Zchar : pred_class :=
[pred phi in 'CF(B, A) | dec_Cint_span (in_tuple S) phi].
Fact Zchar_key : pred_key Zchar. Proof. by []. Qed.
Canonical Zchar_keyed := KeyedPred Zchar_key.
Lemma cfun0_zchar : 0 \in Zchar.
Proof.
rewrite inE mem0v; apply/sumboolP; exists 0.
by rewrite big1 // => i _; rewrite ffunE.
Qed.
Fact Zchar_zmod : zmod_closed Zchar.
Proof.
split; first exact: cfun0_zchar.
move=> phi xi /andP[Aphi /sumboolP[a Da]] /andP[Axi /sumboolP[b Db]].
rewrite inE rpredB // Da Db -sumrB; apply/sumboolP; exists (a - b).
by apply: eq_bigr => i _; rewrite -mulrzBr !ffunE.
Qed.
Canonical Zchar_opprPred := OpprPred Zchar_zmod.
Canonical Zchar_addrPred := AddrPred Zchar_zmod.
Canonical Zchar_zmodPred := ZmodPred Zchar_zmod.
Lemma scale_zchar a phi : a \in Cint -> phi \in Zchar -> a *: phi \in Zchar.
Proof. by case/CintP=> m -> Zphi; rewrite scaler_int rpredMz. Qed.
End Basics.
Notation "''Z[' S , A ]" := (Zchar S A)
(at level 8, format "''Z[' S , A ]") : group_scope.
Notation "''Z[' S ]" := 'Z[S, setT]
(at level 8, format "''Z[' S ]") : group_scope.
Section Zchar.
Variables (gT : finGroupType) (G : {group gT}).
Implicit Types (A B : {set gT}) (S : seq 'CF(G)).
Lemma zchar_split S A phi :
phi \in 'Z[S, A] = (phi \in 'Z[S]) && (phi \in 'CF(G, A)).
Proof. by rewrite !inE cfun_onT andbC. Qed.
Lemma zcharD1E phi S : (phi \in 'Z[S, G^#]) = (phi \in 'Z[S]) && (phi 1%g == 0).
Proof. by rewrite zchar_split cfunD1E. Qed.
Lemma zcharD1 phi S A :
(phi \in 'Z[S, A^#]) = (phi \in 'Z[S, A]) && (phi 1%g == 0).
Proof. by rewrite zchar_split cfun_onD1 andbA -zchar_split. Qed.
Lemma zcharW S A : {subset 'Z[S, A] <= 'Z[S]}.
Proof. by move=> phi; rewrite zchar_split => /andP[]. Qed.
Lemma zchar_on S A : {subset 'Z[S, A] <= 'CF(G, A)}.
Proof. by move=> phi /andP[]. Qed.
Lemma zchar_onS A B S : A \subset B -> {subset 'Z[S, A] <= 'Z[S, B]}.
Proof.
move=> sAB phi; rewrite zchar_split (zchar_split _ B) => /andP[->].
exact: cfun_onS.
Qed.
Lemma zchar_onG S : 'Z[S, G] =i 'Z[S].
Proof. by move=> phi; rewrite zchar_split cfun_onG andbT. Qed.
Lemma irr_vchar_on A : {subset 'Z[irr G, A] <= 'CF(G, A)}.
Proof. exact: zchar_on. Qed.
Lemma support_zchar S A phi : phi \in 'Z[S, A] -> support phi \subset A.
Proof. by move/zchar_on; rewrite cfun_onE. Qed.
Lemma mem_zchar_on S A phi :
phi \in 'CF(G, A) -> phi \in S -> phi \in 'Z[S, A].
Proof.
move=> Aphi /(@tnthP _ _ (in_tuple S))[i Dphi]; rewrite inE /= {}Aphi {phi}Dphi.
apply/sumboolP; exists [ffun j => (j == i)%:Z].
rewrite (bigD1 i) //= ffunE eqxx (tnth_nth 0) big1 ?addr0 // => j i'j.
by rewrite ffunE (negPf i'j).
Qed.
(* A special lemma is needed because trivial fails to use the cfun_onT Hint. *)
Lemma mem_zchar S phi : phi \in S -> phi \in 'Z[S].
Proof. by move=> Sphi; rewrite mem_zchar_on ?cfun_onT. Qed.
Lemma zchar_nth_expansion S A phi :
phi \in 'Z[S, A] ->
{z | forall i, z i \in Cint & phi = \sum_(i < size S) z i *: S`_i}.
Proof.
case/andP=> _ /sumboolP/sig_eqW[/= z ->].
exists (intr \o z) => [i|]; first exact: Cint_int.
by apply: eq_bigr => i _; rewrite scaler_int.
Qed.
Lemma zchar_tuple_expansion n (S : n.-tuple 'CF(G)) A phi :
phi \in 'Z[S, A] ->
{z | forall i, z i \in Cint & phi = \sum_(i < n) z i *: S`_i}.
Proof. by move/zchar_nth_expansion; rewrite size_tuple. Qed.
(* A pure seq version, with the additional hypothesis that S is duplicate-free. *)
Lemma zchar_expansion S A phi : uniq S ->
phi \in 'Z[S, A] ->
{z | forall xi, z xi \in Cint & phi = \sum_(xi <- S) z xi *: xi}.
Proof.
move=> Suniq /zchar_nth_expansion[z Zz ->] /=.
pose zS xi := oapp z 0 (insub (index xi S)).
exists zS => [xi | ]; rewrite {}/zS; first by case: (insub _).
rewrite (big_nth 0) big_mkord; apply: eq_bigr => i _; congr (_ *: _).
by rewrite index_uniq // valK.
Qed.
Lemma zchar_span S A : {subset 'Z[S, A] <= <<S>>%VS}.
Proof.
move=> _ /zchar_nth_expansion[z Zz ->] /=.
by apply: rpred_sum => i _; rewrite rpredZ // memv_span ?mem_nth.
Qed.
Lemma zchar_trans S1 S2 A B :
{subset S1 <= 'Z[S2, B]} -> {subset 'Z[S1, A] <= 'Z[S2, A]}.
Proof.
move=> sS12 phi; rewrite !(zchar_split _ A) andbC => /andP[->]; rewrite andbT.
case/zchar_nth_expansion=> z Zz ->; apply: rpred_sum => i _.
by rewrite scale_zchar // (@zcharW _ B) ?sS12 ?mem_nth.
Qed.
Lemma zchar_trans_on S1 S2 A :
{subset S1 <= 'Z[S2, A]} -> {subset 'Z[S1] <= 'Z[S2, A]}.
Proof.
move=> sS12 _ /zchar_nth_expansion[z Zz ->]; apply: rpred_sum => i _.
by rewrite scale_zchar // sS12 ?mem_nth.
Qed.
Lemma zchar_sub_irr S A :
{subset S <= 'Z[irr G]} -> {subset 'Z[S, A] <= 'Z[irr G, A]}.
Proof. exact: zchar_trans. Qed.
Lemma zchar_subset S1 S2 A :
{subset S1 <= S2} -> {subset 'Z[S1, A] <= 'Z[S2, A]}.
Proof.
move=> sS12; apply: zchar_trans setT _ => // f /sS12 S2f.
by rewrite mem_zchar.
Qed.
Lemma zchar_subseq S1 S2 A :
subseq S1 S2 -> {subset 'Z[S1, A] <= 'Z[S2, A]}.
Proof. by move/mem_subseq; apply: zchar_subset. Qed.
Lemma zchar_filter S A (p : pred 'CF(G)) :
{subset 'Z[filter p S, A] <= 'Z[S, A]}.
Proof. by apply: zchar_subset=> f; apply/mem_subseq/filter_subseq. Qed.
End Zchar.
Section VChar.
Variables (gT : finGroupType) (G : {group gT}).
Implicit Types (A B : {set gT}) (phi chi : 'CF(G)) (S : seq 'CF(G)).
Lemma char_vchar chi : chi \is a character -> chi \in 'Z[irr G].
Proof.
case/char_sum_irr=> r ->; apply: rpred_sum => i _.
by rewrite mem_zchar ?mem_tnth.
Qed.
Lemma irr_vchar i : 'chi[G]_i \in 'Z[irr G].
Proof. exact/char_vchar/irr_char. Qed.
Lemma cfun1_vchar : 1 \in 'Z[irr G]. Proof. by rewrite -irr0 irr_vchar. Qed.
Lemma vcharP phi :
reflect (exists2 chi1, chi1 \is a character
& exists2 chi2, chi2 \is a character & phi = chi1 - chi2)
(phi \in 'Z[irr G]).
Proof.
apply: (iffP idP) => [| [a Na [b Nb ->]]]; last by rewrite rpredB ?char_vchar.
case/zchar_tuple_expansion=> z Zz ->; rewrite (bigID (fun i => 0 <= z i)) /=.
set chi1 := \sum_(i | _) _; set nchi2 := \sum_(i | _) _.
exists chi1; last exists (- nchi2); last by rewrite opprK.
apply: rpred_sum => i zi_ge0; rewrite -tnth_nth rpredZ_Cnat ?irr_char //.
by rewrite CnatEint Zz.
rewrite -sumrN rpred_sum // => i zi_lt0; rewrite -scaleNr -tnth_nth.
rewrite rpredZ_Cnat ?irr_char // CnatEint rpredN Zz oppr_ge0 ltrW //.
by rewrite real_ltrNge ?Creal_Cint.
Qed.
Lemma Aint_vchar phi x : phi \in 'Z[irr G] -> phi x \in Aint.
Proof.
case/vcharP=> [chi1 Nchi1 [chi2 Nchi2 ->]].
by rewrite !cfunE rpredB ?Aint_char.
Qed.
Lemma Cint_vchar1 phi : phi \in 'Z[irr G] -> phi 1%g \in Cint.
Proof.
case/vcharP=> phi1 Nphi1 [phi2 Nphi2 ->].
by rewrite !cfunE rpredB // rpred_Cnat ?Cnat_char1.
Qed.
Lemma Cint_cfdot_vchar_irr i phi : phi \in 'Z[irr G] -> '[phi, 'chi_i] \in Cint.
Proof.
case/vcharP=> chi1 Nchi1 [chi2 Nchi2 ->].
by rewrite cfdotBl rpredB // rpred_Cnat ?Cnat_cfdot_char_irr.
Qed.
Lemma cfdot_vchar_r phi psi :
psi \in 'Z[irr G] -> '[phi, psi] = \sum_i '[phi, 'chi_i] * '[psi, 'chi_i].
Proof.
move=> Zpsi; rewrite cfdot_sum_irr; apply: eq_bigr => i _; congr (_ * _).
by rewrite aut_Cint ?Cint_cfdot_vchar_irr.
Qed.
Lemma Cint_cfdot_vchar : {in 'Z[irr G] &, forall phi psi, '[phi, psi] \in Cint}.
Proof.
move=> phi psi Zphi Zpsi; rewrite /= cfdot_vchar_r // rpred_sum // => k _.
by rewrite rpredM ?Cint_cfdot_vchar_irr.
Qed.
Lemma Cnat_cfnorm_vchar : {in 'Z[irr G], forall phi, '[phi] \in Cnat}.
Proof.
by move=> phi Zphi; rewrite /= CnatEint cfnorm_ge0 Cint_cfdot_vchar.
Qed.
Fact vchar_mulr_closed : mulr_closed 'Z[irr G].
Proof.
split; first exact: cfun1_vchar.
move=> _ _ /vcharP[xi1 Nxi1 [xi2 Nxi2 ->]] /vcharP[xi3 Nxi3 [xi4 Nxi4 ->]].
by rewrite mulrBl !mulrBr !(rpredB, rpredD) // char_vchar ?rpredM.
Qed.
Canonical vchar_mulrPred := MulrPred vchar_mulr_closed.
Canonical vchar_smulrPred := SmulrPred vchar_mulr_closed.
Canonical vchar_semiringPred := SemiringPred vchar_mulr_closed.
Canonical vchar_subringPred := SubringPred vchar_mulr_closed.
Lemma mul_vchar A :
{in 'Z[irr G, A] &, forall phi psi, phi * psi \in 'Z[irr G, A]}.
Proof.
move=> phi psi; rewrite zchar_split => /andP[Zphi Aphi] /zcharW Zpsi.
rewrite zchar_split rpredM //; apply/cfun_onP=> x A'x.
by rewrite cfunE (cfun_onP Aphi) ?mul0r.
Qed.
Section CfdotPairwiseOrthogonal.
Variables (M : {group gT}) (S : seq 'CF(G)) (nu : 'CF(G) -> 'CF(M)).
Hypotheses (Inu : {in 'Z[S] &, isometry nu}) (oSS : pairwise_orthogonal S).
Let freeS := orthogonal_free oSS.
Let uniqS : uniq S := free_uniq freeS.
Let Z_S : {subset S <= 'Z[S]}. Proof. by move=> phi; apply: mem_zchar. Qed.
Let notS0 : 0 \notin S. Proof. by case/andP: oSS. Qed.
Let dotSS := proj2 (pairwise_orthogonalP oSS).
Lemma map_pairwise_orthogonal : pairwise_orthogonal (map nu S).
Proof.
have inj_nu: {in S &, injective nu}.
move=> phi psi Sphi Spsi /= eq_nu; apply: contraNeq (memPn notS0 _ Sphi).
by rewrite -cfnorm_eq0 -Inu ?Z_S // {2}eq_nu Inu ?Z_S // => /dotSS->.
have notSnu0: 0 \notin map nu S.
apply: contra notS0 => /mapP[phi Sphi /esym/eqP].
by rewrite -cfnorm_eq0 Inu ?Z_S // cfnorm_eq0 => /eqP <-.
apply/pairwise_orthogonalP; split; first by rewrite /= notSnu0 map_inj_in_uniq.
move=>_ _ /mapP[phi Sphi ->] /mapP[psi Spsi ->].
by rewrite (inj_in_eq inj_nu) // Inu ?Z_S //; apply: dotSS.
Qed.
Lemma cfproj_sum_orthogonal P z phi :
phi \in S ->
'[\sum_(xi <- S | P xi) z xi *: nu xi, nu phi]
= if P phi then z phi * '[phi] else 0.
Proof.
move=> Sphi; have defS := perm_to_rem Sphi.
rewrite cfdot_suml (eq_big_perm _ defS) big_cons /= cfdotZl Inu ?Z_S //.
rewrite big1_seq ?addr0 // => xi; rewrite mem_rem_uniq ?inE //.
by case/and3P=> _ neq_xi Sxi; rewrite cfdotZl Inu ?Z_S // dotSS ?mulr0.
Qed.
Lemma cfdot_sum_orthogonal z1 z2 :
'[\sum_(xi <- S) z1 xi *: nu xi, \sum_(xi <- S) z2 xi *: nu xi]
= \sum_(xi <- S) z1 xi * (z2 xi)^* * '[xi].
Proof.
rewrite cfdot_sumr; apply: eq_big_seq => phi Sphi.
by rewrite cfdotZr cfproj_sum_orthogonal // mulrCA mulrA.
Qed.
Lemma cfnorm_sum_orthogonal z :
'[\sum_(xi <- S) z xi *: nu xi] = \sum_(xi <- S) `|z xi| ^+ 2 * '[xi].
Proof.
by rewrite cfdot_sum_orthogonal; apply: eq_bigr => xi _; rewrite normCK.
Qed.
Lemma cfnorm_orthogonal : '[\sum_(xi <- S) nu xi] = \sum_(xi <- S) '[xi].
Proof.
rewrite -(eq_bigr _ (fun _ _ => scale1r _)) cfnorm_sum_orthogonal.
by apply: eq_bigr => xi; rewrite normCK conjC1 !mul1r.
Qed.
End CfdotPairwiseOrthogonal.
Lemma orthogonal_span S phi :
pairwise_orthogonal S -> phi \in <<S>>%VS ->
{z | z = fun xi => '[phi, xi] / '[xi] & phi = \sum_(xi <- S) z xi *: xi}.
Proof.
move=> oSS /free_span[|c -> _]; first exact: orthogonal_free.
set z := fun _ => _ : algC; exists z => //; apply: eq_big_seq => u Su.
rewrite /z cfproj_sum_orthogonal // mulfK // cfnorm_eq0.
by rewrite (memPn _ u Su); case/andP: oSS.
Qed.
Section CfDotOrthonormal.
Variables (M : {group gT}) (S : seq 'CF(G)) (nu : 'CF(G) -> 'CF(M)).
Hypotheses (Inu : {in 'Z[S] &, isometry nu}) (onS : orthonormal S).
Let oSS := orthonormal_orthogonal onS.
Let freeS := orthogonal_free oSS.
Let nS1 : {in S, forall phi, '[phi] = 1}.
Proof. by move=> phi Sphi; case/orthonormalP: onS => _ -> //; rewrite eqxx. Qed.
Lemma map_orthonormal : orthonormal (map nu S).
Proof.
rewrite !orthonormalE map_pairwise_orthogonal // andbT.
by apply/allP=> _ /mapP[xi Sxi ->]; rewrite /= Inu ?nS1 // mem_zchar.
Qed.
Lemma cfproj_sum_orthonormal z phi :
phi \in S -> '[\sum_(xi <- S) z xi *: nu xi, nu phi] = z phi.
Proof. by move=> Sphi; rewrite cfproj_sum_orthogonal // nS1 // mulr1. Qed.
Lemma cfdot_sum_orthonormal z1 z2 :
'[\sum_(xi <- S) z1 xi *: xi, \sum_(xi <- S) z2 xi *: xi]
= \sum_(xi <- S) z1 xi * (z2 xi)^*.
Proof.
rewrite cfdot_sum_orthogonal //; apply: eq_big_seq => phi /nS1->.
by rewrite mulr1.
Qed.
Lemma cfnorm_sum_orthonormal z :
'[\sum_(xi <- S) z xi *: nu xi] = \sum_(xi <- S) `|z xi| ^+ 2.
Proof.
rewrite cfnorm_sum_orthogonal //.
by apply: eq_big_seq => xi /nS1->; rewrite mulr1.
Qed.
Lemma cfnorm_map_orthonormal : '[\sum_(xi <- S) nu xi] = (size S)%:R.
Proof.
by rewrite cfnorm_orthogonal // (eq_big_seq _ nS1) big_tnth sumr_const card_ord.
Qed.
Lemma orthonormal_span phi :
phi \in <<S>>%VS ->
{z | z = fun xi => '[phi, xi] & phi = \sum_(xi <- S) z xi *: xi}.
Proof.
case/orthogonal_span=> // _ -> {2}->; set z := fun _ => _ : algC.
by exists z => //; apply: eq_big_seq => xi /nS1->; rewrite divr1.
Qed.
End CfDotOrthonormal.
Lemma cfnorm_orthonormal S :
orthonormal S -> '[\sum_(xi <- S) xi] = (size S)%:R.
Proof. exact: cfnorm_map_orthonormal. Qed.
Lemma vchar_orthonormalP S :
{subset S <= 'Z[irr G]} ->
reflect (exists I : {set Iirr G}, exists b : Iirr G -> bool,
perm_eq S [seq (-1) ^+ b i *: 'chi_i | i in I])
(orthonormal S).
Proof.
move=> vcS; apply: (equivP orthonormalP).
split=> [[uniqS oSS] | [I [b defS]]]; last first.
split=> [|xi1 xi2]; rewrite ?(perm_eq_mem defS).
rewrite (perm_eq_uniq defS) map_inj_uniq ?enum_uniq // => i j /eqP.
by rewrite eq_signed_irr => /andP[_ /eqP].
case/mapP=> [i _ ->] /mapP[j _ ->]; rewrite eq_signed_irr.
rewrite cfdotZl cfdotZr rmorph_sign mulrA cfdot_irr -signr_addb mulr_natr.
by rewrite mulrb andbC; case: eqP => //= ->; rewrite addbb eqxx.
pose I := [set i | ('chi_i \in S) || (- 'chi_i \in S)].
pose b i := - 'chi_i \in S; exists I, b.
apply: uniq_perm_eq => // [|xi].
rewrite map_inj_uniq ?enum_uniq // => i j /eqP.
by rewrite eq_signed_irr => /andP[_ /eqP].
apply/idP/mapP=> [Sxi | [i Ii ->{xi}]]; last first.
move: Ii; rewrite mem_enum inE orbC -/(b i).
by case b_i: (b i); rewrite (scale1r, scaleN1r).
have: '[xi] = 1 by rewrite oSS ?eqxx.
have vc_xi := vcS _ Sxi; rewrite cfdot_sum_irr.
case/Cnat_sum_eq1 => [i _ | i [_ /eqP norm_xi_i xi_i'_0]].
by rewrite -normCK rpredX // Cnat_norm_Cint ?Cint_cfdot_vchar_irr.
suffices def_xi: xi = (-1) ^+ b i *: 'chi_i.
exists i; rewrite // mem_enum inE -/(b i) orbC.
by case: (b i) def_xi Sxi => // ->; rewrite scale1r.
move: Sxi; rewrite [xi]cfun_sum_cfdot (bigD1 i) //.
rewrite big1 //= ?addr0 => [|j ne_ji]; last first.
apply/eqP; rewrite scaler_eq0 -normr_eq0 -[_ == 0](expf_eq0 _ 2) normCK.
by rewrite xi_i'_0 ?eqxx.
have:= norm_xi_i; rewrite (aut_Cint _ (Cint_cfdot_vchar_irr _ _)) //.
rewrite -subr_eq0 subr_sqr_1 mulf_eq0 subr_eq0 addr_eq0 /b scaler_sign.
case/pred2P=> ->; last by rewrite scaleN1r => ->.
rewrite scale1r => Sxi; case: ifP => // SNxi.
have:= oSS _ _ Sxi SNxi; rewrite cfdotNr cfdot_irr eqxx; case: eqP => // _.
by move/eqP; rewrite oppr_eq0 oner_eq0.
Qed.
Lemma vchar_norm1P phi :
phi \in 'Z[irr G] -> '[phi] = 1 ->
exists b : bool, exists i : Iirr G, phi = (-1) ^+ b *: 'chi_i.
Proof.
move=> Zphi phiN1.
have: orthonormal phi by rewrite /orthonormal/= phiN1 eqxx.
case/vchar_orthonormalP=> [xi /predU1P[->|] // | I [b def_phi]].
have: phi \in (phi : seq _) := mem_head _ _.
by rewrite (perm_eq_mem def_phi) => /mapP[i _ ->]; exists (b i), i.
Qed.
Lemma zchar_small_norm phi n :
phi \in 'Z[irr G] -> '[phi] = n%:R -> (n < 4)%N ->
{S : n.-tuple 'CF(G) |
[/\ orthonormal S, {subset S <= 'Z[irr G]} & phi = \sum_(xi <- S) xi]}.
Proof.
move=> Zphi def_n lt_n_4.
pose S := [seq '[phi, 'chi_i] *: 'chi_i | i in irr_constt phi].
have def_phi: phi = \sum_(xi <- S) xi.
rewrite big_map /= big_filter big_mkcond {1}[phi]cfun_sum_cfdot.
by apply: eq_bigr => i _; rewrite if_neg; case: eqP => // ->; rewrite scale0r.
have orthS: orthonormal S.
apply/orthonormalP; split=> [|_ _ /mapP[i phi_i ->] /mapP[j _ ->]].
rewrite map_inj_in_uniq ?enum_uniq // => i j; rewrite mem_enum => phi_i _.
by move/eqP; rewrite eq_scaled_irr (negbTE phi_i) => /andP[_ /= /eqP].
rewrite eq_scaled_irr cfdotZl cfdotZr cfdot_irr mulrA mulr_natr mulrb.
rewrite mem_enum in phi_i; rewrite (negbTE phi_i) andbC; case: eqP => // <-.
have /CnatP[m def_m] := Cnat_norm_Cint (Cint_cfdot_vchar_irr i Zphi).
apply/eqP; rewrite eqxx /= -normCK def_m -natrX eqr_nat eqn_leq lt0n.
rewrite expn_eq0 andbT -eqC_nat -def_m normr_eq0 [~~ _]phi_i andbT.
rewrite (leq_exp2r _ 1) // -ltnS -(@ltn_exp2r _ _ 2) //.
apply: leq_ltn_trans lt_n_4; rewrite -leC_nat -def_n natrX.
rewrite cfdot_sum_irr (bigD1 i) //= -normCK def_m addrC -subr_ge0 addrK.
by rewrite sumr_ge0 // => ? _; apply: mul_conjC_ge0.
have <-: size S = n.
by apply/eqP; rewrite -eqC_nat -def_n def_phi cfnorm_orthonormal.
exists (in_tuple S); split=> // _ /mapP[i _ ->].
by rewrite scale_zchar ?irr_vchar // Cint_cfdot_vchar_irr.
Qed.
Lemma vchar_norm2 phi :
phi \in 'Z[irr G, G^#] -> '[phi] = 2%:R ->
exists i, exists2 j, j != i & phi = 'chi_i - 'chi_j.
Proof.
rewrite zchar_split cfunD1E => /andP[Zphi phi1_0].
case/zchar_small_norm => // [[[|chi [|xi [|?]]] //= S2]].
case=> /andP[/and3P[Nchi Nxi _] /= ochi] /allP/and3P[Zchi Zxi _].
rewrite big_cons big_seq1 => def_phi.
have [b [i def_chi]] := vchar_norm1P Zchi (eqP Nchi).
have [c [j def_xi]] := vchar_norm1P Zxi (eqP Nxi).
have neq_ji: j != i.
apply: contraTneq ochi; rewrite !andbT def_chi def_xi => ->.
rewrite cfdotZl cfdotZr rmorph_sign cfnorm_irr mulr1 -signr_addb.
by rewrite signr_eq0.
have neq_bc: b != c.
apply: contraTneq phi1_0; rewrite def_phi def_chi def_xi => ->.
rewrite -scalerDr !cfunE mulf_eq0 signr_eq0 eqr_le ltr_geF //.
by rewrite ltr_paddl ?ltrW ?irr1_gt0.
rewrite {}def_phi {}def_chi {}def_xi !scaler_sign.
case: b c neq_bc => [|] [|] // _; last by exists i, j.
by exists j, i; rewrite 1?eq_sym // addrC.
Qed.
End VChar.
Section Isometries.
Variables (gT : finGroupType) (L G : {group gT}) (S : seq 'CF(L)).
Implicit Type nu : {additive 'CF(L) -> 'CF(G)}.
Lemma Zisometry_of_cfnorm (tauS : seq 'CF(G)) :
pairwise_orthogonal S -> pairwise_orthogonal tauS ->
map cfnorm tauS = map cfnorm S -> {subset tauS <= 'Z[irr G]} ->
{tau : {linear 'CF(L) -> 'CF(G)} | map tau S = tauS
& {in 'Z[S], isometry tau, to 'Z[irr G]}}.
Proof.
move=> oSS oTT /isometry_of_cfnorm[||tau defT Itau] // Z_T; exists tau => //.
split=> [|_ /zchar_nth_expansion[u Zu ->]].
by apply: sub_in2 Itau; apply: zchar_span.
rewrite big_seq linear_sum rpred_sum // => xi Sxi.
by rewrite linearZ scale_zchar ?Z_T // -defT map_f ?mem_nth.
Qed.
Lemma Zisometry_of_iso f :
free S -> {in S, isometry f, to 'Z[irr G]} ->
{tau : {linear 'CF(L) -> 'CF(G)} | {in S, tau =1 f}
& {in 'Z[S], isometry tau, to 'Z[irr G]}}.
Proof.
move=> freeS [If Zf]; have [tau Dtau Itau] := isometry_of_free freeS If.
exists tau => //; split; first by apply: sub_in2 Itau; apply: zchar_span.
move=> _ /zchar_nth_expansion[a Za ->]; rewrite linear_sum rpred_sum // => i _.
by rewrite linearZ rpredZ_Cint ?Dtau ?Zf ?mem_nth.
Qed.
Lemma Zisometry_inj A nu :
{in 'Z[S, A] &, isometry nu} -> {in 'Z[S, A] &, injective nu}.
Proof. by move/isometry_raddf_inj; apply; apply: rpredB. Qed.
Lemma isometry_in_zchar nu : {in S &, isometry nu} -> {in 'Z[S] &, isometry nu}.
Proof.
move=> Inu _ _ /zchar_nth_expansion[u Zu ->] /zchar_nth_expansion[v Zv ->].
rewrite !raddf_sum; apply: eq_bigr => j _ /=.
rewrite !cfdot_suml; apply: eq_bigr => i _.
by rewrite !raddfZ_Cint //= !cfdotZl !cfdotZr Inu ?mem_nth.
Qed.
End Isometries.
Section AutVchar.
Variables (u : {rmorphism algC -> algC}) (gT : finGroupType) (G : {group gT}).
Local Notation "alpha ^u" := (cfAut u alpha).
Implicit Type (S : seq 'CF(G)) (phi chi : 'CF(G)).
Lemma cfAut_zchar S A psi :
cfAut_closed u S -> psi \in 'Z[S, A] -> psi^u \in 'Z[S, A].
Proof.
rewrite zchar_split => SuS /andP[/zchar_nth_expansion[z Zz Dpsi] Apsi].
rewrite zchar_split cfAut_on {}Apsi {psi}Dpsi rmorph_sum rpred_sum //= => i _.
by rewrite cfAutZ_Cint // scale_zchar // mem_zchar ?SuS ?mem_nth.
Qed.
Lemma cfAut_vchar A psi : psi \in 'Z[irr G, A] -> psi^u \in 'Z[irr G, A].
Proof. by apply: cfAut_zchar; apply: irr_aut_closed. Qed.
Lemma sub_aut_zchar S A psi :
{subset S <= 'Z[irr G]} -> psi \in 'Z[S, A] -> psi^u \in 'Z[S, A] ->
psi - psi^u \in 'Z[S, A^#].
Proof.
move=> Z_S Spsi Spsi_u; rewrite zcharD1 !cfunE subr_eq0 rpredB //=.
by rewrite aut_Cint // Cint_vchar1 // (zchar_trans Z_S) ?(zcharW Spsi).
Qed.
Lemma conjC_vcharAut chi x : chi \in 'Z[irr G] -> (u (chi x))^* = u (chi x)^*.
Proof.
case/vcharP=> chi1 Nchi1 [chi2 Nchi2 ->].
by rewrite !cfunE !rmorphB !conjC_charAut.
Qed.
Lemma cfdot_aut_vchar phi chi :
chi \in 'Z[irr G] -> '[phi^u , chi^u] = u '[phi, chi].
Proof.
case/vcharP=> chi1 Nchi1 [chi2 Nchi2 ->].
by rewrite !raddfB /= !cfdot_aut_char.
Qed.
Lemma vchar_aut A chi : (chi^u \in 'Z[irr G, A]) = (chi \in 'Z[irr G, A]).
Proof.
rewrite !(zchar_split _ A) cfAut_on; congr (_ && _).
apply/idP/idP=> [Zuchi|]; last exact: cfAut_vchar.
rewrite [chi]cfun_sum_cfdot rpred_sum // => i _.
rewrite scale_zchar ?irr_vchar //.
by rewrite -(Cint_aut u) -cfdot_aut_irr -aut_IirrE Cint_cfdot_vchar_irr.
Qed.
End AutVchar.
Definition cfConjC_vchar := cfAut_vchar conjC.
Section MoreVchar.
Variables (gT : finGroupType) (G H : {group gT}).
Lemma cfRes_vchar phi : phi \in 'Z[irr G] -> 'Res[H] phi \in 'Z[irr H].
Proof.
case/vcharP=> xi1 Nx1 [xi2 Nxi2 ->].
by rewrite raddfB rpredB ?char_vchar ?cfRes_char.
Qed.
Lemma cfRes_vchar_on A phi :
H \subset G -> phi \in 'Z[irr G, A] -> 'Res[H] phi \in 'Z[irr H, A].
Proof.
rewrite zchar_split => sHG /andP[Zphi Aphi]; rewrite zchar_split cfRes_vchar //.
apply/cfun_onP=> x /(cfun_onP Aphi); rewrite !cfunElock !genGid sHG => ->.
exact: mul0rn.
Qed.
Lemma cfInd_vchar phi : phi \in 'Z[irr H] -> 'Ind[G] phi \in 'Z[irr G].
Proof.
move=> /vcharP[xi1 Nx1 [xi2 Nxi2 ->]].
by rewrite raddfB rpredB ?char_vchar ?cfInd_char.
Qed.
Lemma sub_conjC_vchar A phi :
phi \in 'Z[irr G, A] -> phi - (phi^*)%CF \in 'Z[irr G, A^#].
Proof.
move=> Zphi; rewrite sub_aut_zchar ?cfAut_zchar // => _ /irrP[i ->].
exact: irr_vchar.
exact: cfConjC_irr.
Qed.
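(* The classical character-theoretic argument: each irreducible character    *)
(* of H is extended to a character of G (via induction and a norm-1          *)
(* correction), and the kernel K is obtained as the intersection of the      *)
(* kernels of these extensions. The set K1 = G :\: cover (H^# :^: G)         *)
(* provides the cardinality count #|K1| = #|G : H|.                          *)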
Lemma Frobenius_kernel_exists :
[Frobenius G with complement H] -> {K : {group gT} | [Frobenius G = K ><| H]}.
Proof.
move=> frobG; have [_ ntiHG] := andP frobG.
have [[_ sHG regGH][_ tiHG /eqP defNH]] := (normedTI_memJ_P ntiHG, and3P ntiHG).
suffices /sigW[K defG]: exists K, gval K ><| H == G by exists K; apply/andP.
pose K1 := G :\: cover (H^# :^: G).
have oK1: #|K1| = #|G : H|.
rewrite cardsD (setIidPr _); last first.
rewrite cover_imset; apply/bigcupsP=> x Gx.
by rewrite sub_conjg conjGid ?groupV // (subset_trans (subsetDl _ _)).
rewrite (cover_partition (partition_normedTI ntiHG)) -(Lagrange sHG).
by rewrite (card_support_normedTI ntiHG) (cardsD1 1%g) group1 mulSn addnK.
suffices extG i: {j | {in H, 'chi[G]_j =1 'chi[H]_i} & K1 \subset cfker 'chi_j}.
pose K := [group of \bigcap_i cfker 'chi_(s2val (extG i))].
have nKH: H \subset 'N(K).
by apply/norms_bigcap/bigcapsP=> i _; apply: subset_trans (cfker_norm _).
have tiKH: K :&: H = 1%g.
apply/trivgP; rewrite -(TI_cfker_irr H) /= setIC; apply/bigcapsP=> i _.
apply/subsetP=> x /setIP[Hx /bigcapP/(_ i isT)/=]; rewrite !cfkerEirr !inE.
by case: (extG i) => /= j def_j _; rewrite !def_j.
exists K; rewrite sdprodE // eqEcard TI_cardMg // mul_subG //=; last first.
by rewrite (bigcap_min (0 : Iirr H)) ?cfker_sub.
rewrite -(Lagrange sHG) mulnC leq_pmul2r // -oK1 subset_leq_card //.
by apply/bigcapsP=> i _; case: (extG i).
case i0: (i == 0).
exists 0 => [x Hx|]; last by rewrite irr0 cfker_cfun1 subsetDl.
by rewrite (eqP i0) !irr0 !cfun1E // (subsetP sHG) ?Hx.
have ochi1: '['chi_i, 1] = 0 by rewrite -irr0 cfdot_irr i0.
pose a := 'chi_i 1%g; have Za: a \in Cint by rewrite CintE Cnat_irr1.
pose theta := 'chi_i - a%:A; pose phi := 'Ind[G] theta + a%:A.
have /cfun_onP theta0: theta \in 'CF(H, H^#).
by rewrite cfunD1E !cfunE cfun11 mulr1 subrr.
have RItheta: 'Res ('Ind[G] theta) = theta.
apply/cfun_inP=> x Hx; rewrite cfResE ?cfIndE // (big_setID H) /= addrC.
apply: canLR (mulKf (neq0CG H)) _; rewrite (setIidPr sHG) mulr_natl.
rewrite big1 ?add0r => [|y /setDP[/regGH tiHy H'y]]; last first.
have [-> | ntx] := eqVneq x 1%g; first by rewrite conj1g theta0 ?inE ?eqxx.
by rewrite theta0 ?tiHy // !inE ntx.
by rewrite -sumr_const; apply: eq_bigr => y Hy; rewrite cfunJ.
have ophi1: '[phi, 1] = 0.
rewrite cfdotDl -cfdot_Res_r cfRes_cfun1 // cfdotBl !cfdotZl !cfnorm1.
by rewrite ochi1 add0r addNr.
have{ochi1} n1phi: '[phi] = 1.
have: '[phi - a%:A] = '[theta] by rewrite addrK -cfdot_Res_l RItheta.
rewrite !cfnormBd ?cfnormZ ?cfdotZr ?ophi1 ?ochi1 ?mulr0 //.
by rewrite !cfnorm1 cfnorm_irr => /addIr.
have Zphi: phi \in 'Z[irr G].
by rewrite rpredD ?cfInd_vchar ?rpredB ?irr_vchar // scale_zchar ?rpred1.
have def_phi: {in H, phi =1 'chi_i}.
move=> x Hx /=; rewrite !cfunE -[_ x](cfResE _ sHG) ?RItheta //.
by rewrite !cfunE !cfun1E ?(subsetP sHG) ?Hx ?subrK.
have [j def_chi_j]: {j | 'chi_j = phi}.
apply/sig_eqW; have [[] [j]] := vchar_norm1P Zphi n1phi; last first.
by rewrite scale1r; exists j.
move/cfunP/(_ 1%g)/eqP; rewrite scaleN1r def_phi // cfunE -addr_eq0 eqr_le.
by rewrite ltr_geF // ltr_paddl ?ltrW ?irr1_gt0.
exists j; rewrite ?cfkerEirr def_chi_j //; apply/subsetP => x /setDP[Gx notHx].
rewrite inE cfunE def_phi // cfunE -/a cfun1E // Gx mulr1 cfIndE //.
rewrite big1 ?mulr0 ?add0r // => y Gy; apply/theta0/(contra _ notHx) => Hxy.
by rewrite -(conjgK y x) cover_imset -class_supportEr mem_imset2 ?groupV.
Qed.
End MoreVchar.
Definition dirr (gT : finGroupType) (B : {set gT}) : pred_class :=
[pred f : 'CF(B) | (f \in irr B) || (- f \in irr B)].
Implicit Arguments dirr [[gT]].
Section Norm1vchar.
Variables (gT : finGroupType) (G : {group gT}).
Fact dirr_key : pred_key (dirr G). Proof. by []. Qed.
Canonical dirr_keyed := KeyedPred dirr_key.
Fact dirr_oppr_closed : oppr_closed (dirr G).
Proof. by move=> xi; rewrite !inE opprK orbC. Qed.
Canonical dirr_opprPred := OpprPred dirr_oppr_closed.
Lemma dirr_opp v : (- v \in dirr G) = (v \in dirr G). Proof. exact: rpredN. Qed.
Lemma dirr_sign n v : ((-1)^+ n *: v \in dirr G) = (v \in dirr G).
Proof. exact: rpredZsign. Qed.
Lemma irr_dirr i : 'chi_i \in dirr G.
Proof. by rewrite !inE mem_irr. Qed.
Lemma dirrP f :
reflect (exists b : bool, exists i, f = (-1) ^+ b *: 'chi_i) (f \in dirr G).
Proof.
apply: (iffP idP) => [| [b [i ->]]]; last by rewrite dirr_sign irr_dirr.
case/orP=> /irrP[i Hf]; first by exists false, i; rewrite scale1r.
by exists true, i; rewrite scaleN1r -Hf opprK.
Qed.
(* This should perhaps be the definition of dirr. *)
Lemma dirrE phi : phi \in dirr G = (phi \in 'Z[irr G]) && ('[phi] == 1).
Proof.
apply/dirrP/andP=> [[b [i ->]] | [Zphi /eqP/vchar_norm1P]]; last exact.
by rewrite rpredZsign irr_vchar cfnorm_sign cfnorm_irr.
Qed.
Lemma cfdot_dirr f g : f \in dirr G -> g \in dirr G ->
'[f, g] = (if f == - g then -1 else (f == g)%:R).
Proof.
case/dirrP=> [b1 [i1 ->]] /dirrP[b2 [i2 ->]].
rewrite cfdotZl cfdotZr rmorph_sign mulrA -signr_addb cfdot_irr.
rewrite -scaleNr -signrN !eq_scaled_irr signr_eq0 !(inj_eq (@signr_inj _)) /=.
by rewrite -!negb_add addbN mulr_sign -mulNrn mulrb; case: ifP.
Qed.
Lemma dirr_norm1 phi : phi \in 'Z[irr G] -> '[phi] = 1 -> phi \in dirr G.
Proof. by rewrite dirrE => -> -> /=. Qed.
Lemma dirr_aut u phi : (cfAut u phi \in dirr G) = (phi \in dirr G).
Proof.
rewrite !dirrE vchar_aut; apply: andb_id2l => /cfdot_aut_vchar->.
exact: fmorph_eq1.
Qed.
Definition dIirr (B : {set gT}) := (bool * (Iirr B))%type.
Definition dirr1 (B : {set gT}) : dIirr B := (false, 0).
Definition ndirr (B : {set gT}) (i : dIirr B) : dIirr B :=
(~~ i.1, i.2).
Lemma ndirr_diff (i : dIirr G) : ndirr i != i.
Proof. by case: i => [] [|] i. Qed.
Lemma ndirrK : involutive (@ndirr G).
Proof. by move=> [b i]; rewrite /ndirr /= negbK. Qed.
Lemma ndirr_inj : injective (@ndirr G).
Proof. exact: (inv_inj ndirrK). Qed.
Definition dchi (B : {set gT}) (i : dIirr B) : 'CF(B) :=
(-1)^+ i.1 *: 'chi_i.2.
Lemma dchi1 : dchi (dirr1 G) = 1.
Proof. by rewrite /dchi scale1r irr0. Qed.
Lemma dirr_dchi i : dchi i \in dirr G.
Proof. by apply/dirrP; exists i.1; exists i.2. Qed.
Lemma dIrrP phi : reflect (exists i, phi = dchi i) (phi \in dirr G).
Proof.
by apply: (iffP idP)=> [/dirrP[b]|] [i ->]; [exists (b, i) | apply: dirr_dchi].
Qed.
Lemma dchi_ndirrE (i : dIirr G) : dchi (ndirr i) = - dchi i.
Proof. by case: i => [b i]; rewrite /ndirr /dchi signrN scaleNr. Qed.
Lemma cfdot_dchi (i j : dIirr G) :
'[dchi i, dchi j] = (i == j)%:R - (i == ndirr j)%:R.
Proof.
case: i => bi i; case: j => bj j; rewrite cfdot_dirr ?dirr_dchi // !xpair_eqE.
rewrite -dchi_ndirrE !eq_scaled_irr signr_eq0 !(inj_eq (@signr_inj _)) /=.
by rewrite -!negb_add addbN negbK; case: andP => [[->]|]; rewrite ?subr0 ?add0r.
Qed.
Lemma dchi_vchar i : dchi i \in 'Z[irr G].
Proof. by case: i => b i; rewrite rpredZsign irr_vchar. Qed.
Lemma cfnorm_dchi (i : dIirr G) : '[dchi i] = 1.
Proof. by case: i => b i; rewrite cfnorm_sign cfnorm_irr. Qed.
Lemma dirr_inj : injective (@dchi G).
Proof.
case=> b1 i1 [b2 i2] /eqP; rewrite eq_scaled_irr (inj_eq (@signr_inj _)) /=.
by rewrite signr_eq0 -xpair_eqE => /eqP.
Qed.
Definition dirr_dIirr (B : {set gT}) J (f : J -> 'CF(B)) j : dIirr B :=
odflt (dirr1 B) [pick i | dchi i == f j].
Lemma dirr_dIirrPE J (f : J -> 'CF(G)) (P : pred J) :
(forall j, P j -> f j \in dirr G) ->
forall j, P j -> dchi (dirr_dIirr f j) = f j.
Proof.
rewrite /dirr_dIirr => dirrGf j Pj; case: pickP => [i /eqP //|].
by have /dIrrP[i-> /(_ i)/eqP] := dirrGf j Pj.
Qed.
Lemma dirr_dIirrE J (f : J -> 'CF(G)) :
(forall j, f j \in dirr G) -> forall j, dchi (dirr_dIirr f j) = f j.
Proof. by move=> dirrGf j; apply: (@dirr_dIirrPE _ _ xpredT). Qed.
Definition dirr_constt (B : {set gT}) (phi: 'CF(B)) : {set (dIirr B)} :=
[set i | 0 < '[phi, dchi i]].
Lemma dirr_consttE (phi : 'CF(G)) (i : dIirr G) :
(i \in dirr_constt phi) = (0 < '[phi, dchi i]).
Proof. by rewrite inE. Qed.
Lemma Cnat_dirr (phi : 'CF(G)) i :
phi \in 'Z[irr G] -> i \in dirr_constt phi -> '[phi, dchi i] \in Cnat.
Proof.
move=> PiZ; rewrite CnatEint dirr_consttE andbC => /ltrW -> /=.
by case: i => b i; rewrite cfdotZr rmorph_sign rpredMsign Cint_cfdot_vchar_irr.
Qed.
Lemma dirr_constt_oppr (i : dIirr G) (phi : 'CF(G)) :
(i \in dirr_constt (-phi)) = (ndirr i \in dirr_constt phi).
Proof. by rewrite !dirr_consttE dchi_ndirrE cfdotNl cfdotNr. Qed.
Lemma dirr_constt_oppI (phi: 'CF(G)) :
dirr_constt phi :&: dirr_constt (-phi) = set0.
Proof.
apply/setP=> i; rewrite inE !dirr_consttE cfdotNl inE.
apply/idP=> /andP [L1 L2]; have := ltr_paddl (ltrW L1) L2.
by rewrite subrr ltr_def eqxx.
Qed.
Lemma dirr_constt_oppl (phi: 'CF(G)) i :
i \in dirr_constt phi -> (ndirr i) \notin dirr_constt phi.
Proof.
rewrite !dirr_consttE dchi_ndirrE cfdotNr oppr_gt0.
by move/ltrW=> /ler_gtF ->.
Qed.
Definition to_dirr (B : {set gT}) (phi : 'CF(B)) (i : Iirr B) : dIirr B :=
('[phi, 'chi_i] < 0, i).
Definition of_irr (B : {set gT}) (i : dIirr B) : Iirr B := i.2.
Lemma irr_constt_to_dirr (phi: 'CF(G)) i : phi \in 'Z[irr G] ->
(i \in irr_constt phi) = (to_dirr phi i \in dirr_constt phi).
Proof.
move=> Zphi; rewrite irr_consttE dirr_consttE cfdotZr rmorph_sign /=.
by rewrite -real_normrEsign ?normr_gt0 ?Creal_Cint // Cint_cfdot_vchar_irr.
Qed.
Lemma to_dirrK (phi: 'CF(G)) : cancel (to_dirr phi) (@of_irr G).
Proof. by []. Qed.
Lemma of_irrK (phi: 'CF(G)) :
{in dirr_constt phi, cancel (@of_irr G) (to_dirr phi)}.
Proof.
case=> b i; rewrite dirr_consttE cfdotZr rmorph_sign /= /to_dirr mulr_sign.
by rewrite fun_if oppr_gt0; case: b => [|/ltrW/ler_gtF] ->.
Qed.
Lemma cfdot_todirrE (phi: 'CF(G)) i (phi_i := dchi (to_dirr phi i)) :
'[phi, phi_i] *: phi_i = '[phi, 'chi_i] *: 'chi_i.
Proof. by rewrite cfdotZr rmorph_sign mulrC -scalerA signrZK. Qed.
Lemma cfun_sum_dconstt (phi : 'CF(G)) :
phi \in 'Z[irr G] ->
phi = \sum_(i in dirr_constt phi) '[phi, dchi i] *: dchi i.
Proof.
(* GG -- rewrite pattern fails in trunk
move=> PiZ; rewrite [X in X = _]cfun_sum_constt. *)
move=> PiZ; rewrite {1}[phi]cfun_sum_constt.
rewrite (reindex (to_dirr phi))=> [/= |]; last first.
  by exists (@of_irr _)=> //; apply: of_irrK.
by apply: eq_big=> i; rewrite ?irr_constt_to_dirr // cfdot_todirrE.
Qed.
Lemma cnorm_dconstt (phi : 'CF(G)) :
phi \in 'Z[irr G] ->
'[phi] = \sum_(i in dirr_constt phi) '[phi, dchi i] ^+ 2.
Proof.
move=> PiZ; rewrite {1 2}(cfun_sum_dconstt PiZ).
rewrite cfdot_suml; apply: eq_bigr=> i IiD.
rewrite cfdot_sumr (bigD1 i) //= big1 ?addr0 => [|j /andP [JiD IdJ]].
rewrite cfdotZr cfdotZl cfdot_dchi eqxx eq_sym (negPf (ndirr_diff i)).
by rewrite subr0 mulr1 aut_Cnat ?Cnat_dirr.
rewrite cfdotZr cfdotZl cfdot_dchi eq_sym (negPf IdJ) -natrB ?mulr0 //.
by rewrite (negPf (contraNneq _ (dirr_constt_oppl JiD))) => // <-.
Qed.
Lemma dirr_small_norm (phi : 'CF(G)) n :
phi \in 'Z[irr G] -> '[phi] = n%:R -> (n < 4)%N ->
[/\ #|dirr_constt phi| = n, dirr_constt phi :&: dirr_constt (- phi) = set0 &
phi = \sum_(i in dirr_constt phi) dchi i].
Proof.
move=> PiZ Pln; rewrite ltnNge -leC_nat => Nl4.
suffices Fd i: i \in dirr_constt phi -> '[phi, dchi i] = 1.
split; last 2 [by apply/setP=> u; rewrite !inE cfdotNl oppr_gt0 ltr_asym].
apply/eqP; rewrite -eqC_nat -sumr_const -Pln (cnorm_dconstt PiZ).
by apply/eqP/eq_bigr=> i Hi; rewrite Fd // expr1n.
rewrite {1}[phi]cfun_sum_dconstt //.
by apply: eq_bigr => i /Fd->; rewrite scale1r.
move=> IiD; apply: contraNeq Nl4 => phi_i_neq1.
rewrite -Pln cnorm_dconstt // (bigD1 i) ?ler_paddr ?sumr_ge0 //=.
by move=> j /andP[JiD _]; rewrite exprn_ge0 ?Cnat_ge0 ?Cnat_dirr.
have /CnatP[m Dm] := Cnat_dirr PiZ IiD; rewrite Dm -natrX ler_nat (leq_sqr 2).
by rewrite ltn_neqAle eq_sym -eqC_nat -ltC_nat -Dm phi_i_neq1 -dirr_consttE.
Qed.
Lemma cfdot_sum_dchi (phi1 phi2 : 'CF(G)) :
'[\sum_(i in dirr_constt phi1) dchi i,
\sum_(i in dirr_constt phi2) dchi i] =
#|dirr_constt phi1 :&: dirr_constt phi2|%:R -
#|dirr_constt phi1 :&: dirr_constt (- phi2)|%:R.
Proof.
rewrite addrC (big_setID (dirr_constt (- phi2))) /= cfdotDl; congr (_ + _).
rewrite cfdot_suml -sumr_const -sumrN; apply: eq_bigr => i /setIP[p1i p2i].
rewrite cfdot_sumr (bigD1 (ndirr i)) -?dirr_constt_oppr //= dchi_ndirrE.
rewrite cfdotNr cfnorm_dchi big1 ?addr0 // => j /andP[p2j i'j].
rewrite cfdot_dchi -(inv_eq ndirrK) [in rhs in - rhs]eq_sym (negPf i'j) subr0.
rewrite (negPf (contraTneq _ p2i)) // => ->.
by rewrite dirr_constt_oppr dirr_constt_oppl.
rewrite cfdot_sumr (big_setID (dirr_constt phi1)) setIC /= addrC.
rewrite big1 ?add0r => [|j /setDP[p2j p1'j]]; last first.
rewrite cfdot_suml big1 // => i /setDP[p1i p2'i].
rewrite cfdot_dchi (negPf (contraTneq _ p1i)) => [|-> //].
rewrite (negPf (contraNneq _ p2'i)) ?subrr // => ->.
by rewrite dirr_constt_oppr ndirrK.
rewrite -sumr_const; apply: eq_bigr => i /setIP[p1i p2i]; rewrite cfdot_suml.
rewrite (bigD1 i) /=; last by rewrite inE dirr_constt_oppr dirr_constt_oppl.
rewrite cfnorm_dchi big1 ?addr0 // => j /andP[/setDP[p1j _] i'j].
rewrite cfdot_dchi (negPf i'j) (negPf (contraTneq _ p1j)) ?subrr // => ->.
exact: dirr_constt_oppl.
Qed.
Lemma cfdot_dirr_eq1 :
{in dirr G &, forall phi psi, ('[phi, psi] == 1) = (phi == psi)}.
Proof.
move=> _ _ /dirrP[b1 [i1 ->]] /dirrP[b2 [i2 ->]].
rewrite eq_signed_irr cfdotZl cfdotZr rmorph_sign cfdot_irr mulrA -signr_addb.
rewrite pmulrn -rmorphMsign (eqr_int _ _ 1) -negb_add.
by case: (b1 (+) b2) (i1 == i2) => [] [].
Qed.
Lemma cfdot_add_dirr_eq1 :
{in dirr G & &, forall phi1 phi2 psi,
'[phi1 + phi2, psi] = 1 -> psi = phi1 \/ psi = phi2}.
Proof.
move=> _ _ _ /dirrP[b1 [i1 ->]] /dirrP[b2 [i2 ->]] /dirrP[c [j ->]] /eqP.
rewrite cfdotDl !cfdotZl !cfdotZr !rmorph_sign !cfdot_irr !mulrA -!signr_addb.
rewrite 2!{1}signrE !mulrBl !mul1r -!natrM addrCA -subr_eq0 -!addrA.
rewrite -!opprD addrA subr_eq0 -mulrSr -!natrD eqr_nat => eq_phi_psi.
apply/pred2P; rewrite /= !eq_signed_irr -!negb_add !(eq_sym j) !(addbC c).
by case: (i1 == j) eq_phi_psi; case: (i2 == j); do 2!case: (_ (+) c).
Qed.
End Norm1vchar.
|
{"author": "math-comp", "repo": "mathcomp-history-before-github", "sha": "19ef9415e2b509a2327f9ef704268ce8570b607c", "save_path": "github-repos/coq/math-comp-mathcomp-history-before-github", "path": "github-repos/coq/math-comp-mathcomp-history-before-github/mathcomp-history-before-github-19ef9415e2b509a2327f9ef704268ce8570b607c/attic/theories/vcharacter.v"}
|
import sys
import os
import pandas as pd
import numpy as np
import scipy as sp
import camoco as co
import matplotlib.pylab as plt
try:
from sklearn.neighbors import KernelDensity
except ImportError as e:
raise ImportError('This command requires sklearn')
def cistrans(args):
cob = co.COB(args.cob)
    if args.out is None:
        args.out = '{}_cistrans'.format(cob.name)
    # open an output file
    out = open(args.out + '.summary.txt', 'w')
# np.newaxis adds an empty axis in that position of the slice
# the sklearn module requires the values to be in the rows:
# http://scikit-learn.org/stable/auto_examples/neighbors/plot_kde_1d.html
coex = cob._coex_DataFrame(sig_only=False)
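    # cis pairs: scores for gene pairs within the cis-distance cutoff;
    # trans pairs: scores for pairs at infinite distance (i.e. gene pairs
    # on different chromosomes)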
cis = coex \
.score[coex.distance <= args.cis_distance]\
.values[:,np.newaxis]
trans = coex\
.score[np.isinf(coex.distance)]\
.values[:,np.newaxis]
    X_plot = np.linspace(-10, 10, 1000)[:, np.newaxis]
    msg = 'Found {:,} cis interactions and {:,} trans interactions'.format(
        cis.shape[0],
        trans.shape[0]
    )
    print(msg)
    print(msg, file=out)
    # Fit the kernel density estimate for the cis scores
    kd = KernelDensity(bandwidth=0.2)
    kd.fit(cis)
    cis_kde = np.exp(kd.score_samples(X_plot))
    plt.fill(X_plot, cis_kde, alpha=0.5, label='Cis Interactions')
    # Fit the trans scores (subsampled for speed)
    kd.fit(trans[0:50000])
    trans_kde = np.exp(kd.score_samples(X_plot))
    plt.fill(X_plot, trans_kde, alpha=0.5, label='Trans Interactions')
plt.legend()
plt.title('Cis vs Trans Density: {}'.format(cob.name))
    # Calculate the Mann-Whitney U test
    u, pval = sp.stats.mannwhitneyu(cis[:, 0], trans[:, 0])
    print('P-val: {}'.format(pval))
    print('P-val: {}'.format(pval), file=out)
print('Figure saved: {}'.format(args.out+'.png'))
plt.savefig(args.out+'.png')
|
{"hexsha": "fc4964107e780f00b02e313b7a98f8122926331d", "size": 1827, "ext": "py", "lang": "Python", "max_stars_repo_path": "camoco/cli/commands/cistrans.py", "max_stars_repo_name": "jonahcullen/Camoco", "max_stars_repo_head_hexsha": "2e95950f996329e27c00e5155e3768c0de9b8b7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "camoco/cli/commands/cistrans.py", "max_issues_repo_name": "jonahcullen/Camoco", "max_issues_repo_head_hexsha": "2e95950f996329e27c00e5155e3768c0de9b8b7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camoco/cli/commands/cistrans.py", "max_forks_repo_name": "jonahcullen/Camoco", "max_forks_repo_head_hexsha": "2e95950f996329e27c00e5155e3768c0de9b8b7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.625, "max_line_length": 77, "alphanum_fraction": 0.6486042693, "include": true, "reason": "import numpy,import scipy", "num_tokens": 505}
|
###
# Format and filters
# Reader
support_filter_all(archive::Reader) =
@_la_call(archive_read_support_filter_all, (Ptr{Void},), archive)
support_filter_bzip2(archive::Reader) =
@_la_call(archive_read_support_filter_bzip2, (Ptr{Void},), archive)
support_filter_compress(archive::Reader) =
@_la_call(archive_read_support_filter_compress, (Ptr{Void},), archive)
support_filter_gzip(archive::Reader) =
@_la_call(archive_read_support_filter_gzip, (Ptr{Void},), archive)
support_filter_grzip(archive::Reader) =
@_la_call(archive_read_support_filter_grzip, (Ptr{Void},), archive)
support_filter_lrzip(archive::Reader) =
@_la_call(archive_read_support_filter_lrzip, (Ptr{Void},), archive)
support_filter_lzip(archive::Reader) =
@_la_call(archive_read_support_filter_lzip, (Ptr{Void},), archive)
support_filter_lzma(archive::Reader) =
@_la_call(archive_read_support_filter_lzma, (Ptr{Void},), archive)
support_filter_lzop(archive::Reader) =
@_la_call(archive_read_support_filter_lzop, (Ptr{Void},), archive)
support_filter_none(archive::Reader) =
@_la_call(archive_read_support_filter_none, (Ptr{Void},), archive)
"""
Data is fed through the specified external program before being dearchived.
Note that this disables automatic detection of the compression format, so it
makes no sense to specify this in conjunction with any other decompression
option.
"""
support_filter_program(archive::Reader, cmd) =
@_la_call(archive_read_support_filter_program,
(Ptr{Void}, Cstring), archive, cmd)
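# Example (sketch): route the compressed input through an external
# decompressor, e.g. `support_filter_program(reader, "gzip -dc")`; as noted
# above, this disables automatic filter detection.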
support_filter_program_signature(archive::Reader, cmd, sig) =
@_la_call(archive_read_support_filter_program_signature,
(Ptr{Void}, Cstring, Ptr{Void}, Csize_t),
archive, cmd, sig, sizeof(sig))
support_filter_rpm(archive::Reader) =
@_la_call(archive_read_support_filter_rpm, (Ptr{Void},), archive)
support_filter_uu(archive::Reader) =
@_la_call(archive_read_support_filter_uu, (Ptr{Void},), archive)
support_filter_xz(archive::Reader) =
@_la_call(archive_read_support_filter_xz, (Ptr{Void},), archive)
support_format_7zip(archive::Reader) =
@_la_call(archive_read_support_format_7zip, (Ptr{Void},), archive)
support_format_all(archive::Reader) =
@_la_call(archive_read_support_format_all, (Ptr{Void},), archive)
support_format_ar(archive::Reader) =
@_la_call(archive_read_support_format_ar, (Ptr{Void},), archive)
support_format_by_code(archive::Reader, code) =
@_la_call(archive_read_support_format_by_code,
(Ptr{Void}, Cint), archive, code)
support_format_cab(archive::Reader) =
@_la_call(archive_read_support_format_cab, (Ptr{Void},), archive)
support_format_cpio(archive::Reader) =
@_la_call(archive_read_support_format_cpio, (Ptr{Void},), archive)
support_format_empty(archive::Reader) =
@_la_call(archive_read_support_format_empty, (Ptr{Void},), archive)
support_format_gnutar(archive::Reader) =
@_la_call(archive_read_support_format_gnutar, (Ptr{Void},), archive)
support_format_iso9660(archive::Reader) =
@_la_call(archive_read_support_format_iso9660, (Ptr{Void},), archive)
support_format_lha(archive::Reader) =
@_la_call(archive_read_support_format_lha, (Ptr{Void},), archive)
support_format_mtree(archive::Reader) =
@_la_call(archive_read_support_format_mtree, (Ptr{Void},), archive)
support_format_rar(archive::Reader) =
@_la_call(archive_read_support_format_rar, (Ptr{Void},), archive)
support_format_raw(archive::Reader) =
@_la_call(archive_read_support_format_raw, (Ptr{Void},), archive)
support_format_tar(archive::Reader) =
@_la_call(archive_read_support_format_tar, (Ptr{Void},), archive)
support_format_xar(archive::Reader) =
@_la_call(archive_read_support_format_xar, (Ptr{Void},), archive)
support_format_zip(archive::Reader) =
@_la_call(archive_read_support_format_zip, (Ptr{Void},), archive)
# Functions to manually set the format and filters to be used. This is
# useful to bypass the bidding process when the format and filters to use
# is known in advance.
set_format(archive::Reader, fmt) =
@_la_call(archive_read_set_format, (Ptr{Void}, Cint), archive, fmt)
append_filter(archive::Reader, filter) =
@_la_call(archive_read_append_filter, (Ptr{Void}, Cint), archive, filter)
append_filter_program(archive::Reader, cmd) =
@_la_call(archive_read_append_filter_program, (Ptr{Void}, Cstring),
archive, cmd)
append_filter_program_signature(archive::Reader, cmd, sig) =
@_la_call(archive_read_append_filter_program_signature,
(Ptr{Void}, Cstring, Ptr{Void}, Csize_t),
archive, cmd, sig, sizeof(sig))
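# Example (sketch, assuming the raw libarchive integer codes are in scope):
#     set_format(reader, format_code)      # e.g. ARCHIVE_FORMAT_TAR
#     append_filter(reader, filter_code)   # e.g. ARCHIVE_FILTER_GZIP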
# Writer
"A convenience function to set the filter based on the code."
add_filter(archive::Writer, filter_code::Integer) =
@_la_call(archive_write_add_filter, (Ptr{Void}, Cint),
archive, filter_code)
add_filter(archive::Writer, name::AbstractString) =
@_la_call(archive_write_add_filter_by_name, (Ptr{Void}, Cstring),
archive, name)
add_filter_b64encode(archive::Writer) =
@_la_call(archive_write_add_filter_b64encode, (Ptr{Void},), archive)
add_filter_bzip2(archive::Writer) =
@_la_call(archive_write_add_filter_bzip2, (Ptr{Void},), archive)
add_filter_compress(archive::Writer) =
@_la_call(archive_write_add_filter_compress, (Ptr{Void},), archive)
add_filter_grzip(archive::Writer) =
@_la_call(archive_write_add_filter_grzip, (Ptr{Void},), archive)
add_filter_gzip(archive::Writer) =
@_la_call(archive_write_add_filter_gzip, (Ptr{Void},), archive)
add_filter_lrzip(archive::Writer) =
@_la_call(archive_write_add_filter_lrzip, (Ptr{Void},), archive)
add_filter_lzip(archive::Writer) =
@_la_call(archive_write_add_filter_lzip, (Ptr{Void},), archive)
add_filter_lzma(archive::Writer) =
@_la_call(archive_write_add_filter_lzma, (Ptr{Void},), archive)
add_filter_lzop(archive::Writer) =
@_la_call(archive_write_add_filter_lzop, (Ptr{Void},), archive)
add_filter_none(archive::Writer) =
@_la_call(archive_write_add_filter_none, (Ptr{Void},), archive)
add_filter_program(archive::Writer, cmd::AbstractString) =
@_la_call(archive_write_add_filter_program,
(Ptr{Void}, Cstring), archive, cmd)
add_filter_uuencode(archive::Writer) =
@_la_call(archive_write_add_filter_uuencode, (Ptr{Void},), archive)
add_filter_xz(archive::Writer) =
@_la_call(archive_write_add_filter_xz, (Ptr{Void},), archive)
"A convenience function to set the format based on the code or name."
set_format(archive::Writer, format_code::Integer) =
@_la_call(archive_write_set_format, (Ptr{Void}, Cint),
archive, format_code)
set_format(archive::Writer, name::AbstractString) =
@_la_call(archive_write_set_format_by_name, (Ptr{Void}, Cstring),
archive, name)
set_format_7zip(archive::Writer) =
@_la_call(archive_write_set_format_7zip, (Ptr{Void},), archive)
set_format_ar_bsd(archive::Writer) =
@_la_call(archive_write_set_format_ar_bsd, (Ptr{Void},), archive)
set_format_ar_svr4(archive::Writer) =
@_la_call(archive_write_set_format_ar_svr4, (Ptr{Void},), archive)
set_format_cpio(archive::Writer) =
@_la_call(archive_write_set_format_cpio, (Ptr{Void},), archive)
set_format_cpio_newc(archive::Writer) =
@_la_call(archive_write_set_format_cpio_newc, (Ptr{Void},), archive)
set_format_gnutar(archive::Writer) =
@_la_call(archive_write_set_format_gnutar, (Ptr{Void},), archive)
set_format_iso9660(archive::Writer) =
@_la_call(archive_write_set_format_iso9660, (Ptr{Void},), archive)
set_format_mtree(archive::Writer) =
@_la_call(archive_write_set_format_mtree, (Ptr{Void},), archive)
set_format_mtree_classic(archive::Writer) =
@_la_call(archive_write_set_format_mtree_classic, (Ptr{Void},), archive)
set_format_pax(archive::Writer) =
@_la_call(archive_write_set_format_pax, (Ptr{Void},), archive)
set_format_pax_restricted(archive::Writer) =
@_la_call(archive_write_set_format_pax_restricted, (Ptr{Void},), archive)
set_format_shar(archive::Writer) =
@_la_call(archive_write_set_format_shar, (Ptr{Void},), archive)
set_format_shar_dump(archive::Writer) =
@_la_call(archive_write_set_format_shar_dump, (Ptr{Void},), archive)
set_format_ustar(archive::Writer) =
@_la_call(archive_write_set_format_ustar, (Ptr{Void},), archive)
set_format_v7tar(archive::Writer) =
@_la_call(archive_write_set_format_v7tar, (Ptr{Void},), archive)
set_format_xar(archive::Writer) =
@_la_call(archive_write_set_format_xar, (Ptr{Void},), archive)
set_format_zip(archive::Writer) =
@_la_call(archive_write_set_format_zip, (Ptr{Void},), archive)
zip_set_compression_deflate(archive::Writer) =
@_la_call(archive_write_zip_set_compression_deflate, (Ptr{Void},), archive)
zip_set_compression_store(archive::Writer) =
@_la_call(archive_write_zip_set_compression_store, (Ptr{Void},), archive)
|
{"hexsha": "37fd9fdcc2920655b7be19b980e683a3a17be6c6", "size": 8881, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/format.jl", "max_stars_repo_name": "JuliaPackageMirrors/LibArchive.jl", "max_stars_repo_head_hexsha": "74ebbe81ff8f4f6908b38caef58767906c20308d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/format.jl", "max_issues_repo_name": "JuliaPackageMirrors/LibArchive.jl", "max_issues_repo_head_hexsha": "74ebbe81ff8f4f6908b38caef58767906c20308d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/format.jl", "max_forks_repo_name": "JuliaPackageMirrors/LibArchive.jl", "max_forks_repo_head_hexsha": "74ebbe81ff8f4f6908b38caef58767906c20308d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.1751412429, "max_line_length": 79, "alphanum_fraction": 0.7627519423, "num_tokens": 2390}
|
"""
===================
Entropy
===================
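
Compute the local entropy of 8-bit and 16-bit versions of the camera image,
using a disk of radius 5 as the structuring element.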
"""
from skimage import data
from skimage.filter.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
# define an 8-bit and a 16-bit test image
a8 = data.camera()
a16 = data.camera().astype(np.uint16) * 4
ent8 = entropy(a8, disk(5))    # pixel values contain 10x the local entropy
ent16 = entropy(a16, disk(5))  # pixel values contain 1000x the local entropy
# display results
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.imshow(a8, cmap=plt.cm.gray)
plt.xlabel('8-bit image')
plt.colorbar()
plt.subplot(2, 2, 2)
plt.imshow(ent8, cmap=plt.cm.jet)
plt.xlabel('entropy*10')
plt.colorbar()
plt.subplot(2, 2, 3)
plt.imshow(a16, cmap=plt.cm.gray)
plt.xlabel('16-bit image')
plt.colorbar()
plt.subplot(2, 2, 4)
plt.imshow(ent16, cmap=plt.cm.jet)
plt.xlabel('entropy*1000')
plt.colorbar()
plt.show()
|
{"hexsha": "f019d79c194f3ecfee704cb5fad901f91fa9d882", "size": 899, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc/examples/plot_entropy.py", "max_stars_repo_name": "RKDSOne/scikit-image", "max_stars_repo_head_hexsha": "baa67eafcace9cde1b94ad2d467e2f2e0468e759", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-27T18:42:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-27T18:42:22.000Z", "max_issues_repo_path": "doc/examples/plot_entropy.py", "max_issues_repo_name": "RKDSOne/scikit-image", "max_issues_repo_head_hexsha": "baa67eafcace9cde1b94ad2d467e2f2e0468e759", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/examples/plot_entropy.py", "max_forks_repo_name": "RKDSOne/scikit-image", "max_forks_repo_head_hexsha": "baa67eafcace9cde1b94ad2d467e2f2e0468e759", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-12-29T17:04:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-17T15:47:30.000Z", "avg_line_length": 19.9777777778, "max_line_length": 74, "alphanum_fraction": 0.6963292547, "include": true, "reason": "import numpy", "num_tokens": 251}
|
# Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytket.circuit import Circuit, OpType, Op, PauliExpBox, Unitary2qBox, Node, Qubit # type: ignore
from pytket.pauli import Pauli # type: ignore
from pytket.passes import ( # type: ignore
BasePass,
SequencePass,
RemoveRedundancies,
SynthesiseTket,
SynthesiseHQS,
SynthesiseUMD,
RepeatUntilSatisfiedPass,
CommuteThroughMultis,
RepeatPass,
DecomposeMultiQubitsCX,
SquashTK1,
RepeatWithMetricPass,
RebaseCustom,
EulerAngleReduction,
RoutingPass,
CXMappingPass,
PlacementPass,
RenameQubitsPass,
FullMappingPass,
DefaultMappingPass,
AASRouting,
DecomposeSwapsToCXs,
DecomposeSwapsToCircuit,
PauliSimp,
KAKDecomposition,
ThreeQubitSquash,
DecomposeArbitrarilyControlledGates,
DecomposeBoxes,
PeepholeOptimise2Q,
FullPeepholeOptimise,
RebaseCirq,
RebaseHQS,
RebaseProjectQ,
RebasePyZX,
RebaseQuil,
RebaseTket,
RebaseUMD,
RebaseUFR,
RebaseOQC,
SquashHQS,
FlattenRegisters,
SquashCustom,
DelayMeasures,
CliffordSimp,
OptimisePhaseGadgets,
GuidedPauliSimp,
RemoveDiscarded,
SimplifyMeasured,
SimplifyInitial,
RemoveBarriers,
PauliSquash,
)
from pytket.predicates import ( # type: ignore
GateSetPredicate,
NoClassicalControlPredicate,
DirectednessPredicate,
CompilationUnit,
UserDefinedPredicate,
)
from pytket.routing import Architecture, Placement, GraphPlacement # type: ignore
from pytket.transform import Transform, PauliSynthStrat, CXConfigType # type: ignore
from pytket._tket.passes import SynthesiseOQC # type: ignore
import numpy as np
import pytest # type: ignore
from typing import Dict, Any, List
circ2 = Circuit(1)
circ2.Rx(0.25, 0)
ots = {OpType.X, OpType.Z}
gsp = GateSetPredicate(ots)
nccp = NoClassicalControlPredicate()
def tk1_to_phasedxrz(a: float, b: float, c: float) -> Circuit:
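    # PhasedX(beta, alpha) = Rz(alpha)*Rx(beta)*Rz(-alpha), so applying
    # Rz(a + c) followed by PhasedX(b, a) realises
    # tk1(a, b, c) = Rz(a)*Rx(b)*Rz(c) exactly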
circ = Circuit(1)
circ.Rz(a + c, 0)
phasedx_op = Op.create(OpType.PhasedX, [b, a])
circ.add_gate(phasedx_op, [0])
Transform.RemoveRedundancies().apply(circ)
return circ
def test_predicate_generation() -> None:
string_gsp = repr(gsp)
assert string_gsp.find("X") != -1
assert string_gsp.find("Z") != -1
assert repr(nccp) == "NoClassicalControlPredicate"
def test_compilation_unit_generation() -> None:
pp_list = [gsp, nccp]
circ = Circuit(2)
circ.X(0).Z(1)
cu = CompilationUnit(circ, pp_list)
assert cu.check_all_predicates()
cu2 = CompilationUnit(circ2, pp_list)
assert not cu2.check_all_predicates()
def test_compilerpass_seq() -> None:
passlist = [SynthesiseTket(), SynthesiseOQC(), SynthesiseUMD(), SynthesiseHQS()]
seq = SequencePass(passlist)
circ = Circuit(2)
circ.X(0).Z(1)
cu = CompilationUnit(circ)
cu2 = CompilationUnit(circ2)
assert seq.apply(cu)
assert seq.apply(cu2)
def test_rebase_pass_generation() -> None:
cx = Circuit(2)
cx.CX(0, 1)
pz_rebase = RebaseCustom(
{OpType.CX}, cx, {OpType.PhasedX, OpType.Rz}, tk1_to_phasedxrz
)
circ = Circuit(2)
circ.X(0).Y(1)
cu = CompilationUnit(circ)
assert pz_rebase.apply(cu)
coms = cu.circuit.get_commands()
assert str(coms) == "[PhasedX(1, 0) q[0];, PhasedX(1, 0.5) q[1];]"
passlist = [pz_rebase, SynthesiseTket()]
seq = SequencePass(passlist)
assert seq.apply(cu)
coms = cu.circuit.get_commands()
assert str(coms) == "[tk1(0.5, 1, 0.5) q[0];, tk1(0.5, 1, 3.5) q[1];]"
def test_custom_combinator_generation() -> None:
def test_CX_size_threshold(circ: Circuit) -> bool:
return bool(circ.n_gates_of_type(OpType.CX) == 0)
seq_pass = SequencePass([RemoveRedundancies(), CommuteThroughMultis()])
custom_pass = RepeatUntilSatisfiedPass(seq_pass, test_CX_size_threshold)
circ = Circuit(2)
circ.CX(0, 1)
circ.X(1)
circ.CX(0, 1)
circ.X(1)
circ.CX(0, 1)
circ.X(1)
circ.CX(0, 1)
circ.Z(1)
circ.CX(1, 0)
circ.Z(1)
circ.CX(1, 0)
cu = CompilationUnit(circ)
assert custom_pass.apply(cu)
# Test in-place application
circ1 = cu.circuit
assert custom_pass.apply(circ)
assert circ == circ1
assert not custom_pass.apply(circ)
def test_routing_and_placement_pass() -> None:
circ = Circuit()
q = circ.add_q_register("q", 5)
circ.CX(0, 1)
circ.H(0)
circ.Z(1)
circ.CX(0, 3)
circ.Rx(1.5, 3)
circ.CX(2, 4)
circ.X(2)
circ.CX(1, 4)
circ.CX(0, 4)
n0 = Node("b", 0)
n1 = Node("b", 1)
n2 = Node("b", 2)
n3 = Node("a", 0)
n4 = Node("f", 0)
n5 = Node("f", 2)
arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4], [n1, n5]])
pl = Placement(arc)
routing = RoutingPass(arc)
placement = PlacementPass(pl)
cu = CompilationUnit(circ.copy())
assert placement.apply(cu)
assert routing.apply(cu)
expected_map = {q[0]: n1, q[1]: n0, q[2]: n2, q[3]: n5, q[4]: n3}
assert cu.initial_map == expected_map
# check composition works ok
seq_pass = SequencePass([SynthesiseTket(), placement, routing, SynthesiseUMD()])
cu2 = CompilationUnit(circ.copy())
assert seq_pass.apply(cu2)
assert cu2.initial_map == expected_map
full_pass = FullMappingPass(arc, pl)
cu3 = CompilationUnit(circ.copy())
assert full_pass.apply(cu3)
assert cu3.initial_map == expected_map
assert cu.circuit == cu3.circuit
def test_default_mapping_pass() -> None:
circ = Circuit()
q = circ.add_q_register("q", 5)
circ.CX(0, 1)
circ.H(0)
circ.Z(1)
circ.CX(0, 3)
circ.Rx(1.5, 3)
circ.CX(2, 4)
circ.X(2)
circ.CX(1, 4)
circ.CX(0, 4)
n0 = Node("b", 0)
n1 = Node("b", 1)
n2 = Node("b", 2)
n3 = Node("a", 0)
n4 = Node("f", 0)
arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4]])
pl = GraphPlacement(arc)
routing = RoutingPass(arc)
placement = PlacementPass(pl)
default = DefaultMappingPass(arc)
cu_rp = CompilationUnit(circ.copy())
cu_def = CompilationUnit(circ.copy())
assert placement.apply(cu_rp)
assert routing.apply(cu_rp)
assert default.apply(cu_def)
assert cu_rp.circuit == cu_def.circuit
def test_default_mapping_pass_phase_poly_aas() -> None:
circ = Circuit()
q = circ.add_q_register("q", 5)
circ.CX(0, 1)
circ.H(0)
circ.Z(1)
circ.CX(0, 3)
circ.Rx(1.5, 3)
circ.CX(2, 4)
circ.X(2)
circ.CX(1, 4)
circ.CX(0, 4)
n0 = Node("a", 0)
n1 = Node("b", 1)
n2 = Node("c", 2)
n3 = Node("d", 3)
n4 = Node("e", 4)
arc = Architecture([[n0, n1], [n1, n2], [n2, n3], [n3, n4]])
default = AASRouting(arc, lookahead=1)
assert default.apply(circ)
def test_rename_qubits_pass() -> None:
circ = Circuit()
qbs = circ.add_q_register("a", 2)
circ.CX(Qubit("a", 0), Qubit("a", 1))
qm = {Qubit("a", 0): Qubit("b", 1), Qubit("a", 1): Qubit("b", 0)}
p = RenameQubitsPass(qm)
cu = CompilationUnit(circ)
p.apply(cu)
newcirc = cu.circuit
assert set(newcirc.qubits) == set([Qubit("b", i) for i in range(2)])
def gate_count_metric(circ: Circuit) -> int:
return int(circ.n_gates)
def test_RebaseOQC_and_SynthesiseOQC() -> None:
oqc_gateset = {OpType.SX, OpType.Rz, OpType.ECR}
oqc_gateset_pred = GateSetPredicate(oqc_gateset)
circ = Circuit(3)
circ.CX(0, 1)
circ.H(0)
circ.Z(1)
circ.CX(0, 2)
circ.Rx(1.5, 2)
circ.CX(2, 1)
circ.X(2)
circ.CX(1, 0)
circ.CX(0, 1)
u = circ.get_unitary()
# Test SynthesiseOQC
circ2 = circ.copy()
SynthesiseOQC().apply(circ2)
assert oqc_gateset_pred.verify(circ2)
u_with_oqc = circ2.get_unitary()
assert np.allclose(u, u_with_oqc)
RebaseTket().apply(circ2)
u2 = circ2.get_unitary()
assert np.allclose(u, u2)
# Test RebaseOQC
circ3 = circ.copy()
u_before_oqc = circ3.get_unitary()
assert np.allclose(u, u_before_oqc)
RebaseOQC().apply(circ3)
assert oqc_gateset_pred.verify(circ3)
u_before_rebase_tket = circ3.get_unitary()
assert np.allclose(u, u_before_rebase_tket)
RebaseTket().apply(circ3)
u3 = circ3.get_unitary()
assert np.allclose(u, u3)
def test_SynthesiseTket_creation() -> None:
# my_synthesise_tket should act on a CompilationUnit the same as SynthesiseTket
seq_pass = SequencePass([CommuteThroughMultis(), RemoveRedundancies()])
repeat_pass = RepeatPass(seq_pass)
synth_pass = SequencePass(
[DecomposeMultiQubitsCX(), RemoveRedundancies(), repeat_pass, SquashTK1()]
)
small_part = SequencePass([RemoveRedundancies(), repeat_pass, SquashTK1()])
repeat_synth_pass = RepeatWithMetricPass(small_part, gate_count_metric)
my_synthesise_tket = SequencePass([synth_pass, repeat_synth_pass])
circ1 = Circuit(3)
circ1.X(0).Y(1).CX(0, 1).Z(0).Rx(1.3, 1).CX(0, 1).Rz(0.4, 0).Ry(0.53, 0).H(1).H(
2
).Rx(1.5, 2).Rx(0.5, 2).H(2)
cu1 = CompilationUnit(circ1)
my_synthesise_tket.apply(cu1)
circ2 = cu1.circuit
assert circ2.n_gates == 2
cu2 = CompilationUnit(circ1)
# Blue Peter voice: here's one I made earlier
SynthesiseTket().apply(cu2)
circ3 = cu2.circuit
assert circ3.n_gates == 2
assert circ2 == circ3
# now let's run with routing
arc = Architecture([(0, 2), (1, 2)])
pl = Placement(arc)
pl.place(circ1)
routing_pass = RoutingPass(arc)
cu3 = CompilationUnit(circ1)
cu4 = CompilationUnit(circ1)
cu5 = CompilationUnit(circ1)
assert routing_pass.apply(cu3)
full_pass = SequencePass([SynthesiseTket(), routing_pass])
full_pass2 = SequencePass([my_synthesise_tket, routing_pass])
assert full_pass.apply(cu4)
assert full_pass2.apply(cu5)
assert cu4.circuit == cu5.circuit
def test_directed_cx_pass() -> None:
circ = Circuit(5)
circ.CX(0, 1)
circ.Rx(1.4, 0)
circ.H(1)
circ.CX(0, 2)
circ.CX(0, 3)
circ.Sdg(4)
circ.CX(2, 3)
circ.CX(3, 4)
circ.CX(4, 3)
circ.CX(3, 1)
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
pl = Placement(arc)
cu1 = CompilationUnit(circ)
dir_router = CXMappingPass(arc, pl, directed_cx=True)
assert dir_router.apply(cu1)
circ2 = cu1.circuit
dir_pred = DirectednessPredicate(arc)
assert dir_pred.verify(circ2)
def test_decompose_routing_gates_to_cxs() -> None:
circ = Circuit(4)
circ.CX(1, 0)
circ.SWAP(0, 1)
circ.SWAP(1, 2)
circ.CX(2, 3)
cu = CompilationUnit(circ)
arc = Architecture([[0, 1], [1, 2], [2, 3]])
pss = DecomposeSwapsToCXs(arc)
assert pss.apply(cu)
circ1 = cu.circuit
for cmd in circ1.get_commands():
assert cmd.op.type == OpType.CX
def test_remove_barriers() -> None:
circ = Circuit(4)
circ.CX(0, 1)
circ.CX(1, 2)
circ.add_barrier([1, 2])
circ.CX(2, 3)
cu = CompilationUnit(circ)
pss = RemoveBarriers()
assert pss.apply(cu)
circ1 = cu.circuit
for cmd in circ1.get_commands():
assert cmd.op.type == OpType.CX
def test_user_defined_swap_decomp() -> None:
circ = Circuit(2)
circ.SWAP(0, 1)
cu = CompilationUnit(circ)
repcirc = Circuit(2)
repcirc.X(0)
repcirc.CX(0, 1)
repcirc.CX(1, 0)
repcirc.CX(0, 1)
repcirc.X(1)
pss = DecomposeSwapsToCircuit(repcirc)
assert pss.apply(cu)
circ1 = cu.circuit
assert circ1 == repcirc
def test_pauligraph_synth() -> None:
circ = Circuit(4, 4)
pg = PauliExpBox([Pauli.X, Pauli.Z, Pauli.Y, Pauli.I], 0.3)
circ.add_pauliexpbox(pg, [0, 1, 2, 3])
circ.measure_all()
cu = CompilationUnit(circ)
pss = PauliSimp(PauliSynthStrat.Sets, CXConfigType.Tree)
assert pss.apply(cu)
circ1 = cu.circuit
assert circ1.depth_by_type(OpType.CX) == 4
def test_squash_chains() -> None:
# XY
c = Circuit(2)
c.H(0).H(1)
c.Rx(0.1, 0).Ry(0.2, 0).Rx(0.3, 0).Ry(0.4, 0).Ry(0.5, 0).Rx(0.6, 0)
c.CX(0, 1).H(1)
c.Ry(0.7, 1).Rx(0.8, 1).Rx(0.9, 1).Ry(1.1, 1).Rx(1.2, 1)
u = c.get_unitary()
EulerAngleReduction(OpType.Rx, OpType.Ry, strict=True).apply(c)
u1 = c.get_unitary()
assert np.allclose(u, u1)
optypes = [cmd.op.type for cmd in c.get_commands()]
assert optypes == [
OpType.H,
OpType.H,
OpType.Ry,
OpType.Rx,
OpType.Ry,
OpType.CX,
OpType.H,
OpType.Ry,
OpType.Rx,
OpType.Ry,
]
# XZ
c = Circuit(2)
c.H(0).H(1)
c.Rx(0.1, 0).Rz(0.2, 0).Rx(0.3, 0).Rz(0.4, 0).Rz(0.5, 0).Rx(0.6, 0)
c.CX(0, 1).H(1)
c.Rz(0.7, 1).Rx(0.8, 1).Rx(0.9, 1).Rz(1.1, 1).Rx(1.2, 1)
u = c.get_unitary()
EulerAngleReduction(OpType.Rx, OpType.Rz, strict=True).apply(c)
u1 = c.get_unitary()
assert np.allclose(u, u1)
optypes = [cmd.op.type for cmd in c.get_commands()]
assert optypes == [
OpType.H,
OpType.H,
OpType.Rz,
OpType.Rx,
OpType.Rz,
OpType.CX,
OpType.H,
OpType.Rz,
OpType.Rx,
OpType.Rz,
]
# YZ
c = Circuit(2)
c.H(0).H(1)
c.Ry(0.1, 0).Rz(0.2, 0).Ry(0.3, 0).Rz(0.4, 0).Rz(0.5, 0).Ry(0.6, 0)
c.CX(0, 1).H(1)
c.Rz(0.7, 1).Ry(0.8, 1).Ry(0.9, 1).Rz(1.1, 1).Ry(1.2, 1)
u = c.get_unitary()
EulerAngleReduction(OpType.Ry, OpType.Rz, strict=True).apply(c)
u1 = c.get_unitary()
assert np.allclose(u, u1)
optypes = [cmd.op.type for cmd in c.get_commands()]
assert optypes == [
OpType.H,
OpType.H,
OpType.Rz,
OpType.Ry,
OpType.Rz,
OpType.CX,
OpType.H,
OpType.Rz,
OpType.Ry,
OpType.Rz,
]
def test_library_pass_config() -> None:
assert KAKDecomposition().to_dict()["StandardPass"]["name"] == "KAKDecomposition"
assert ThreeQubitSquash().to_dict()["StandardPass"]["name"] == "ThreeQubitSquash"
assert (
CommuteThroughMultis().to_dict()["StandardPass"]["name"]
== "CommuteThroughMultis"
)
assert (
DecomposeArbitrarilyControlledGates().to_dict()["StandardPass"]["name"]
== "DecomposeArbitrarilyControlledGates"
)
assert DecomposeBoxes().to_dict()["StandardPass"]["name"] == "DecomposeBoxes"
assert (
DecomposeMultiQubitsCX().to_dict()["StandardPass"]["name"]
== "DecomposeMultiQubitsCX"
)
assert (
PeepholeOptimise2Q().to_dict()["StandardPass"]["name"] == "PeepholeOptimise2Q"
)
assert (
FullPeepholeOptimise().to_dict()["StandardPass"]["name"]
== "FullPeepholeOptimise"
)
assert RebaseCirq().to_dict()["StandardPass"]["name"] == "RebaseCirq"
assert RebaseHQS().to_dict()["StandardPass"]["name"] == "RebaseHQS"
assert RebaseProjectQ().to_dict()["StandardPass"]["name"] == "RebaseProjectQ"
assert RebasePyZX().to_dict()["StandardPass"]["name"] == "RebasePyZX"
assert RebaseQuil().to_dict()["StandardPass"]["name"] == "RebaseQuil"
assert RebaseTket().to_dict()["StandardPass"]["name"] == "RebaseTket"
assert RebaseUMD().to_dict()["StandardPass"]["name"] == "RebaseUMD"
assert RebaseUFR().to_dict()["StandardPass"]["name"] == "RebaseUFR"
assert (
RemoveRedundancies().to_dict()["StandardPass"]["name"] == "RemoveRedundancies"
)
assert SynthesiseHQS().to_dict()["StandardPass"]["name"] == "SynthesiseHQS"
assert SynthesiseTket().to_dict()["StandardPass"]["name"] == "SynthesiseTket"
assert SynthesiseOQC().to_dict()["StandardPass"]["name"] == "SynthesiseOQC"
assert SynthesiseUMD().to_dict()["StandardPass"]["name"] == "SynthesiseUMD"
# Share name with SquashCustom
assert SquashHQS().to_dict()["StandardPass"]["name"] == "SquashCustom"
assert set(SquashHQS().to_dict()["StandardPass"]["basis_singleqs"]) == {
"Rz",
"PhasedX",
}
assert FlattenRegisters().to_dict()["StandardPass"]["name"] == "FlattenRegisters"
assert DelayMeasures().to_dict()["StandardPass"]["name"] == "DelayMeasures"
def check_arc_dict(arc: Architecture, d: dict) -> bool:
links = [
{"link": [n1.to_list(), n2.to_list()], "weight": 1} for n1, n2 in arc.coupling
]
if d["links"] != links:
return False
else:
nodes = [Node(n[0], n[1]) for n in d["nodes"]]
return set(nodes) == set(arc.nodes)
def test_generated_pass_config() -> None:
# SquashCustom
def sq(a: float, b: float, c: float) -> Circuit:
circ = Circuit(1)
if c != 0:
circ.Rz(c, 0)
if b != 0:
circ.Rx(b, 0)
if a != 0:
circ.Rz(a, 0)
return circ
squash_pass = SquashCustom({OpType.Rz, OpType.Rx, OpType.Ry}, sq)
assert squash_pass.to_dict()["StandardPass"]["name"] == "SquashCustom"
assert set(squash_pass.to_dict()["StandardPass"]["basis_singleqs"]) == {
"Rz",
"Rx",
"Ry",
}
# RebaseCustom
cx = Circuit(2)
cx.CX(0, 1)
pz_rebase = RebaseCustom(
{OpType.CX}, cx, {OpType.PhasedX, OpType.Rz}, tk1_to_phasedxrz
)
assert pz_rebase.to_dict()["StandardPass"]["name"] == "RebaseCustom"
assert pz_rebase.to_dict()["StandardPass"]["basis_multiqs"] == ["CX"]
assert set(pz_rebase.to_dict()["StandardPass"]["basis_singleqs"]) == {
"PhasedX",
"Rz",
}
assert cx.to_dict() == pz_rebase.to_dict()["StandardPass"]["basis_cx_replacement"]
# EulerAngleReduction
euler_pass = EulerAngleReduction(OpType.Ry, OpType.Rx)
assert euler_pass.to_dict()["StandardPass"]["name"] == "EulerAngleReduction"
assert euler_pass.to_dict()["StandardPass"]["euler_q"] == "Ry"
assert euler_pass.to_dict()["StandardPass"]["euler_p"] == "Rx"
# RoutingPass
arc = Architecture([[0, 2], [1, 3], [2, 3], [2, 4]])
r_pass = RoutingPass(arc, swap_lookahead=10, bridge_interactions=10)
assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass"
assert r_pass.to_dict()["StandardPass"]["routing_config"]["depth_limit"] == 10
assert (
r_pass.to_dict()["StandardPass"]["routing_config"]["interactions_limit"] == 10
)
assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"])
# PlacementPass
placer = GraphPlacement(arc)
p_pass = PlacementPass(placer)
assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass"
assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement"
assert p_pass.to_dict()["StandardPass"]["placement"]["config"]["depth_limit"] == 5
# RenameQubitsPass
qm = {Qubit("a", 0): Qubit("b", 1), Qubit("a", 1): Qubit("b", 0)}
rn_pass = RenameQubitsPass(qm)
assert rn_pass.to_dict()["StandardPass"]["name"] == "RenameQubitsPass"
assert rn_pass.to_dict()["StandardPass"]["qubit_map"] == [
[k.to_list(), v.to_list()] for k, v in qm.items()
]
# FullMappingPass
fm_pass = FullMappingPass(arc, placer)
assert fm_pass.to_dict()["pass_class"] == "SequencePass"
p_pass = fm_pass.get_sequence()[0]
r_pass = fm_pass.get_sequence()[1]
assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass"
assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass"
assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"])
assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement"
# DefaultMappingPass
dm_pass = DefaultMappingPass(arc)
assert dm_pass.to_dict()["pass_class"] == "SequencePass"
p_pass = dm_pass.get_sequence()[0]
r_pass = dm_pass.get_sequence()[1]
assert p_pass.to_dict()["StandardPass"]["name"] == "PlacementPass"
assert r_pass.to_dict()["StandardPass"]["name"] == "RoutingPass"
assert check_arc_dict(arc, r_pass.to_dict()["StandardPass"]["architecture"])
assert p_pass.to_dict()["StandardPass"]["placement"]["type"] == "GraphPlacement"
# AASRouting
aas_pass = AASRouting(arc, lookahead=2)
assert aas_pass.to_dict()["pass_class"] == "SequencePass"
aas_pass_0 = aas_pass.get_sequence()[0]
assert aas_pass_0.to_dict()["pass_class"] == "SequencePass"
aasrou_pass = aas_pass.get_sequence()[1]
assert aasrou_pass.to_dict()["StandardPass"]["name"] == "AASRoutingPass"
assert check_arc_dict(arc, aasrou_pass.to_dict()["StandardPass"]["architecture"])
aas_pass_00 = aas_pass_0.get_sequence()[0]
assert aas_pass_00.to_dict()["pass_class"] == "SequencePass"
aaspla_pass = aas_pass_0.get_sequence()[1]
assert aaspla_pass.to_dict()["StandardPass"]["name"] == "PlacementPass"
rebase_pass = aas_pass_00.get_sequence()[0]
assert rebase_pass.to_dict()["StandardPass"]["name"] == "RebaseUFR"
comppb_pass = aas_pass_00.get_sequence()[1]
assert comppb_pass.to_dict()["StandardPass"]["name"] == "ComposePhasePolyBoxes"
# CXMappingPass
cxm_pass = CXMappingPass(arc, placer, directed_cx=True, delay_measures=True)
assert cxm_pass.to_dict()["pass_class"] == "SequencePass"
p0 = cxm_pass.get_sequence()[0]
p1 = cxm_pass.get_sequence()[1]
assert p0.to_dict()["pass_class"] == "SequencePass"
assert p1.to_dict()["StandardPass"]["name"] == "DecomposeSwapsToCXs"
    assert p1.to_dict()["StandardPass"]["directed"] is True
p00 = p0.get_sequence()[0]
p01 = p0.get_sequence()[1]
assert p00.to_dict()["pass_class"] == "SequencePass"
assert p01.to_dict()["StandardPass"]["name"] == "RebaseCustom"
assert p01.to_dict()["StandardPass"]["basis_cx_replacement"] == cx.to_dict()
p000 = p00.get_sequence()[0]
p001 = p00.get_sequence()[1]
assert p000.to_dict()["pass_class"] == "SequencePass"
assert p001.to_dict()["StandardPass"]["name"] == "DelayMeasures"
p0000 = p000.get_sequence()[0]
p0001 = p000.get_sequence()[1]
assert p0000.to_dict()["StandardPass"]["name"] == "RebaseCustom"
assert p0001.to_dict()["pass_class"] == "SequencePass"
p00010 = p0001.get_sequence()[0]
p00011 = p0001.get_sequence()[1]
assert p00010.to_dict()["StandardPass"]["name"] == "PlacementPass"
assert p00011.to_dict()["StandardPass"]["name"] == "RoutingPass"
assert check_arc_dict(arc, p00011.to_dict()["StandardPass"]["architecture"])
# CliffordSimp
clifford_pass = CliffordSimp(allow_swaps=False)
assert clifford_pass.to_dict()["StandardPass"]["name"] == "CliffordSimp"
    assert clifford_pass.to_dict()["StandardPass"]["allow_swaps"] is False
# DecomposeSwapsToCXs
swap_cx_pass = DecomposeSwapsToCXs(arc)
assert swap_cx_pass.to_dict()["StandardPass"]["name"] == "DecomposeSwapsToCXs"
    assert swap_cx_pass.to_dict()["StandardPass"]["directed"] is False
# DecomposeSwapsToCircuit
repcirc = Circuit(2)
repcirc.X(0)
repcirc.CX(0, 1)
repcirc.CX(1, 0)
repcirc.CX(0, 1)
repcirc.X(1)
swap_circ_pass = DecomposeSwapsToCircuit(repcirc)
assert swap_circ_pass.to_dict()["StandardPass"]["name"] == "DecomposeSwapsToCircuit"
assert (
swap_circ_pass.to_dict()["StandardPass"]["swap_replacement"]
== repcirc.to_dict()
)
# OptimisePhaseGadgets
pg_pass = OptimisePhaseGadgets(CXConfigType.Tree)
assert pg_pass.to_dict()["StandardPass"]["name"] == "OptimisePhaseGadgets"
assert pg_pass.to_dict()["StandardPass"]["cx_config"] == "Tree"
# PauliSimp
pauli_pass = PauliSimp()
assert pauli_pass.to_dict()["StandardPass"]["name"] == "PauliSimp"
assert pauli_pass.to_dict()["StandardPass"]["cx_config"] == "Snake"
assert pauli_pass.to_dict()["StandardPass"]["pauli_synth_strat"] == "Sets"
# GuidedPauliSimp
gpauli_pass = GuidedPauliSimp()
assert gpauli_pass.to_dict()["StandardPass"]["name"] == "GuidedPauliSimp"
assert gpauli_pass.to_dict()["StandardPass"]["cx_config"] == "Snake"
assert gpauli_pass.to_dict()["StandardPass"]["pauli_synth_strat"] == "Sets"
def test_repeat_pass_config() -> None:
# RepeatPass
rp = RepeatPass(SequencePass([CommuteThroughMultis(), RemoveRedundancies()]))
assert rp.to_dict()["pass_class"] == "RepeatPass"
sps = rp.get_pass().get_sequence()
assert sps[0].to_dict()["StandardPass"]["name"] == "CommuteThroughMultis"
assert sps[1].to_dict()["StandardPass"]["name"] == "RemoveRedundancies"
# RepeatWithMetricPass
    def number_of_CX(circ: Circuit) -> int:
        return circ.n_gates_of_type(OpType.CX)
rp = RepeatWithMetricPass(
SequencePass([CommuteThroughMultis(), RemoveRedundancies()]), number_of_CX
)
assert rp.to_dict()["pass_class"] == "RepeatWithMetricPass"
sps = rp.get_pass().get_sequence()
assert sps[0].to_dict()["StandardPass"]["name"] == "CommuteThroughMultis"
assert sps[1].to_dict()["StandardPass"]["name"] == "RemoveRedundancies"
cx = Circuit(2)
cx.CX(0, 1)
cx.CX(1, 0)
assert number_of_CX(cx) == rp.get_metric()(cx)
# RepeatUntilSatisfiedPass
    def no_CX(circ: Circuit) -> bool:
        return circ.n_gates_of_type(OpType.CX) == 0
rp = RepeatUntilSatisfiedPass(
SequencePass([CommuteThroughMultis(), RemoveRedundancies()]), no_CX
)
assert rp.to_dict()["pass_class"] == "RepeatUntilSatisfiedPass"
sps = rp.get_pass().get_sequence()
assert sps[0].to_dict()["StandardPass"]["name"] == "CommuteThroughMultis"
assert sps[1].to_dict()["StandardPass"]["name"] == "RemoveRedundancies"
assert rp.get_predicate().__repr__() == "UserDefinedPredicate"
assert (
rp.to_dict()["RepeatUntilSatisfiedPass"]["predicate"]["type"]
== "UserDefinedPredicate"
)
    # NoClassicalControlPredicate is assumed to be imported from pytket.predicates
    nccp = NoClassicalControlPredicate()
    rp = RepeatUntilSatisfiedPass(
        SequencePass([CommuteThroughMultis(), RemoveRedundancies()]), nccp
    )
assert rp.to_dict()["pass_class"] == "RepeatUntilSatisfiedPass"
sps = rp.get_pass().get_sequence()
assert sps[0].to_dict()["StandardPass"]["name"] == "CommuteThroughMultis"
assert sps[1].to_dict()["StandardPass"]["name"] == "RemoveRedundancies"
assert rp.get_predicate().__repr__() == "NoClassicalControlPredicate"
assert (
rp.to_dict()["RepeatUntilSatisfiedPass"]["predicate"]["type"]
== "NoClassicalControlPredicate"
)
def test_apply_pass_with_callbacks() -> None:
class CallbackHandler:
def __init__(self) -> None:
self.pass_names: List[str] = []
def before_apply(self, cu: CompilationUnit, config: Dict[str, Any]) -> None:
if "StandardPass" in config:
self.pass_names.append(config["StandardPass"]["name"])
else:
self.pass_names.append(config["pass_class"])
def after_apply(self, cu: CompilationUnit, config: Dict[str, Any]) -> None:
return
def compile(circ: Circuit, handler: CallbackHandler) -> bool:
p = SequencePass([CommuteThroughMultis(), RemoveRedundancies()])
return p.apply(circ, handler.before_apply, handler.after_apply) # type: ignore
circ = Circuit(5)
circ.CX(0, 1)
circ.CX(2, 4)
circ.CX(0, 1)
handler = CallbackHandler()
compile(circ, handler)
assert circ.n_gates_of_type(OpType.CX) == 1
assert len(handler.pass_names) == 3
assert handler.pass_names[0] == "SequencePass"
assert handler.pass_names[1] == "CommuteThroughMultis"
assert handler.pass_names[2] == "RemoveRedundancies"
def test_remove_discarded() -> None:
c = Circuit(3, 2)
c.H(0).H(1).H(2).CX(0, 1).Measure(0, 0).Measure(1, 1).H(0).H(1)
c.qubit_discard(Qubit(0))
c.qubit_discard(Qubit(2))
assert not c.qubit_is_discarded(Qubit(1))
assert c.qubit_is_discarded(Qubit(2))
assert RemoveDiscarded().apply(c)
print(c.get_commands())
assert c.n_gates_of_type(OpType.H) == 3
assert c.n_gates_of_type(OpType.CX) == 1
assert c.n_gates_of_type(OpType.Measure) == 2
def test_simplify_measured() -> None:
c = Circuit(3, 3)
c.H(0).H(1).Z(2)
u = np.array(
[
[0, 0, 0, np.exp(0.1j)],
[np.exp(0.2j), 0, 0, 0],
[0, np.exp(0.3j), 0, 0],
[0, 0, np.exp(0.4j), 0],
],
dtype=complex,
)
ubox = Unitary2qBox(u)
c.add_unitary2qbox(ubox, 0, 1)
c.measure_all()
c.qubit_discard(Qubit(0))
c.qubit_discard(Qubit(1))
assert SimplifyMeasured().apply(c)
assert c.n_gates_of_type(OpType.H) == 2
assert c.n_gates_of_type(OpType.Z) == 1
assert c.n_gates_of_type(OpType.Unitary2qBox) == 0
assert c.n_gates_of_type(OpType.Measure) == 3
assert c.n_gates_of_type(OpType.ClassicalTransform) == 1
def test_simplify_initial_1() -> None:
c = Circuit(4)
c.H(0).X(1).CY(1, 2).CX(0, 1).CX(2, 3).H(1).H(2)
assert not c.qubit_is_created(Qubit(0))
c.qubit_create(Qubit(0))
assert c.qubit_is_created(Qubit(0))
c.qubit_create(Qubit(1))
c.qubit_create(Qubit(2))
assert SimplifyInitial().apply(c)
assert c.n_gates_of_type(OpType.CY) == 0
assert c.n_gates_of_type(OpType.CX) == 2
def test_simplify_initial_2() -> None:
c = Circuit(1, 1).Y(0).measure_all()
c.qubit_create_all()
c1 = c.copy()
assert SimplifyInitial(False).apply(c1)
assert c1.n_gates_of_type(OpType.Y) == 0
assert c1.n_gates_of_type(OpType.X) == 1
assert c1.n_gates_of_type(OpType.Measure) == 1
assert c1.n_gates_of_type(OpType.SetBits) == 0
c2 = c.copy()
xcirc = Circuit(1).Rx(1, 0)
assert SimplifyInitial(xcirc=xcirc).apply(c2)
assert c2.n_gates_of_type(OpType.Y) == 0
assert c2.n_gates_of_type(OpType.Rx) == 1
assert c2.n_gates_of_type(OpType.Measure) == 0
assert c2.n_gates_of_type(OpType.SetBits) == 1
def test_simplify_initial_3() -> None:
c = Circuit(2).X(0).CX(0, 1).CX(1, 0).X(1).CX(1, 0)
c0 = c.copy()
assert SimplifyInitial(create_all_qubits=True, remove_redundancies=False).apply(c0)
c0_cmds = c0.get_commands()
assert len(c0_cmds) > 0
assert all(cmd.op.type == OpType.X for cmd in c0_cmds)
c1 = c.copy()
assert SimplifyInitial(create_all_qubits=True, remove_redundancies=True).apply(c1)
c1_cmds = c1.get_commands()
assert len(c1_cmds) == 0
def test_pauli_squash() -> None:
c = Circuit(3)
c.add_pauliexpbox(PauliExpBox([Pauli.Z, Pauli.X, Pauli.Z], 0.8), [0, 1, 2])
c.add_pauliexpbox(PauliExpBox([Pauli.Y, Pauli.X, Pauli.X], 0.2), [0, 1, 2])
for strat in [
PauliSynthStrat.Individual,
PauliSynthStrat.Pairwise,
PauliSynthStrat.Sets,
]:
for cx_config in [CXConfigType.Snake, CXConfigType.Star, CXConfigType.Tree]:
c1 = c.copy()
assert PauliSquash().apply(c1)
assert c1.n_gates_of_type(OpType.CX) <= 4
def test_three_qubit_squash() -> None:
c = Circuit(3)
for i in range(21):
c.H(i % 3)
c.CX(i % 3, (i + 1) % 3)
c.measure_all()
assert ThreeQubitSquash().apply(c)
assert c.n_gates_of_type(OpType.CX) <= 18
if __name__ == "__main__":
test_predicate_generation()
test_compilation_unit_generation()
test_compilerpass_seq()
test_rebase_pass_generation()
test_routing_and_placement_pass()
test_default_mapping_pass()
test_SynthesiseTket_creation()
test_directed_cx_pass()
test_decompose_routing_gates_to_cxs()
test_user_defined_swap_decomp()
test_squash_chains()
test_library_pass_config()
test_generated_pass_config()
test_repeat_pass_config()
test_apply_pass_with_callbacks()
test_remove_barriers()
    test_RebaseOQC_and_SynthesiseOQC()
    test_default_mapping_pass_phase_poly_aas()
    test_rename_qubits_pass()
    test_pauligraph_synth()
    test_remove_discarded()
    test_simplify_measured()
    test_simplify_initial_1()
    test_simplify_initial_2()
    test_simplify_initial_3()
    test_pauli_squash()
    test_three_qubit_squash()
|
{"hexsha": "5a70ae847e54040ee60dffb8b94f742360d24f0b", "size": 32046, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytket/tests/predicates_test.py", "max_stars_repo_name": "NewGitter2017/tket", "max_stars_repo_head_hexsha": "6ff81af26280770bf2ca80bfb2140e8fa98182aa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytket/tests/predicates_test.py", "max_issues_repo_name": "NewGitter2017/tket", "max_issues_repo_head_hexsha": "6ff81af26280770bf2ca80bfb2140e8fa98182aa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytket/tests/predicates_test.py", "max_forks_repo_name": "NewGitter2017/tket", "max_forks_repo_head_hexsha": "6ff81af26280770bf2ca80bfb2140e8fa98182aa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9691358025, "max_line_length": 102, "alphanum_fraction": 0.6446046308, "include": true, "reason": "import numpy", "num_tokens": 10196}
|
import os
import sys
import glob
import argparse
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.models import load_model
from tensorflow.python.keras.applications.resnet50 import preprocess_input
from tensorflow.python.keras.preprocessing import image
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
files = get_files('1.jpg')
cls_list = ['Cat', 'Dog']
net = load_model('model-resnet50-final.h5')
for f in files:
img = image.load_img(f,target_size=(224,224))
if img is None:
continue
x = image.img_to_array(img)
x = preprocess_input(x)
x = np.expand_dims(x, axis=0)
pred = net.predict(x)[0]
top_inds = pred.argsort()[::-1][:5]
for i in top_inds:
if pred[i] > 0.75:
print(cls_list[i])
html = "<html><body><h1>"
html += cls_list[i]+"</h1></body></html>"
return html
else:
return "I am Not sure"
def get_files(path):
if os.path.isdir(path):
files = glob.glob(os.path.join(path, '*'))
elif path.find('*') > 0:
files = glob.glob(path)
else:
files = [path]
files = [f for f in files if f.endswith('JPG') or f.endswith('jpg')]
if not len(files):
        sys.exit('No images found at the given path!')
return files
if __name__ == '__main__':
    app.run(debug=True)
|
{"hexsha": "9bfd1c9b1a4afaf855dcc4c0d4dc926d37979247", "size": 1360, "ext": "py", "lang": "Python", "max_stars_repo_path": "DCrec.py", "max_stars_repo_name": "aditya210/Image-Classification-Cats-vs-Dogs", "max_stars_repo_head_hexsha": "8c96c405180ad8132243a4f00262004d4cbbb272", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DCrec.py", "max_issues_repo_name": "aditya210/Image-Classification-Cats-vs-Dogs", "max_issues_repo_head_hexsha": "8c96c405180ad8132243a4f00262004d4cbbb272", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DCrec.py", "max_forks_repo_name": "aditya210/Image-Classification-Cats-vs-Dogs", "max_forks_repo_head_hexsha": "8c96c405180ad8132243a4f00262004d4cbbb272", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7272727273, "max_line_length": 74, "alphanum_fraction": 0.6588235294, "include": true, "reason": "import numpy", "num_tokens": 360}
|
# 2D FWI gradient test with 4 sources
# The receiver positions and the source wavelets are the same for each of the four experiments.
# Author: Philipp Witte, pwitte@eos.ubc.ca
# Date: January 2017
#
# Mathias Louboutin, mlouboutin3@gatech.edu
# Updated July 2020
parsed_args = parse_commandline()
nlayer = parsed_args["nlayer"]
tti = parsed_args["tti"]
fs = parsed_args["fs"]
### Model
model, model0, dm = setup_model(parsed_args["tti"], 4)
q, srcGeometry, recGeometry, info = setup_geom(model)
dt = srcGeometry.dt[1]
###################################################################################################
@testset "TWRI gradient test w.r.t m with $(nlayer) layers and tti $(tti) and freesurface $(fs)" begin
optw = TWRIOptions(grad_corr=false, comp_alpha=false, weight_fun=nothing, eps=0, params=:m)
# Gradient test
h = 5f-2
maxiter = 5
err1 = zeros(maxiter)
err2 = zeros(maxiter)
h_all = zeros(maxiter)
modelH = deepcopy(model0)
# Observed data
opt = Options(sum_padding=true, free_surface=parsed_args["fs"])
F = judiModeling(info, model, srcGeometry, recGeometry; options=opt)
F0 = judiModeling(info, model0, srcGeometry, recGeometry; options=opt)
d = F*q
d0 = F0*q
y = 2.5f0*(d0 - d)
# FWI gradient and function value for m0
Jm0, gradm = twri_objective(model0, q, d, y; options=opt, optionswri=optw)
dJ = dot(gradm, dm)
@printf("Perturbation size is %2.2e and reference objective function is %2.4e \n", dJ, Jm0)
for j=1:maxiter
        # FWI gradient and function value for m0 + h*dm
modelH.m = model0.m + h*dm
Jm, _ = twri_objective(modelH, q, d, y; options=opt, optionswri=optw)
# Check convergence
err1[j] = abs(Jm - Jm0)
err2[j] = abs(Jm - Jm0 - h*dJ)
j == 1 ? prev = 1 : prev = j - 1
@printf("h = %2.2e, phi = %2.4e, e1 = %2.2e, rate = %2.2e", h, Jm, err1[j], err1[prev]/err1[j])
@printf(", e2 = %2.2e, rate = %2.2e \n", err2[j], err2[prev]/err2[j])
h_all[j] = h
h = h * .8f0
end
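    # With h shrinking by a factor of 0.8 each iteration, a first-order error
    # err1 = O(h) gives successive ratios of 1/0.8 = 1.25, while a second-order
    # error err2 = O(h^2) gives 1/0.8^2 = 1.5625; these are the rates asserted below.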
    # Check convergence rates
rate_1 = sum(err1[1:end-1]./err1[2:end])/(maxiter - 1)
rate_2 = sum(err2[1:end-1]./err2[2:end])/(maxiter - 1)
@test isapprox(rate_1, 1.25f0; rtol=5f-2)
@test isapprox(rate_2, 1.5625f0; rtol=5f-2)
end
@testset "TWRI gradient test w.r.t y with $(nlayer) layers and tti $(tti) and freesurface $(fs)" begin
optw = TWRIOptions(grad_corr=false, comp_alpha=false, weight_fun=nothing, eps=0, params=:y)
# Gradient test
h = 5f-2
maxiter = 5
err1 = zeros(maxiter)
err2 = zeros(maxiter)
h_all = zeros(maxiter)
modelH = deepcopy(model0)
# Observed data
opt = Options(sum_padding=true, free_surface=parsed_args["fs"])
F = judiModeling(info, model, srcGeometry, recGeometry; options=opt)
F0 = judiModeling(info, model0, srcGeometry, recGeometry; options=opt)
d = F*q
d0 = F0*q
y = .1f0 * (d0 - d)
dy = .05f0*(d0 - d)
# FWI gradient and function value for m0
Jm0, grady = twri_objective(model0, q, d, y; options=opt, optionswri=optw)
dJ = dot(grady, dy)
@printf("Perturbation size is %2.4e and reference objective function is %2.4e \n", dJ, Jm0)
for j=1:maxiter
        # FWI gradient and function value for m0 + h*dm
yloc = y + h * dy
Jm, _ = twri_objective(model0, q, d, yloc;options=opt, optionswri=optw)
# Check convergence
err1[j] = abs(Jm - Jm0)
err2[j] = abs(Jm - Jm0 - h*dJ)
j == 1 ? prev = 1 : prev = j - 1
@printf("h = %2.2e, phi = %2.4e, e1 = %2.2e, rate = %2.2e", h, Jm, err1[j], err1[prev]/err1[j])
@printf(", e2 = %2.2e, rate = %2.2e \n", err2[j], err2[prev]/err2[j])
h_all[j] = h
h = h * .8f0
end
    # Check convergence rates
rate_1 = sum(err1[1:end-1]./err1[2:end])/(maxiter - 1)
rate_2 = sum(err2[1:end-1]./err2[2:end])/(maxiter - 1)
@test isapprox(rate_1, 1.25f0; rtol=5f-2)
@test isapprox(rate_2, 1.5625f0; rtol=5f-2)
end
|
{"hexsha": "26958100316a11b0e16f4392e47dd28a3f6e5246", "size": 3751, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_gradient_twri.jl", "max_stars_repo_name": "nogueirapeterson/JUDI", "max_stars_repo_head_hexsha": "cc76e950929f0b7a3cf29c2dff71e432e8ea26f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 71, "max_stars_repo_stars_event_min_datetime": "2018-01-13T00:20:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T02:55:25.000Z", "max_issues_repo_path": "test/test_gradient_twri.jl", "max_issues_repo_name": "nogueirapeterson/JUDI", "max_issues_repo_head_hexsha": "cc76e950929f0b7a3cf29c2dff71e432e8ea26f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2018-02-08T18:01:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:44:37.000Z", "max_forks_repo_path": "test/test_gradient_twri.jl", "max_forks_repo_name": "nogueirapeterson/JUDI", "max_forks_repo_head_hexsha": "cc76e950929f0b7a3cf29c2dff71e432e8ea26f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2018-02-08T11:07:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T16:35:25.000Z", "avg_line_length": 33.4910714286, "max_line_length": 102, "alphanum_fraction": 0.6475606505, "num_tokens": 1431}
|
# Zero Momentum
function lam4piflow(k, m2, T, Npi, lamda4pi)
return lamda4pi^2 * (8 + Npi) * (dkF1TildeAll(k, m2, T)
+ dkF2TildeAll(k, m2, T)
)
end
function dkVintqs_Zero(q0, k, kprim, m, T, lam4pik, Npi; kwargs...)
return lam4pik^2 *(2 + Npi) *(
k^3 *
(2 + Npi) *
(dkF1TildeAll(kprim, m, T)
+ dkF2TildeAll(kprim, m, T)
) / 3 +
6 * dkF1TildeintqsAll(q0, k, kprim, m, T; kwargs...)
+
6 * dkF2TildeintqsAll(q0, k, kprim, m, T; kwargs...)
)
end
function Vintqs_Zero(k, T, Npi, hfun, UVScale; kwargs...)
-2 * hquadrature(
kprim -> dkVintqs_Zero(
Epi(k, -hfun(k)[2]),
k,
kprim,
-hfun(kprim)[2],
T,
hfun(kprim)[1],
Npi;
kwargs...,
),
k,
UVScale;
kwargs...,
# initdiv=200,
)[1] + (2 * k^3 * hfun(UVScale)[1] * (2 + Npi)) / 3
end
function Vintqs_LPA(k, Npi, lamdapik)
(2 * k^3 * lamdapik * (2 + Npi)) / 3
end
function propReZeroflow(k, T, Npi, hfun, UVScale; kwargs...)
2 *
Coeffgamm2Simple(k, T, -hfun(k)[2]) *
Vintqs_Zero(k, T, Npi, hfun, UVScale; kwargs...)
end
function propReLPAflow(k, T, Npi, hfun)
2 * Coeffgamm2Simple(k, T, -hfun(k)[2]) * Vintqs_LPA(k, Npi, hfun(k)[1])
end
function dkV4piResimple(p0, ps, q0, qs, costhe, k, m, T, lam4pik, Npi)
return lam4pik^2 *
(2 + Npi) *
(
2 * (2 + Npi) * (dkF1TildeAll(k, m, T) + dkF2TildeAll(k, m, T)) +
6 * dkF1TildeAll(
p0 - q0,
max(0.1, sqrt(ps^2 + 2 * costhe * ps * qs + qs^2)),
k,
m,
T,
) +
6 * dkF1TildeAll(
p0 + q0,
max(0.1, sqrt(ps^2 + 2 * costhe * ps * qs + qs^2)),
k,
m,
T,
) +
6 * (
dkF2TildeAll(
p0 - q0,
max(0.1, sqrt(ps^2 + 2 * costhe * ps * qs + qs^2)),
k,
m,
T,
) + dkF2TildeAll(
p0 + q0,
max(0.1, sqrt(ps^2 + 2 * costhe * ps * qs + qs^2)),
k,
m,
T,
)
)
)
end
@doc raw"""
dkVReintqs(p0, ps, q0, qsmax, k, m, T, Npi, lam4pik)
compute $\int_0^{qsmax}dq_s qs^2\int_{-1}^{1}d\cos\theta \tilde{\partial_k}\mathrm{Re}V(q_0)$.
`dkVReintqs` only contains $V(q_0)$; for $-q_0$, we have $\int d\cos\theta V(q_0)=\int d\cos\theta V(-q_0)$,
so an extra factor of $2$ is needed somewhere.
`dkVReintqs` contains the type-1 and type-2 delta functions.
# Arguments
- `qsmax`: we integrate $q_s$ from $0$ to $k$, `qsmax` will set to `k` when we do the integration $dk'$, it should be distinguished from $k'$
- `m`: mass square, it will be $m(k')$ when we do the integration $dk'$.
- `lam4pik`: $\lambda_{4\pi}$, it will be $\lambda_{4\pi}(k')$ when we do the integration $dk'$.
"""
function dkVReintqs(p0, ps, q0, qsmax, k, m, T, Npi, lam4pik; kwarg...)
return lam4pik^2 * (2 + Npi) * (3 * (
dkF1TildeintqsAll(p0 - q0, ps, qsmax, k, m, T; kwarg...) +
dkF1TildeintqsAll(p0 + q0, ps, qsmax, k, m, T; kwarg...)
+
dkF2TildeintqsAll(p0 - q0, ps, qsmax, k, m, T; kwarg...) +
dkF2TildeintqsAll(p0 + q0, ps, qsmax, k, m, T; kwarg...)
) +
(Npi + 2) * 2 / 3 *
qsmax^3 *
(dkF1TildeAll(k, m, T)
+ dkF2TildeAll(k, m, T)
)
)
end
function dkVReintqs_Compensate(p0, ps, q0, qsmax, k, m, T, Npi, lam4pik)
return lam4pik^2 *
(2 + Npi) *
(
3 * (
dkF1TildeintqsAll_Compensate(p0 - q0, ps, qsmax, k, m, T) +
dkF1TildeintqsAll_Compensate(p0 + q0, ps, qsmax, k, m, T)
)
)
end
function dkVReintqs_delta1(p0, ps, q0, qsmax, k, m, T, Npi, lam4pik)
return lam4pik^2 *
(2 + Npi) *
(
3 * (
dkF1TildeintqsAll_delta1(p0 - q0, ps, qsmax, k, m, T) +
dkF1TildeintqsAll_delta1(p0 + q0, ps, qsmax, k, m, T)
)
)
end
function dkVReintqs_delta2(p0, ps, q0, qsmax, k, m, T, Npi, lam4pik)
return lam4pik^2 *
(2 + Npi) *
(
3 * (
dkF1TildeintqsAll_delta2(p0 - q0, ps, qsmax, k, m, T) +
dkF1TildeintqsAll_delta2(p0 + q0, ps, qsmax, k, m, T)
)
)
end
@doc raw"""
VReintqs(p0, ps, k, T, Npi,IRScale,UVScale, mfun, lamfun)
compute $\int_0^{k}dq_s qs^2\int_{-1}^{1}d\cos\theta \mathrm{Re}V(q_0,k)$.
In our code, we perform the integration over `kprim`; `q0` and `qs` are not integration variables,
so we set `qs = k` and `q0 = Epi(k, mfun(k))`.
`VReintqs` contains type-1 and type-2 delta function.
# Arguments
- `mfun::Function`: $m^2(k)$, input from the zero-momentum result
- `lamfun::Function`: $\lambda_{4\pi}(k)$, input from the zero-momentum result.
"""
function VReintqs(p0, ps, k, T, Npi, IRScale, UVScale, mfun, lamfun; kwarg...)
-hquadrature(
kprim -> dkVReintqs(
p0,
ps,
Epi(k, mfun(k)),
k,
kprim,
mfun(kprim),
T,
Npi,
lamfun(kprim);
kwarg...,
),
k,
UVScale;
kwarg...,
)[1] + 2 * (k^3 * lamfun(UVScale) * (2 + Npi)) / 3
end
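# Hypothetical usage sketch (mfun/lamfun are illustrative stand-ins for the
# zero-momentum flow results and every number below is made up):
#   mfun(k) = 0.1 + 0.01 * k^2    # m^2(k)
#   lamfun(k) = 8.0               # lambda_4pi(k)
#   VReintqs(1.0, 0.5, 0.2, 0.145, 4, 0.05, 1.0, mfun, lamfun; rtol=1e-4)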
function propReintqs(p0, ps, T, IRScale, UVScale, Npi, mfun, lamfun; kwarg...)
-hquadrature(
k ->
2 *
VReintqs(
p0,
ps,
k,
T,
Npi,
IRScale,
UVScale,
mfun,
lamfun;
kwarg...,
) *
Coeffgamm2(k, T, mfun),
IRScale,
UVScale,
rtol=1e-8,
atol=1e-8,
)[1] + p0^2 - ps^2 - mfun(UVScale)
end
function fastpropReintqs(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return -hcubature(
x ->
(x[1] - UVScale) *
2 *
dkVReintqs(
p0,
ps,
Epi(x[1], mfun(x[1])),
x[1],
x[2] * (x[1] - UVScale) + UVScale,
mfun(x[2] * (x[1] - UVScale) + UVScale),
T,
Npi,
lamfun(x[2] * (x[1] - UVScale) + UVScale);
kwarg...,
) *
Coeffgamm2(x[1], T, mfun) +
2 *
Coeffgamm2(x[1], T, mfun) *
(2 * x[1]^3 * lamfun(UVScale) * (2 + Npi)) / 3,
[IRScale, 0.0],
[UVScale, 1.0];
kwarg...,
)[1] + p0^2 - ps^2 - mfun(UVScale)
end
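# The hcubature call above integrates over the triangular region
# IRScale <= k <= kprim <= UVScale by mapping the inner variable onto the unit
# interval: kprim = x[2]*(x[1] - UVScale) + UVScale, which brings in the
# Jacobian factor (x[1] - UVScale); the leading minus sign restores the
# orientation, since x[2] running from 0 to 1 sweeps kprim from UVScale down
# to x[1]. A self-contained sketch of the same substitution, assuming
# HCubature.jl (or Cubature.jl) provides hcubature and using a toy integrand
# that is NOT the physics above:
#
#   using HCubature
#   f(k, kprim) = k * exp(-kprim)  # toy integrand
#   IRScale, UVScale = 0.1, 1.0
#   val, err = hcubature(
#       x -> -(x[1] - UVScale) * f(x[1], x[2] * (x[1] - UVScale) + UVScale),
#       [IRScale, 0.0], [UVScale, 1.0])
#   # val approximates the k-integral from IRScale to UVScale of the
#   # kprim-integral from k to UVScale of f(k, kprim)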
function fastpropReintqs_Compensate(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return -hcubature(
x ->
(x[1] - UVScale) *
2 *
dkVReintqs_Compensate(
p0,
ps,
Epi(x[1], mfun(x[1])),
x[1],
x[2] * (x[1] - UVScale) + UVScale,
mfun(x[2] * (x[1] - UVScale) + UVScale),
T,
Npi,
lamfun(x[2] * (x[1] - UVScale) + UVScale),
) *
Coeffgamm2(x[1], T, mfun),
[IRScale, 0.0],
[UVScale, 1.0];
kwarg...,
)[1]
end
function fastpropReintqs_delta1(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return -hcubature(
x ->
(x[1] - UVScale) *
2 *
dkVReintqs_delta1(
p0,
ps,
Epi(x[1], mfun(x[1])),
x[1],
x[2] * (x[1] - UVScale) + UVScale,
mfun(x[2] * (x[1] - UVScale) + UVScale),
T,
Npi,
lamfun(x[2] * (x[1] - UVScale) + UVScale),
) *
Coeffgamm2(x[1], T, mfun),
[IRScale, 0.0],
[UVScale, 1.0];
kwarg...,
)[1]
end
function fastpropReintqs_delta2(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return -hcubature(
x ->
(x[1] - UVScale) *
2 *
dkVReintqs_delta2(
p0,
ps,
Epi(x[1], mfun(x[1])),
x[1],
x[2] * (x[1] - UVScale) + UVScale,
mfun(x[2] * (x[1] - UVScale) + UVScale),
T,
Npi,
lamfun(x[2] * (x[1] - UVScale) + UVScale),
) *
Coeffgamm2(x[1], T, mfun),
[IRScale, 0.0],
[UVScale, 1.0];
kwarg...,
)[1]
end
function fastpropReintqs_All(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return fastpropReintqs(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
) +
fastpropReintqs_Compensate(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
) +
fastpropReintqs_delta1(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
) +
fastpropReintqs_delta2(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
end
function fastpropReintqs2(
p0,
ps,
T,
IRScale,
UVScale,
Npi,
mfun,
lamfun;
kwarg...,
)
return -hcubature(
x ->
(x[1] - UVScale) *
2 *
dkVReintqs(
p0,
ps,
Epi(x[1], mfun(x[1])),
x[1],
x[2] * (x[1] - UVScale) + UVScale,
mfun(x[2] * (x[1] - UVScale) + UVScale),
T,
Npi,
lamfun(x[2] * (x[1] - UVScale) + UVScale);
kwarg...,
) *
Coeffgamm2(x[1], T, mfun),
[IRScale, 0.0],
[UVScale, 1.0];
kwarg...,
)[1] - hquadrature(
k ->
2 *
Coeffgamm2(k, T, mfun) *
(2 * (k^3 * lamfun(UVScale) * (2 + Npi)) / 3),
IRScale,
UVScale;
kwarg...,
)[1] + p0^2 - ps^2 - mfun(UVScale)
end
|
{"hexsha": "1b33ed00d69d85f6ad9a460c65cf947d7a1e7a6a", "size": 11640, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/vertexRe.jl", "max_stars_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_stars_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-11T06:52:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T13:04:20.000Z", "max_issues_repo_path": "src/vertexRe.jl", "max_issues_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_issues_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vertexRe.jl", "max_forks_repo_name": "Yangyang-Tan/FRGRealTime.jl", "max_forks_repo_head_hexsha": "6581b783432a5d5d08d00c887b483f9596d12fe3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9785407725, "max_line_length": 142, "alphanum_fraction": 0.3800687285, "num_tokens": 3713}
|
# Comment from 9/14/2019 --> Wow. This is the first neural network program for classifying images.
# I was pretty clueless, but I won't touch the original comments just for history!
# My experience with the TensorFlow tutorial on "basic" classification. (i found this rly hard)
# Website: https://www.tensorflow.org/tutorials/keras/basic_classification
# Github: https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb
# I realized I spelled a bunch of words wrong
# Importing Libraries
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
# fixes a matlab bug
matplotlib.use('TkAgg')
from time import sleep
import matplotlib.pyplot as plt
# getting TensorFlow version and fetching dataset
print("TensorFlow version: " + tf .__version__ + "\n--------")
print("loading data...\n")
fashionDataSet = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashionDataSet.load_data()
# class names (y)
classNames = ['t-shirt', 'pants', 'hoodie', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
# Getting dataset details
print('(Number of images, pixel width, pixel height)')
print(train_images.shape)
print("Training Labels: " + str(len(train_labels)))
print()
print("(Number of images, pixel width, pixel height)")
print(test_images.shape)
print("Testing Labels: " + str(len(test_labels)))
print()
# asking user what to test
labrat = int(input("Test number: "))
# what it says below:
print("Seting up the first visual figure")
plt.figure()
plt.imshow(test_images[labrat])
plt.colorbar()
plt.grid(False)
# Making the classes in a visual format
print("Seting up the second visual figure")
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(classNames[train_labels[i]])
print("Would you like to see the visuals?")
answer = input("y/n: ")
if (answer == "y"):
plt.show()
# convert pixel color values to the range 0-1 (original range 0-255)
train_images = train_images / 255.00
test_images = test_images / 255.00
print("Preparing pixel data for activation function")
print("Creating layers in Nuerel Network")
nuerelNetModel = keras.Sequential([
# layer to convert images from 2D to 1D (not trainable)
keras.layers.Flatten(input_shape=(28,28)),
    # Dense means the layer is fully connected and trainable (the og layer)
    # units is the number of nodes in the layer (circle things)
    # activation picks the activation function (relu works best here; Sigmoid and Tanh are also nice)
keras.layers.Dense(128, activation=tf.nn.relu),
    # this is the output layer, which produces an array of probabilities over the 10 items.
keras.layers.Dense(10, activation=tf.nn.softmax)
])
# compiling the neural net
nuerelNetModel.compile(
    # the best optimizer depends on the dataset/problem; adam is faster here, but others work too, just slower
optimizer=tf.train.AdamOptimizer(),
    # loss function (sparse is used because the classes are represented as integers, not one-hot vectors)
loss='sparse_categorical_crossentropy',
    # there are several metrics for measuring how precise the model is, and accuracy is good enough here.
metrics=['accuracy']
)
# asking user for epochs(you'll see later)
print("Done.")
epochs = input("Enter amount of epochs: ")
input("Press [Enter] to start training")
# making the presentation of the data nicer ;)
print("\n================================")
print("[TRAINING OUTPUT]")
print("================================\n")
# TRAINING TIME! so excited.
# ML peeps like to borrow real-life physical exercise terms for ML
# logging the results (they are formatted in arrays) into a variable
# (x axis data, y axis data, amount of time to use whole data set)
epochsResult = nuerelNetModel.fit(train_images, train_labels, epochs=int(epochs))
# Let's grade the work!
# assigning these variables to the success rate of the neural net
print("\n================================")
print("[TESTING OUTPUT]")
print("================================\n")
testLoss, testAccuracy = nuerelNetModel.evaluate(test_images, test_labels)
print("Accuracy during testing: ", testAccuracy)
# get the previous history of the accuracy in each epoch and get the mean average
trainAccuracy = np.mean(epochsResult.history["acc"])
print("Accuracy during training: ", trainAccuracy)
# rip, the testing accuracy is less than the training accuracy
# that means we overfit our neural network. This can be fixed by using different
# functions and stuff but my knowledge is limited so maybe I'll tackle this next time when I get more IQ
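# One common fix (a sketch on my part, not part of the original tutorial) is
# to add a Dropout layer between the Dense layers:
#
#   nuerelNetModel = keras.Sequential([
#       keras.layers.Flatten(input_shape=(28, 28)),
#       keras.layers.Dense(128, activation=tf.nn.relu),
#       keras.layers.Dropout(0.2),  # randomly zero 20% of activations during training
#       keras.layers.Dense(10, activation=tf.nn.softmax)
#   ])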
# LETS PREDICT!
print("\n================================")
print("[PREDICTION OUTPUT]")
print("================================\n")
# our model can now predict images! Let's use our testing images
predictions = nuerelNetModel.predict(test_images)
# we can see the output layer's values for an input image (here the test image chosen earlier)
print(predictions[labrat], "\n")
# we take the highest value because that's the most likely class
typeOfClothing = (np.argmax(predictions[labrat]))
# converts the integer into English
print("The Nuerel Net says its a form of " + classNames[typeOfClothing] + ".")
print("It's a " + classNames[test_labels[labrat]])
|
{"hexsha": "1141078304712ea161305aa255fffbd3c794e58f", "size": 5360, "ext": "py", "lang": "Python", "max_stars_repo_path": "Neural Networks/Tensorflow/FasionMNIST/fasionMNST_classifier.py", "max_stars_repo_name": "B0kCh01/Machine-Learning-COdes", "max_stars_repo_head_hexsha": "8fc442cd3a139ecd6e6d0a3f352e7bfc8470365e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-12T01:49:11.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-12T01:49:11.000Z", "max_issues_repo_path": "Neural Networks/Tensorflow/FasionMNIST/fasionMNST_classifier.py", "max_issues_repo_name": "B0kCh01/Machine-Learning-COdes", "max_issues_repo_head_hexsha": "8fc442cd3a139ecd6e6d0a3f352e7bfc8470365e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Neural Networks/Tensorflow/FasionMNIST/fasionMNST_classifier.py", "max_forks_repo_name": "B0kCh01/Machine-Learning-COdes", "max_forks_repo_head_hexsha": "8fc442cd3a139ecd6e6d0a3f352e7bfc8470365e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4825174825, "max_line_length": 107, "alphanum_fraction": 0.7177238806, "include": true, "reason": "import numpy", "num_tokens": 1276}
|
/*
* Copyright (c) 2010-2012 Steffen Kieß
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef HDF5_EXCEPTION_HPP_INCLUDED
#define HDF5_EXCEPTION_HPP_INCLUDED
// Error handling for HDF5
#include <HDF5/Forward.hpp>
#include <Core/Util.hpp>
#include <Core/Assert.hpp>
#include <Core/Exception.hpp>
#include <hdf5.h>
#include <limits>
#include <boost/utility/enable_if.hpp>
namespace HDF5 {
class Exception : public Core::Exception {
std::string methodName_;
public:
Exception (const std::string& methodName);
virtual ~Exception () throw ();
std::string message () const override;
const std::string& methodName () const {
return methodName_;
}
static NORETURN error (const char* methodName);
#define C(T) \
static T check (const char* methodName, T value) { \
if (value < 0) \
error (methodName); \
return value; \
}
C(H5I_type_t) C(H5S_class_t) C(H5T_class_t) C(H5T_sign_t)
#undef C
template <typename T>
static T check (typename boost::enable_if_c<std::numeric_limits<T>::is_integer && std::numeric_limits<T>::is_signed, const char*>::type methodName, T value) {
if (value < 0)
error (methodName);
return value;
}
template <typename T>
static T* check (const char* methodName, T* value) {
if (!value)
error (methodName);
return value;
}
};
}
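// Example usage (a sketch; H5Fopen is the standard HDF5 C API call with
// signature hid_t H5Fopen(const char*, unsigned, hid_t)):
//
//   hid_t file = HDF5::Exception::check ("H5Fopen",
//       H5Fopen ("data.h5", H5F_ACC_RDONLY, H5P_DEFAULT));
//
// check() forwards the value unchanged on success and calls error(), which
// throws an HDF5::Exception carrying the method name, when the return value
// signals an error (a negative integer or a null pointer).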
#endif // !HDF5_EXCEPTION_HPP_INCLUDED
|
{"hexsha": "438729c666d23450c74fb23ee47650ffc1b466ce", "size": 2611, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/PluginHDF5/HDF5/Exception.hpp", "max_stars_repo_name": "voxie-viewer/voxie", "max_stars_repo_head_hexsha": "d2b5e6760519782e9ef2e51f5322a3baa0cb1198", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2016-06-03T18:41:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-17T20:28:58.000Z", "max_issues_repo_path": "src/PluginHDF5/HDF5/Exception.hpp", "max_issues_repo_name": "voxie-viewer/voxie", "max_issues_repo_head_hexsha": "d2b5e6760519782e9ef2e51f5322a3baa0cb1198", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PluginHDF5/HDF5/Exception.hpp", "max_forks_repo_name": "voxie-viewer/voxie", "max_forks_repo_head_hexsha": "d2b5e6760519782e9ef2e51f5322a3baa0cb1198", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6375, "max_line_length": 162, "alphanum_fraction": 0.6660283416, "num_tokens": 582}
|
# Joint TV for multi-contrast MR
This demonstration shows how to do a synergistic reconstruction of two MR images with different contrast. Both images show the same underlying anatomy, just with different contrast weighting. In order to make use of this similarity, a joint total variation (TV) operator is used as regularisation in an iterative image reconstruction approach.
This demo is a jupyter notebook, i.e. intended to be run step by step.
You could export it as a Python file and run it one go, but that might
make little sense as the figures are not labelled.
Author: Christoph Kolbitsch, Evangelos Papoutsellis, Edoardo Pasca
First version: 16th of June 2021
CCP PETMR Synergistic Image Reconstruction Framework (SIRF).
Copyright 2021 Rutherford Appleton Laboratory STFC.
Copyright 2021 Physikalisch-Technische Bundesanstalt.
This is software developed for the Collaborative Computational
Project in Positron Emission Tomography and Magnetic Resonance imaging
(http://www.ccppetmr.ac.uk/).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# Initial set-up
```python
# Make sure figures appears inline and animations works
%matplotlib notebook
```
```python
# Make sure everything we need is installed
!pip install brainweb nibabel --user
```
```python
# Initial imports etc
import numpy
from numpy.linalg import norm
import matplotlib.pyplot as plt
import random
import os
import sys
import shutil
import brainweb
from tqdm.auto import tqdm
# Import SIRF functionality
import notebook_setup
import sirf.Gadgetron as mr
from sirf_exercises import exercises_data_path
# Import CIL functionality
from cil.framework import AcquisitionGeometry, BlockDataContainer, BlockGeometry, ImageGeometry
from cil.optimisation.functions import Function, OperatorCompositionFunction, SmoothMixedL21Norm, L1Norm, L2NormSquared, BlockFunction, MixedL21Norm, IndicatorBox, TotalVariation, LeastSquares, ZeroFunction
from cil.optimisation.operators import GradientOperator, BlockOperator, ZeroOperator, CompositionOperator, LinearOperator, FiniteDifferenceOperator
from cil.optimisation.algorithms import PDHG, FISTA, GD
from cil.plugins.ccpi_regularisation.functions import FGP_TV
```
# Utilities
```python
# First define some handy functions
# To make subsequent code cleaner, we have a few helper functions here.
# You can ignore them when you first see this demo.
def plot_2d_image(idx,vol,title,clims=None,cmap="viridis"):
"""Customized version of subplot to plot 2D image"""
plt.subplot(*idx)
plt.imshow(vol,cmap=cmap)
if not clims is None:
plt.clim(clims)
plt.colorbar()
plt.title(title)
plt.axis("off")
def crop_and_fill(templ_im, vol):
"""Crop volumetric image data and replace image content in template image object"""
# Get size of template image and crop
idim_orig = templ_im.as_array().shape
idim = (1,)*(3-len(idim_orig)) + idim_orig
offset = (numpy.array(vol.shape) - numpy.array(idim)) // 2
vol = vol[offset[0]:offset[0]+idim[0], offset[1]:offset[1]+idim[1], offset[2]:offset[2]+idim[2]]
# Make a copy of the template to ensure we do not overwrite it
templ_im_out = templ_im.copy()
# Fill image content
templ_im_out.fill(numpy.reshape(vol, idim_orig))
return(templ_im_out)
# This function creates regular (pattern='regular') or random (pattern='random') undersampled k-space data
# with an undersampling factor us_factor and num_ctr_lines fully sampled k-space lines in the k-space centre.
# For more information on this function please see the notebook f_create_undersampled_kspace
def create_undersampled_kspace(acq_orig, us_factor, num_ctr_lines, pattern='regular'):
"""Create a regular (pattern='regular') or random (pattern='random') undersampled k-space data"""
# Get ky indices
ky_index = acq_orig.parameter_info('kspace_encode_step_1')
# K-space centre in the middle of ky_index
ky0_index = len(ky_index)//2
# Fully sampled k-space centre
ky_index_subset = numpy.arange(ky0_index-num_ctr_lines//2, ky0_index+num_ctr_lines//2)
if pattern == 'regular':
ky_index_outside = numpy.arange(start=0, stop=len(ky_index), step=us_factor)
elif pattern == 'random':
ky_index_outside = numpy.asarray(random.sample(list(ky_index), len(ky_index)//us_factor))
else:
        raise ValueError('pattern should be "regular" or "random"')
# Combine fully sampled centre and outer undersampled region
ky_index_subset = numpy.concatenate((ky_index_subset, ky_index_outside), axis=0)
    # Ensure k-space points are not repeated
ky_index_subset = numpy.unique(ky_index_subset)
# Create new k-space data
    acq_new = acq_orig.new_acquisition_data(empty=True)
    # Select raw data
    for jnd in range(len(ky_index_subset)):
        cacq = acq_orig.acquisition(ky_index_subset[jnd])
acq_new.append_acquisition(cacq)
acq_new.sort()
return(acq_new)
```
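As a quick orientation, this is how the helper above will typically be called (the undersampling factor and the number of centre lines are illustrative choices, not prescribed values):

```python
# Example call; requires `preprocessed_data` from section (B) below, so it is
# left commented out here. The parameter values are illustrative only.
# acq_us = create_undersampled_kspace(preprocessed_data, us_factor=4,
#                                     num_ctr_lines=16, pattern='regular')
```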
### Joint TV reconstruction of two MR images
Assume we want to reconstruct two MR images $u$ and $v$ and utilise the similarity between both images using a joint TV ($JTV$) operator. We can then formulate the reconstruction problem as:
$$
\begin{equation}
(u^{*}, v^{*}) \in \underset{u,v}{\operatorname{argmin}} \frac{1}{2} \| A_{1} u - g\|^{2}_{2} + \frac{1}{2} \| A_{2} v - h\|^{2}_{2} + \alpha\,\mathrm{JTV}_{\eta, \lambda}(u, v)
\end{equation}
$$
* $JTV_{\eta, \lambda}(u, v) = \sum \sqrt{ \lambda|\nabla u|^{2} + (1-\lambda)|\nabla v|^{2} + \eta^{2}}$
* $A_{1}$, $A_{2}$: __MR__ `AcquisitionModel`
* $g_{1}$, $g_{2}$: __MR__ `AcquisitionData`
### Solving this problem
In order to solve the above minimization problem, we will use an alternating minimisation approach, where one variable is fixed and we solve wrt to the other variable:
$$
\begin{align*}
u^{k+1} & = \underset{u}{\operatorname{argmin}} \frac{1}{2} \| A_{1} u - g\|^{2}_{2} + \alpha_{1}\,\mathrm{JTV}_{\eta, \lambda}(u, v^{k}) \quad \text{subproblem 1}\\
v^{k+1} & = \underset{v}{\operatorname{argmin}} \frac{1}{2} \| A_{2} v - h\|^{2}_{2} + \alpha_{2}\,\mathrm{JTV}_{\eta, 1-\lambda}(u^{k+1}, v) \quad \text{subproblem 2}\\
\end{align*}$$
We are going to use a gradient descent approach to solve each of these subproblems in an alternating fashion.
The *regularisation parameter* `alpha` should be different for each subproblem: we use $\alpha_{1}$ and $\alpha_{2}$ in front of the two JTVs, and weight the gradient terms with $\lambda$, $1-\lambda$ in the first JTV and $1-\lambda$, $\lambda$ in the second, with $0<\lambda<1$.
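To make the JTV term concrete, here is a minimal numpy sketch of the smoothed joint TV value for two equally sized 2D images. This only illustrates the formula above (using simple finite differences via `numpy.gradient`); it is not the CIL machinery we will use later in this notebook:

```python
import numpy

def jtv_value(u, v, lam=0.5, eta=1e-3):
    """Smoothed joint TV: sum of sqrt(lam*|grad u|^2 + (1-lam)*|grad v|^2 + eta^2)"""
    gu = numpy.gradient(u)  # list of finite-difference arrays, one per axis
    gv = numpy.gradient(v)
    mag2 = lam * sum(numpy.abs(g)**2 for g in gu) \
        + (1 - lam) * sum(numpy.abs(g)**2 for g in gv)
    return numpy.sum(numpy.sqrt(mag2 + eta**2))

# e.g. jtv_value(u, v) for two equally sized 2D numpy arrays u and v
```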
This notebook builds on several other notebooks and hence certain steps will be carried out with minimal documentation. If you want more explanations, please refer to the corresponding notebooks mentioned in the following list. The steps we are going to carry out are
- (A) Get a T1 and T2 map from brainweb which we are going to use as ground truth $u_{gt}$ and $v_{gt}$ for our reconstruction (further information: `introduction` notebook)
- (B) Create __MR__ `AcquisitionModel` $A_{1}$ and $A_{2}$ and simulate undersampled __MR__ `AcquisitionData` $g_{1}$ and $g_{2}$ (further information: `acquisition_model_mr_pet_ct` notebook)
- (C) Set up the joint TV reconstruction problem
- (D) Solve the joint TV reconstruction problem (further information on gradient descent: `gradient_descent_mr_pet_ct` notebook)
# (A) Get brainweb data
We will download and use data from the brainweb.
```python
fname, url = sorted(brainweb.utils.LINKS.items())[0]
files = brainweb.get_file(fname, url, ".")
data = brainweb.load_file(fname)
brainweb.seed(1337)
```
```python
for f in tqdm([fname], desc="mMR ground truths", unit="subject"):
vol = brainweb.get_mmr_fromfile(f, petNoise=1, t1Noise=0.75, t2Noise=0.75, petSigma=1, t1Sigma=1, t2Sigma=1)
```
```python
T2_arr = vol['T2']
T1_arr = vol['T1']
# Normalise image data
T2_arr /= numpy.max(T2_arr)
T1_arr /= numpy.max(T1_arr)
```
```python
# Display it
plt.figure();
slice_show = T1_arr.shape[0]//2
plot_2d_image([1,2,1], T1_arr[slice_show, :, :], 'T1', cmap="Greys_r")
plot_2d_image([1,2,2], T2_arr[slice_show, :, :], 'T2', cmap="Greys_r")
```
OK, we have got two images with T1 and T2 contrast, BUT the brain looks a bit small. Spoiler alert: we are going to reconstruct MR images with a FOV of 256 x 256 voxels. As the above image covers 344 x 344 voxels, the brain would only cover a small part of our MR FOV. In order to ensure the brain fits well into our MR FOV, we are going to scale the images.
In order to do this we are going to use the `rescale` function from the skimage package to rescale the image by a factor of 2 and then crop it. To speed things up, we already select a single slice here, because our MR scan is going to be 2D as well.
```python
from skimage.transform import rescale
# Select central slice
central_slice = T1_arr.shape[0]//2
T1_arr = T1_arr[central_slice, :, :]
T2_arr = T2_arr[central_slice, :, :]
# Rescale by a factor 2.0
T1_arr = rescale(T1_arr, 2.0)
T2_arr = rescale(T2_arr, 2.0)
# Select a central ROI with 256 x 256
# We could also skip this because it is automatically done by crop_and_fill()
# but we would like to check if we did the right thing
idim = [256, 256]
offset = (numpy.array(T1_arr.shape) - numpy.array(idim)) // 2
T1_arr = T1_arr[offset[0]:offset[0]+idim[0], offset[1]:offset[1]+idim[1]]
T2_arr = T2_arr[offset[0]:offset[0]+idim[0], offset[1]:offset[1]+idim[1]]
# Now we make sure our image is of shape (1, 256, 256) again because in __SIRF__ even 2D images
# are expected to have 3 dimensions.
T1_arr = T1_arr[numpy.newaxis,...]
T2_arr = T2_arr[numpy.newaxis,...]
```
```python
# Display it
plt.figure();
slice_show = T1_arr.shape[0]//2
plot_2d_image([1,2,1], T1_arr[slice_show, :, :], 'T1', cmap="Greys_r")
plot_2d_image([1,2,2], T2_arr[slice_show, :, :], 'T2', cmap="Greys_r")
```
That looks better. Now we have images we can use for our MR simulation.
# (B) Simulate undersampled MR AcquisitionData
```python
# Create MR AcquisitionData
mr_acq = mr.AcquisitionData(exercises_data_path('MR', 'PTB_ACRPhantom_GRAPPA')
+ '/ptb_resolutionphantom_fully_ismrmrd.h5' )
```
```python
# Calculate CSM
preprocessed_data = mr.preprocess_acquisition_data(mr_acq)
csm = mr.CoilSensitivityData()
csm.smoothness = 200
csm.calculate(preprocessed_data)
```
```python
# Calculate image template
recon = mr.FullySampledReconstructor()
recon.set_input(preprocessed_data)
recon.process()
im_mr = recon.get_output()
```
```python
# Display the coil maps
plt.figure();
csm_arr = numpy.abs(csm.as_array())
plot_2d_image([1,2,1], csm_arr[0, 0, :, :], 'Coil 0', cmap="Greys_r")
plot_2d_image([1,2,2], csm_arr[2, 0, :, :], 'Coil 2', cmap="Greys_r")
```
We want to use these coil maps to simulate our MR raw data. Nevertheless, they were obtained from a phantom scan which unfortunately has some signal voids inside. If we used these coil maps directly, these signal voids would cause artefacts. We are therefore going to interpolate the coil maps first.
We are going to calculate a mask from the `ImageData` `im_mr`:
```python
im_mr_arr = numpy.squeeze(numpy.abs(im_mr.as_array()))
im_mr_arr /= numpy.max(im_mr_arr)
mask = numpy.zeros_like(im_mr_arr)
mask[im_mr_arr > 0.2] = 1
plt.figure();
plot_2d_image([1,1,1], mask, 'Mask', cmap="Greys_r")
```
Now we are going to interpolate between the values defined by the mask:
```python
from scipy.interpolate import griddata
# Target grid for a square image
xi = yi = numpy.arange(0, im_mr_arr.shape[0])
xi, yi = numpy.meshgrid(xi, yi)
# Define grid points in mask
idx = numpy.where(mask == 1)
x = xi[idx[0], idx[1]]
y = yi[idx[0], idx[1]]
# Go through each coil and interpolate linearly
csm_arr = csm.as_array()
for cnd in range(csm_arr.shape[0]):
cdat = csm_arr[cnd, 0, idx[0], idx[1]]
cdat_intp = griddata((x,y), cdat, (xi,yi), method='linear')
csm_arr[cnd, 0, :, :] = cdat_intp
# No extrapolation is done by griddata, so we set the resulting NaN values to 0
csm_arr[numpy.isnan(csm_arr)] = 0
```
```python
# Display the coil maps
plt.figure();
plot_2d_image([1,2,1], numpy.abs(csm_arr[0, 0, :, :]), 'Coil 0', cmap="Greys_r")
plot_2d_image([1,2,2], numpy.abs(csm_arr[2, 0, :, :]), 'Coil 2', cmap="Greys_r")
```
This is not the world's best interpolation, but it will do for the moment. Let's replace the data in the coil maps with the new interpolation:
```python
csm.fill(csm_arr);
```
Next we are going to create the two __MR__ `AcquisitionModel` $A_{1}$ and $A_{2}$
```python
# Create undersampled acquisition data
us_factor = 2
num_ctr_lines = 30
pattern = 'random'
acq_us = create_undersampled_kspace(preprocessed_data, us_factor, num_ctr_lines, pattern)
# Create two MR acquisition models
A1 = mr.AcquisitionModel(acq_us, im_mr)
A1.set_coil_sensitivity_maps(csm)
A2 = mr.AcquisitionModel(acq_us, im_mr)
A2.set_coil_sensitivity_maps(csm)
```
and simulate undersampled __MR__ `AcquisitionData` $g_{1}$ and $g_{2}$
```python
# MR
u_gt = crop_and_fill(im_mr, T1_arr)
g1 = A1.forward(u_gt)
v_gt = crop_and_fill(im_mr, T2_arr)
g2 = A2.forward(v_gt)
```
Lastly, we are going to add some noise:
```python
g1_arr = g1.as_array()
g1_max = numpy.max(numpy.abs(g1_arr))
g1_arr += (numpy.random.random(g1_arr.shape) - 0.5 + 1j*(numpy.random.random(g1_arr.shape) - 0.5)) * g1_max * 0.01
g1.fill(g1_arr)
g2_arr = g2.as_array()
g2_max = numpy.max(numpy.abs(g2_arr))
g2_arr += (numpy.random.random(g2_arr.shape) - 0.5 + 1j*(numpy.random.random(g2_arr.shape) - 0.5)) * g2_max * 0.01
g2.fill(g2_arr)
```
Just to check, we are going to apply the backward/adjoint operation to do a simple image reconstruction.
```python
# Simple reconstruction
u_simple = A1.backward(g1)
v_simple = A2.backward(g2)
```
```python
# Display it
plt.figure();
plot_2d_image([1,2,1], numpy.abs(u_simple.as_array())[0, :, :], '$u_{simple}$', cmap="Greys_r")
plot_2d_image([1,2,2], numpy.abs(v_simple.as_array())[0, :, :], '$v_{simple}$', cmap="Greys_r")
```
These images look quite poor compared to the ground truth input images, because they are reconstructed from an undersampled k-space. In addition, you can see a strange "structure" going through the centre of the brain. This has to do with the coil maps: as mentioned above, our coil maps have two "holes" in the centre, and this creates these artefacts. Nevertheless, this is not going to be a problem for our reconstruction, as we will see later on.
# (C) Set up the joint TV reconstruction problem
So far we have mainly used __SIRF__ functionality; now we are going to use __CIL__ in order to set up the reconstruction problem and then solve it. In order to be able to reconstruct both $u$ and $v$ at the same time, we will make use of `BlockDataContainer`. In the following we will define an operator which allows us to project a $(u,v)$ `BlockDataContainer` object into either $u$ or $v$. In the literature, this operator is called **[Projection Map (or Canonical Projection)](https://proofwiki.org/wiki/Definition:Projection_(Mapping_Theory))** and is defined as:
$$ \pi_{i}: X_{1}\times\cdots\times X_{n}\rightarrow X_{i}$$
with
$$\pi_{i}(x_{0},\dots,x_{i},\dots,x_{n}) = x_{i},$$
mapping an element $x$ from a Cartesian Product $X =\prod_{k=1}^{n}X_{k}$ to the corresponding element $x_{i}$ determined by the index $i$.
```python
class ProjectionMap(LinearOperator):
def __init__(self, domain_geometry, index, range_geometry=None):
self.index = index
if range_geometry is None:
range_geometry = domain_geometry.geometries[self.index]
super(ProjectionMap, self).__init__(domain_geometry=domain_geometry,
range_geometry=range_geometry)
def direct(self,x,out=None):
if out is None:
return x[self.index]
else:
out.fill(x[self.index])
def adjoint(self,x, out=None):
if out is None:
tmp = self.domain_geometry().allocate()
tmp[self.index].fill(x)
return tmp
else:
out[self.index].fill(x)
```
In the following we define the `SmoothJointTV` class. Our plan is to use the gradient descent (`GD`) algorithm to solve the two subproblems above. The class implements the `__call__` method, required to monitor the objective value, and the `gradient` method, which evaluates the gradient of the `JTV`.
For the two subproblems, the first variations with respect to $u$ and $v$ variables are:
$$
\begin{equation}
\begin{aligned}
& A_{1}^{T}*(A_{1}u - g_{1}) - \alpha_{1} \mathrm{div}\bigg( \frac{\nabla u}{|\nabla(u, v)|_{2,\eta,\lambda}}\bigg)\\
& A_{2}^{T}*(A_{2}v - g_{2}) - \alpha_{2} \mathrm{div}\bigg( \frac{\nabla v}{|\nabla(u, v)|_{2,\eta,1-\lambda}}\bigg)
\end{aligned}
\end{equation}
$$
where $$|\nabla(u, v)|_{2,\eta,\lambda} = \sqrt{ \lambda|\nabla u|^{2} + (1-\lambda)|\nabla v|^{2} + \eta^{2}}.$$
```python
class SmoothJointTV(Function):
def __init__(self, eta, axis, lambda_par):
r'''
        :param eta: smoothing parameter making SmoothJointTV differentiable
        :param axis: index (0 or 1) of the variable the gradient is taken with respect to
        :param lambda_par: weighting between the two gradient terms, 0 < lambda_par < 1
'''
super(SmoothJointTV, self).__init__(L=8)
# smoothing parameter
self.eta = eta
# GradientOperator
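        # Note: the finite-difference operators take their geometry from
        # u_simple, which was created earlier in this notebook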
FDy = FiniteDifferenceOperator(u_simple, direction=1)
FDx = FiniteDifferenceOperator(u_simple, direction=2)
self.grad = BlockOperator(FDy, FDx)
# Which variable to differentiate
self.axis = axis
        if self.eta <= 0:
            raise ValueError('Need a positive value for eta')
self.lambda_par=lambda_par
def __call__(self, x):
r""" x is BlockDataContainer that contains (u,v). Actually x is a BlockDataContainer that contains 2 BDC.
"""
if not isinstance(x, BlockDataContainer):
raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))
tmp = numpy.abs((self.lambda_par*self.grad.direct(x[0]).pnorm(2).power(2) + (1-self.lambda_par)*self.grad.direct(x[1]).pnorm(2).power(2)+\
self.eta**2).sqrt().sum())
return tmp
def gradient(self, x, out=None):
denom = (self.lambda_par*self.grad.direct(x[0]).pnorm(2).power(2) + (1-self.lambda_par)*self.grad.direct(x[1]).pnorm(2).power(2)+\
self.eta**2).sqrt()
if self.axis==0:
num = self.lambda_par*self.grad.direct(x[0])
else:
num = (1-self.lambda_par)*self.grad.direct(x[1])
if out is None:
tmp = self.grad.range.allocate()
tmp[self.axis].fill(self.grad.adjoint(num.divide(denom)))
return tmp
else:
self.grad.adjoint(num.divide(denom), out=out[self.axis])
```
Now we are going to put everything together and define the two objective functions for the two subproblems defined at the beginning
```python
alpha1 = 0.05
alpha2 = 0.05
lambda_par = 0.5
eta = 1e-12
# BlockGeometry for the two modalities
bg = BlockGeometry(u_simple, v_simple)
# Projection map, depending on the unknown variable
L1 = ProjectionMap(bg, index=0)
L2 = ProjectionMap(bg, index=1)
# Fidelity terms based on the acquisition data
f1 = 0.5*L2NormSquared(b=g1)
f2 = 0.5*L2NormSquared(b=g2)
# JTV for each of the subproblems
JTV1 = alpha1*SmoothJointTV(eta=eta, axis=0, lambda_par = lambda_par )
JTV2 = alpha2*SmoothJointTV(eta=eta, axis=1, lambda_par = 1-lambda_par)
# Compose the two objective functions
objective1 = OperatorCompositionFunction(f1, CompositionOperator(A1, L1)) + JTV1
objective2 = OperatorCompositionFunction(f2, CompositionOperator(A2, L2)) + JTV2
```
# (D) Solve the joint TV reconstruction problem
```python
# We start with zero-filled images
x0 = bg.allocate(0.0)
# We use a fixed step-size for the gradient descent approach
step_size = 0.1
# We are also going to log the value of the objective functions
obj1_val_it = []
obj2_val_it = []
for i in range(10):
gd1 = GD(x0, objective1, step_size=step_size, \
max_iteration = 4, update_objective_interval = 1)
gd1.run(verbose=1)
# We skip the first one because it gets repeated
obj1_val_it.extend(gd1.objective[1:])
    # Here we do a little "trick" to better see when each subproblem is optimised:
    # we append NaNs to the objective of the subproblem which is currently not being
    # optimised. The NaNs do not show up in the final plot and hence we can nicely
    # see each subproblem.
obj2_val_it.extend(numpy.ones_like(gd1.objective[1:])*numpy.nan)
gd2 = GD(gd1.solution, objective2, step_size=step_size, \
max_iteration = 4, update_objective_interval = 1)
gd2.run(verbose=1)
obj2_val_it.extend(gd2.objective[1:])
obj1_val_it.extend(numpy.ones_like(gd2.objective[1:])*numpy.nan)
x0.fill(gd2.solution)
print('* * * * * * Outer Iteration ', i, ' * * * * * *\n')
```
Finally we can look at the images $u_{jtv}$ and $v_{jtv}$ and compare them to the simple reconstruction $u_{simple}$ and $v_{simple}$ and the original ground truth images.
```python
u_jtv = numpy.squeeze(numpy.abs(x0[0].as_array()))
v_jtv = numpy.squeeze(numpy.abs(x0[1].as_array()))
plt.figure()
plot_2d_image([2,3,1], numpy.squeeze(numpy.abs(u_simple.as_array()[0, :, :])), '$u_{simple}$', cmap="Greys_r")
plot_2d_image([2,3,2], u_jtv, '$u_{JTV}$', cmap="Greys_r")
plot_2d_image([2,3,3], numpy.squeeze(numpy.abs(u_gt.as_array()[0, :, :])), '$u_{gt}$', cmap="Greys_r")
plot_2d_image([2,3,4], numpy.squeeze(numpy.abs(v_simple.as_array()[0, :, :])), '$v_{simple}$', cmap="Greys_r")
plot_2d_image([2,3,5], v_jtv, '$v_{JTV}$', cmap="Greys_r")
plot_2d_image([2,3,6], numpy.squeeze(numpy.abs(v_gt.as_array()[0, :, :])), '$v_{gt}$', cmap="Greys_r")
```
And let's look at the objective functions
```python
plt.figure()
plt.plot(obj1_val_it, 'o-', label='subproblem 1')
plt.plot(obj2_val_it, '+-', label='subproblem 2')
plt.xlabel('Number of iterations')
plt.ylabel('Value of objective function')
plt.title('Objective functions')
plt.legend()
# Logarithmic y-axis
plt.yscale('log')
```
# Next steps
The above is a good demonstration of a synergistic image reconstruction of two different images. The following gives a few suggestions of what to do next and also how to extend this notebook to other applications.
## Number of iterations
In our problem we have several regularisation parameters such as $\alpha_{1}$, $\alpha_{2}$ and $\lambda$. In addition, the number of inner iterations for each subproblem (currently set to 4) and the number of outer iterations (currently set to 10) also determine the final solution. Of course, for an infinite number of total iterations it shouldn't matter, but usually we don't have that much time.
__TODO__: Change the number of iterations and see what happens to the objective functions. For a given number of total iterations, do you think it is better to have a high number of inner or a high number of outer iterations? Why? Does this also depend on the undersampling factor?
## Spatial misalignment
In the above example we simulated our data such that there is a perfect spatial match between $u$ and $v$. For real world applications this usually cannot be assumed.
__TODO__: Add spatial misalignment between $u$ and $v$. This can be achieved e.g. by calling `numpy.roll` on `T2_arr` before calling `v_gt = crop_and_fill(im_mr, T2_arr)`. What is the effect on the reconstructed images? For a more "advanced" misalignment, have a look at notebook `BrainWeb`.
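For example, a shift of a few voxels along one in-plane axis could be introduced like this (a minimal sketch):
```python
# Shift T2_arr by 5 voxels along the second in-plane axis to simulate misalignment
T2_arr = numpy.roll(T2_arr, 5, axis=2)
```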
__TODO__: One way to minimize spatial misalignment is to use image registration to ensure both $u$ and $v$ are well aligned. In the notebook `sirf_registration` you find information about how to register two images and also how to resample one image based on the spatial transformation estimated from the registration. Try to use this to correct for the misalignment you introduced above. For a real world example, at which point in the code would you have to carry out the registration+resampling? (some more information can also be found at the end of notebook `de_Pierro_MAPEM`)
## Pathologies
The images $u$ and $v$ show the same anatomy, just with a different contrast. Clinically more useful are of course images which show complementary image information.
__TODO__: Add a pathology to either $u$ or $v$ and see how this affects the reconstruction. For something more advanced, have a look at the notebook `BrainWeb`.
## Single anatomical prior
So far we have alternated between two reconstruction problems. Another option is to do a single regularised reconstruction and simply use a previously reconstructed image for regularisation.
__TODO__: Adapt the above code such that $u$ is reconstructed first without regularisation and is then used for a regularised reconstruction of $v$ without any further updates of $u$.
## Complementary k-space trajectories
We used the same k-space trajectory for $u$ and $v$. This is of course not ideal for such an optimisation, because the same k-space trajectory also means the same pattern of undersampling artefacts. The artefacts in each image will differ because of the different image content, but it would still be better if $u$ and $v$ were acquired with different k-space trajectories.
__TODO__: Create two different k-space trajectories and compare the results to a reconstruction using the same k-space trajectories.
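As a starting point (a sketch reusing the `create_undersampled_kspace` helper from the top of this notebook; the `'random'` pattern draws a fresh subset of k-space lines on every call, so two calls already yield two different trajectories):
```python
# Two different random undersampling patterns from the same preprocessed data
acq_us1 = create_undersampled_kspace(preprocessed_data, us_factor, num_ctr_lines, 'random')
acq_us2 = create_undersampled_kspace(preprocessed_data, us_factor, num_ctr_lines, 'random')
A1 = mr.AcquisitionModel(acq_us1, im_mr)
A1.set_coil_sensitivity_maps(csm)
A2 = mr.AcquisitionModel(acq_us2, im_mr)
A2.set_coil_sensitivity_maps(csm)
```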
__TODO__: Try different undersampling factors and compare results for _regular_ and _random_ undersampling patterns.
## Other regularisation options
In this example we used a TV-based regularisation, but of course other regularisers could also be used, such as directional TV.
__TODO__: Have a look at the __CIL__ notebook `02_Dynamic_CT` and adapt the `SmoothJointTV` class above to use directional TV.
|
{"hexsha": "203d4fa76fd087d4b08f6e43a6c40035268c2af9", "size": 37671, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/Synergistic/cil_joint_tv_mr.ipynb", "max_stars_repo_name": "johannesmayer/SIRF-Exercises", "max_stars_repo_head_hexsha": "772f132ca3639f364189258d558a8f06a8666fb1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-25T12:16:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-25T12:16:44.000Z", "max_issues_repo_path": "notebooks/Synergistic/cil_joint_tv_mr.ipynb", "max_issues_repo_name": "johannesmayer/SIRF-Exercises", "max_issues_repo_head_hexsha": "772f132ca3639f364189258d558a8f06a8666fb1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/Synergistic/cil_joint_tv_mr.ipynb", "max_forks_repo_name": "johannesmayer/SIRF-Exercises", "max_forks_repo_head_hexsha": "772f132ca3639f364189258d558a8f06a8666fb1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.746492986, "max_line_length": 587, "alphanum_fraction": 0.5869236283, "converted": true, "num_tokens": 7312}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Catchment specific water flux plots.
###############################################################
runoff_et/catchment_analysis.py
Authors ESMValToolV1 Version
Philipp Sommer (philipp.sommer@mpimet.mpg.de)
Stefan Hagemann (stefan.hagemann@hzg.de)
Alexander Loew
Port to ESMValTool Version 2
Tobias Stacke (tobias.stacke@mpimet.mpg.de)
###############################################################
Description
-----------
Plots temporal and spatial averages of precipitation, runoff and
evaporation for specific land surface catchments. Additionally,
relations of runoff coefficient to relative precipitation bias
and runoff coefficient to evaporation coefficient are computed.
Default reference data are included in this routine (default class)
but can be replaced with other datasets. In case a custom catchment
mask is used, the default class (catchment names, IDs, reference data)
has to be adapted.
###############################################################
"""
import calendar
import logging
import os
from itertools import cycle
import iris
import iris.coord_categorisation
import numpy as np
import esmvaltool.diag_scripts.shared as diag
logger = logging.getLogger(os.path.basename(__file__))
def get_defaults():
"""Return default reference values for predefined catchments.
The entries are used in the routine analysecatchments. Catchments and
    reference values are specific to the default catchment mask. All reference
values are given in mm a-1. Precip data is based on WFDEI, runoff is based
on GRDC, ET is derived as the difference of both. The values are updated
and differ slightly from the ESMValTool 1 version.
Dictionary entries are
catchments
mrro
pr
evspsbl
"""
defaults = {
'catchments': {
# Catchments with name as used in make_catchment_plots and
# associated ID used in the catchment mask netCDF file
"Amazon": 94,
"Parana": 98,
"Mackenzie": 76,
"Mississippi": 86,
"Danube": 14,
"Congo": 68,
"Niger_Malanville": 65,
"Nile": 60,
"Lena": 40,
"Yangtze-Kiang": 52,
"Ganges-Brahmaputra": 54,
"Murray": 100,
},
'mrro': {
'Amazon': 1194.63,
'Congo': 365.45,
'Danube': 250.75,
'Ganges-Brahmaputra': 672.11,
'Lena': 199.61,
'Mackenzie': 173.87,
'Mississippi': 182.12,
'Murray': 8.20,
'Niger_Malanville': 31.49,
'Nile': 48.72,
'Parana': 202.87,
'Yangtze-Kiang': 531.33,
},
'pr': {
'Amazon': 2210.25,
'Congo': 1571.41,
'Danube': 808.04,
'Ganges-Brahmaputra': 1405.84,
'Lena': 387.01,
'Mackenzie': 450.16,
'Mississippi': 897.18,
'Murray': 474.62,
'Niger_Malanville': 437.90,
'Nile': 655.62,
'Parana': 1314.66,
'Yangtze-Kiang': 1074.79,
},
'evspsbl': {
'Amazon': 1015.62,
'Congo': 1205.96,
'Danube': 557.29,
'Ganges-Brahmaputra': 733.73,
'Lena': 187.40,
'Mackenzie': 276.29,
'Mississippi': 715.06,
'Murray': 466.42,
'Niger_Malanville': 406.41,
'Nile': 606.90,
'Parana': 1111.80,
'Yangtze-Kiang': 543.46,
}
}
return defaults
def format_coef_plot(my_ax):
"""Move axis from border to center, adapts ticks and labels accordingly.
Parameters
----------
my_ax : object
plot axis object
"""
# Add infos to axis
my_ax.xaxis.set_label_coords(0.5, -0.025)
my_ax.yaxis.set_label_coords(-0.025, 0.5)
# Adapt axis range to center zero
xmax = np.ceil(
(np.absolute(np.array(my_ax.get_xlim())).max() + 5) / 10.0) * 10 - 5
my_ax.set_xlim(xmax * -1, xmax)
ymax = np.ceil(
(np.absolute(np.array(my_ax.get_ylim())).max() + 5) / 10.0) * 10 - 5
my_ax.set_ylim(ymax * -1, ymax)
# remove 0 from y and x axis
for key in ['x', 'y']:
ticks = list(getattr(my_ax, 'get_%sticks' % key)())
try:
ticks.remove(0)
except ValueError:
pass
getattr(my_ax, 'set_%sticks' % key)(ticks)
    # Move left y-axis and bottom x-axis to centre, passing through (0,0)
my_ax.spines['left'].set_position('center')
my_ax.spines['bottom'].set_position('center')
# Eliminate upper and right axes
my_ax.spines['right'].set_color('none')
my_ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
my_ax.xaxis.set_ticks_position('bottom')
my_ax.yaxis.set_ticks_position('left')
def data2file(cfg, filename, title, filedata):
"""Write data dictionary into ascii file.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe
filename : str
String containing the file name
title : str
String containing the file header
filedata : dict
Dictionary of catchment averages per river
"""
# Write experiment data
filepath = os.path.join(cfg[diag.names.WORK_DIR], filename)
with open(filepath, 'w') as out:
out.write(title + '\n\n')
for river, value in sorted(filedata.items()):
out.write('{:25} : {:8.2f}\n'.format(river, value))
def write_plotdata(cfg, plotdata, catchments):
"""Write catchment averaged values for all datasets.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe
plotdata : dict
Dictionary containing the catchment averages
catchments : dict
        Dictionary containing information about catchment mask,
grid cell size, and reference values
"""
ref_vars = []
metric = "catchment averages"
unit = "[mm a-1]"
for var in plotdata.keys():
for identifier in plotdata[var].keys():
# Write experiment data
filename = '_'.join([var, identifier]) + '.txt'
title = " ".join(identifier.split(' ') + [var, metric, unit])
filedata = plotdata[var][identifier]
data2file(cfg, filename, title, filedata)
# Write reference data
if var not in ref_vars:
filename = '_'.join([var, 'reference']) + '.txt'
title = " ".join([catchments['refname'], metric, unit])
filedata = catchments[var]
data2file(cfg, filename, title, filedata)
ref_vars.append(var)
def get_expdata(expdict, refdict):
"""Get list with catchment averages for experiment and reference.
Parameters
----------
expdict : dict
the catchment averages experiments dictionary
refdict : dict
the catchment averages reference dictionary
"""
expdata, refdata, rivers = [], [], []
for riv, ref in sorted(refdict.items()):
rivers.append(riv)
refdata.append(ref)
for riv in rivers:
expdata.append(expdict[riv])
return rivers, np.array(refdata), np.array(expdata)
def compute_diags(plotdata, identifier, catchments):
"""Compute diagnostics for all variables of an experiment.
Parameters
----------
plotdata : dict
Dictionary containing the catchment averages
identifier : str
Dataset name
catchments : dict
        Dictionary containing information about catchment mask,
grid cell size, and reference values
"""
diags = {'ref': {}, 'exp': {}, 'abs': {}, 'rel': {}}
# 1. Absolute and relative variable biases
for var in plotdata.keys():
diags['riv'], diags['ref'][var], diags['exp'][var] = get_expdata(
plotdata[var][identifier], catchments[var])
diags['abs'][var] = diags['exp'][var] - diags['ref'][var]
diags['rel'][var] = diags['exp'][var] / diags['ref'][var] * 100
diags['xrv'] = range(len(diags['riv']))
# 2. Coefficients
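    # runoff coefficient: mrro/pr, ET coefficient: evspsbl/pr; the values below
    # are differences between experiment and reference in percentage points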
diags['prbias'] = diags['abs']['pr'] / diags['ref']['pr'] * 100
diags['rocoef'] = (diags['exp']['mrro'] / diags['exp']['pr'] * 100) - (
diags['ref']['mrro'] / diags['ref']['pr'] * 100)
diags['etcoef'] = (diags['exp']['evspsbl'] / diags['exp']['pr'] * 100) - (
diags['ref']['evspsbl'] / diags['ref']['pr'] * 100)
return diags
def setup_pdf(pltdir, identifier, outtype):
"""Prepare pdf output.
Parameters
----------
pltdir : str
Output directory for pdf plot
identifier : str
Dataset name
outtype : str
Plot file type [pdf,other]
"""
from matplotlib.backends.backend_pdf import PdfPages
if outtype == 'pdf':
filepath = os.path.join(pltdir, identifier + ".pdf")
pdf = PdfPages(filepath)
else:
pdf = None
return pdf
def prep_barplot(diags, defs, identifier, var, pdf):
"""Prepare barplot.
Parameters
----------
diags : dict
Dictionary containing all metrics for plotting
defs : dict
Dictionary containing plot settings
identifier : str
Dataset name
var : str
short name of the actual variable
pdf : obj
        pdf object if pdf output is chosen, None otherwise
"""
import matplotlib.pyplot as plt
fig, my_axs = plt.subplots(nrows=1, ncols=2, sharex=False)
fig.suptitle(identifier.upper() + ' vs ' + defs['refname'].upper())
fig.subplots_adjust(bottom=0.35)
plottitle = ['\nBias for ', '\nRelative bias for ']
ylabel = [var.upper() + ' [mm a-1]', 'Relative bias [%]']
# Setup both plot axis
for iax, axs in enumerate(my_axs.tolist()):
axs.set_title(plottitle[iax] + var.upper())
axs.set_ylabel(ylabel[iax])
axs.set_xlabel('Catchment')
axs.set_xticks(diags['xrv'])
axs.set_xticklabels((diags['riv']), fontsize='small')
for tick in axs.get_xticklabels():
tick.set_rotation(90)
axs.axhline(c='black', lw=2)
    # Plot absolute bias for every catchment
my_axs[0].bar(diags['xrv'], diags['abs'][var], color="C{}".format(0))
# Plot relative bias for every catchment
    my_axs[1].bar(diags['xrv'], diags['rel'][var], color="C{}".format(1))
# Finish plot
finish_plot(fig, defs['pltdir'], identifier + '_' + var + '-bias', pdf)
def prep_scatplot(coeftype, diags, defs, identifier, pdf):
"""Prepare scatterplot for different coefficients.
Parameters
----------
coeftype : str
        string indicating plot type [prbias,etcoef]
diags : dict
Dictionary containing all metrics for plotting
defs : dict
Dictionary containing plot settings
identifier : str
Dataset name
pdf : obj
        pdf object if pdf output is chosen, None otherwise
"""
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=1, sharex=False)
axs.set_title(identifier.upper() + ' vs ' + defs['refname'].upper())
axs.set_ylabel('Bias of runoff coefficient [%]')
marker = cycle(defs['markerlist'])
if coeftype == 'prbias':
for prbias, rocoef in zip(diags['prbias'], diags['rocoef']):
axs.scatter(prbias, rocoef, marker=next(marker))
axs.set_xlabel('Relative bias of precipitation [%]')
tag = '_pr-vs-ro'
elif coeftype == 'etcoef':
for etcoef, rocoef in zip(diags['etcoef'], diags['rocoef']):
axs.scatter(etcoef, rocoef, marker=next(marker))
axs.set_xlabel('Bias of ET coefficient [%]')
tag = '_et-vs-ro'
else:
raise ValueError('Unexpected coefficient combination in prep_scatplot')
format_coef_plot(axs)
add_legend(fig, diags['riv'], defs['markerlist'])
finish_plot(fig, defs['pltdir'], identifier + tag, pdf)
def add_legend(fig, rivers, markerlist):
"""Add scatter plot legend with separate axis.
Parameters
----------
fig : obj
plot figure object
rivers : list
list of river catchment names
markerlist : list
list of marker strings for scatterplot legend
"""
# Define legend
fig.subplots_adjust(bottom=0.30)
marker = cycle(markerlist)
caxe = fig.add_axes([0.05, 0.01, 0.9, 0.20])
for label in rivers:
caxe.scatter([], [], marker=next(marker), label=label)
caxe.legend(ncol=3, numpoints=1, loc="lower center", mode="expand")
caxe.set_axis_off()
def finish_plot(fig, pltdir, name, pdf):
"""Save actual figure to either png or pdf.
Parameters
----------
fig : obj
actual figure
pltdir : str
target directory to store plots
name : str
filename for png output without extension
pdf : obj
        pdf object collecting all pages in case of pdf output
"""
import matplotlib.pyplot as plt
if '-bias' in name:
plt.tight_layout()
if pdf is None:
filepath = os.path.join(pltdir, name + ".png")
fig.savefig(filepath)
else:
fig.savefig(pdf, dpi=80, format='pdf')
plt.close()
def make_catchment_plots(cfg, plotdata, catchments):
"""Plot catchment averages for different metrics.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe
plotdata : dict
Dictionary containing the catchment averages
catchments : dict
        Dictionary containing information about catchment mask,
grid cell size, and reference values
"""
import matplotlib.pyplot as plt
# Get colorscheme from recipe
defs = {
'colorscheme': cfg.get('colorscheme', 'default'),
'markerlist': ('s', '+', 'o', '*', 'x', 'D'),
'pltdir': cfg[diag.names.PLOT_DIR],
'plttype': cfg.get('output_file_type', 'png'),
'refname': catchments['refname']
}
plt.style.use(defs['colorscheme'])
# Loop over datasets
for identifier in plotdata[list(plotdata.keys())[0]].keys():
# Prepare pdf file if output type chosen
pdf = setup_pdf(defs['pltdir'], identifier, defs['plttype'])
# Compute diagnostics for plots
diags = compute_diags(plotdata, identifier, catchments)
# Barplots for single variables
for var in plotdata.keys():
prep_barplot(diags, defs, identifier, var, pdf)
# Runoff coefficient vs relative precipitation bias
prep_scatplot('prbias', diags, defs, identifier, pdf)
# Runoff coefficient vs evaporation coefficient bias
prep_scatplot('etcoef', diags, defs, identifier, pdf)
# Finish pdf if it is the chosen output
if pdf is not None:
pdf.close()
def get_catchment_data(cfg):
"""Read and prepare catchment mask.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe
"""
catchments = get_defaults()
catchments['refname'] = 'default'
if not cfg.get('catchmentmask'):
raise ValueError('A catchment mask file needs to be specified in the '
'recipe (see recipe description for details)')
catchment_filepath = os.path.join(cfg['auxiliary_data_dir'],
cfg.get('catchmentmask'))
if not os.path.isfile(catchment_filepath):
raise IOError('Catchment file {} not found'.format(catchment_filepath))
catchments['cube'] = iris.load_cube(catchment_filepath)
if catchments['cube'].coord('latitude').bounds is None:
catchments['cube'].coord('latitude').guess_bounds()
if catchments['cube'].coord('longitude').bounds is None:
catchments['cube'].coord('longitude').guess_bounds()
catchments['area'] = iris.analysis.cartography.area_weights(
catchments['cube'])
return catchments
def get_sim_data(cfg, datapath, catchment_cube):
"""Read and postprocess netcdf data from experiments.
Check units, aggregate to long term mean yearly sum and
regrid to resolution of catchment mask.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe.
    datapath : str
        Path to the netcdf file
    catchment_cube : obj
        iris cube object containing the catchment mask
"""
datainfo = diag.Datasets(cfg).get_dataset_info(path=datapath)
identifier = "_".join(
[datainfo['dataset'].upper(), datainfo['exp'], datainfo['ensemble']])
# Load data into iris cube
new_cube = iris.load(datapath, diag.Variables(cfg).standard_names())[0]
# Check for expected unit
if new_cube.units != 'kg m-2 s-1':
        raise ValueError('Unit [kg m-2 s-1] is expected for {} flux'.format(
            new_cube.long_name.lower()))
# Convert to unit mm per month
timelist = new_cube.coord('time')
daypermonth = []
for mydate in timelist.units.num2date(timelist.points):
daypermonth.append(calendar.monthrange(mydate.year, mydate.month)[1])
new_cube.data *= 86400.0
for i, days in enumerate(daypermonth):
new_cube.data[i] *= days
# Aggregate over year --> unit mm per year
iris.coord_categorisation.add_year(new_cube, 'time')
year_cube = new_cube.aggregated_by('year', iris.analysis.SUM)
year_cube.units = "mm a-1"
# Compute long term mean
mean_cube = year_cube.collapsed([diag.names.TIME], iris.analysis.MEAN)
# Regrid to catchment data grid --> maybe use area_weighted instead?
if mean_cube.coord('latitude').bounds is None:
mean_cube.coord('latitude').guess_bounds()
if mean_cube.coord('longitude').bounds is None:
mean_cube.coord('longitude').guess_bounds()
m_grid = [iris.analysis.Linear(), iris.analysis.AreaWeighted()]
mean_cube_regrid = mean_cube.regrid(catchment_cube, m_grid[1])
return datainfo['short_name'], identifier, mean_cube_regrid
def get_catch_avg(catchments, sim_cube):
"""Compute area weighted averages for river catchments.
Parameters
----------
catchments : dict
        Dictionary containing information about catchment mask,
grid cell size, and reference values
sim_cube : obj
iris cube object containing the simulation data
"""
avg = {}
for river, rid in catchments['catchments'].items():
data_catch = np.ma.masked_where(
            catchments['cube'].data.astype(int) != rid, sim_cube.data)
area_catch = np.ma.masked_where(
            catchments['cube'].data.astype(int) != rid,
catchments['area'].data)
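        # Area-weighted mean: each unmasked grid cell is weighted by its
        # share of the total catchment area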
avg[river] = (data_catch * (area_catch / area_catch.sum())).sum()
return avg
def update_reference(catchments, model, rivervalues, var):
"""Update reference catchment averages.
Parameters
----------
catchments : dict
        Dictionary containing information about catchment mask,
grid cell size, and reference values
model : str
name of the data set
rivervalues : dict
dictionary of river catchment averages
var : str
short name of the variable
"""
if catchments['refname'] != model and catchments['refname'] != 'default':
raise ValueError('Reference must be the same for all variables!')
catchments[var] = rivervalues
catchments['refname'] = model
def update_plotdata(identifier, plotdata, rivervalues, var):
"""Update simulation catchment averages.
Parameters
----------
identifier : str
string consisting of dataset, experiment and ensemble information
plotdata : dict
river catchment averages for different variables and datasets
rivervalues : dict
river catchment averages for different variables
var : str
short name of the variable
"""
if var not in plotdata.keys():
plotdata[var] = {}
if identifier in plotdata[var].keys():
        raise ValueError('Variable {} already exists in plot dict'.format(var))
else:
plotdata[var][identifier] = rivervalues
def main(cfg):
"""Run the diagnostic.
Parameters
----------
cfg : dict
Configuration dictionary of the recipe.
"""
# Get dataset and variable information
logging.debug("Found datasets in recipe:\n%s", diag.Datasets(cfg))
logging.debug("Found variables in recipe:\n%s", diag.Variables(cfg))
# Check for correct variables
if not diag.Variables(cfg).vars_available('pr', 'mrro', 'evspsbl'):
raise ValueError(
"Diagnostic requires precipitation, runoff and evaporation data")
# Read catchmentmask
    # to check: Correct way to read auxiliary data using recipes?
my_catch = get_catchment_data(cfg)
# Read data, convert units and compute long term means
# to check: Shouldn't this be part of preprocessing?
# to check: How to regrid onto catchment_cube grid
# with preproc recipe statements
# instead of using regrid here?
allcubes = {}
plotdata = {}
for datapath in diag.Datasets(cfg):
# Get simulation data
var, identifier, cube = get_sim_data(cfg, datapath, my_catch['cube'])
# Get river catchment averages
rivervalues = get_catch_avg(my_catch, cube)
# Sort into data dictionaries
datainfo = diag.Datasets(cfg).get_dataset_info(path=datapath)
model = datainfo['dataset']
if model == datainfo.get('reference_dataset', None):
update_reference(my_catch, model, rivervalues, var)
else:
update_plotdata(identifier, plotdata, rivervalues, var)
# Append to cubelist for temporary output
if model not in allcubes.keys():
allcubes[model] = []
allcubes[model].append(cube)
# Write regridded and temporal aggregated netCDF data files (one per model)
# to do: update attributes, something fishy with unlimited dimension
for model, mcube in allcubes.items():
filepath = os.path.join(cfg[diag.names.WORK_DIR],
'_'.join(['postproc', model]) + '.nc')
if cfg[diag.names.WRITE_NETCDF]:
iris.save(mcube, filepath)
logger.info("Writing %s", filepath)
# Write plotdata as ascii files for user information
write_plotdata(cfg, plotdata, my_catch)
# Plot catchment data
make_catchment_plots(cfg, plotdata, my_catch)
if __name__ == '__main__':
with diag.run_diagnostic() as config:
main(config)
|
{"hexsha": "3fe8997e0c7ce4d739612a42cb3114bf0ee370a5", "size": 22614, "ext": "py", "lang": "Python", "max_stars_repo_path": "esmvaltool/diag_scripts/runoff_et/catchment_analysis.py", "max_stars_repo_name": "YanchunHe/ESMValTool", "max_stars_repo_head_hexsha": "3d9238a83f11c3f01266c6eca357471b7bb02116", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-20T13:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T13:59:07.000Z", "max_issues_repo_path": "esmvaltool/diag_scripts/runoff_et/catchment_analysis.py", "max_issues_repo_name": "YanchunHe/ESMValTool", "max_issues_repo_head_hexsha": "3d9238a83f11c3f01266c6eca357471b7bb02116", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-08-20T13:29:14.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-17T08:24:34.000Z", "max_forks_repo_path": "esmvaltool/diag_scripts/runoff_et/catchment_analysis.py", "max_forks_repo_name": "YanchunHe/ESMValTool", "max_forks_repo_head_hexsha": "3d9238a83f11c3f01266c6eca357471b7bb02116", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-20T13:59:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T13:59:21.000Z", "avg_line_length": 33.4526627219, "max_line_length": 79, "alphanum_fraction": 0.6132042098, "include": true, "reason": "import numpy", "num_tokens": 5595}
|
#ifndef HTOOL_SPMATRIX_HPP
#define HTOOL_SPMATRIX_HPP
#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <cassert>
#include <complex>
#include <iostream>
#include <vector>
//#include "point.hpp"
namespace htool {
//================================//
//      TYPE DECLARATIONS      //
//================================//
//typedef vector<Cplx> vectCplx;
//=================================================================//
// CLASS SPARSE MATRIX
/******************************************************************/ /**
* Class for sparse matrices (in coordinate list format).
* Its member objects are:
* - I: vector of the row indices,
* - J: vector of the column indices,
* - K: vector of the (complex) coefficients of the matrix,
* - nr: the number of rows,
* - nc: the number of columns.
*********************************************************************/
class SpMatrix {
private:
std::vector<int> I, J;
std::vector<Cplx> K;
int nr;
int nc;
public:
//! ### Default constructor
/*!
Initializes the matrix to the size 0*0.
*/
SpMatrix() : nr(0), nc(0) {}
//! ### Another constructor
/*!
Initializes the matrix with _nrp_ rows and _ncp_ columns,
_Ip_ as vector of the row indices, _Jp_ as vector of the column indices,
_Kp_ as vector of the coefficients of the matrix.
*/
SpMatrix(const std::vector<int> &Ip, const std::vector<int> &Jp, std::vector<Cplx> &Kp, const int &nrp, const int &ncp) : I(Ip), J(Jp), K(Kp), nr(nrp), nc(ncp) {}
//! ### Copy constructor
/*!
*/
SpMatrix(const SpMatrix &A) : I(A.I), J(A.J), K(A.K), nr(A.nr), nc(A.nc) {}
//! ### Assignement operator with a sparse matrix input argument
/*!
Copies the _I_, _J_, _K_ of the input _A_ argument
(which is a sparse matrix) into the vectors of
calling instance.
*/
void operator=(const SpMatrix &A) {
assert(nr == A.nr && nc == A.nc);
I = A.I;
J = A.J;
K = A.K;
}
//! ### Matrix-vector product
/*!
The input parameter _u_ is the input vector
(i.e. the right operand).
*/
vectCplx operator*(const vectCplx &u) {
int ncoef = I.size();
vectCplx v(nr, 0.);
for (int j = 0; j < ncoef; j++)
v[I[j]] += K[j] * u[J[j]];
return v;
}
//! ### Matrix-vector product
/*!
    Another instantiation of the matrix-vector product
that avoids the generation of temporary instance for the
output vector. This routine achieves the operation
lhs = m*rhs
The left and right operands (_lhs_ and _rhs_) are templated
and can then be of any type (not necessarily of type vectCplx).
*/
template <typename LhsType, typename RhsType>
friend void MvProd(LhsType &lhs, const SpMatrix &m, const RhsType &rhs) {
int ncoef = m.I.size();
for (int j = 0; j < ncoef; j++)
lhs[m.I[j]] += m.K[j] * rhs[m.J[j]];
}
//! ### Modifies the size of the matrix
/*!
Changes the size of the matrix so that
the number of rows is set to _nbr_ and
the number of columns is set to _nbc_ and
the sizes of the 3 member vectors are set to _nbcoef_.
*/
void resize(const int nbr, const int nbc, const int nbcoef) {
assert(nbcoef <= nbr * nbc);
nr = nbr;
nc = nbc;
I.resize(nbcoef);
J.resize(nbcoef);
K.resize(nbcoef);
}
//! ### Access to row indices
/*!
    Returns a reference to the _i_th row index.
*/
int &I_(const int i) {
assert(i < I.size());
return I[i];
}
//! ### Access to column indices
/*!
    Returns a reference to the _i_th column index.
*/
int &J_(const int i) {
assert(i < J.size());
return J[i];
}
//! ### Access to coefficients
/*!
    Returns a reference to the _i_th coefficient stored in _K_.
*/
Cplx &K_(const int i) {
assert(i < K.size());
return K[i];
}
//! ### Access to number of rows
/*!
Returns the number of rows of the input argument _A_.
*/
friend const int &nb_rows(const SpMatrix &A) { return A.nr; }
//! ### Access to number of columns
/*!
Returns the number of columns of the input argument _A_.
*/
friend const int &nb_cols(const SpMatrix &A) { return A.nc; }
//! ### Access to number of non zero coefficients
/*!
Returns the number of non zero coefficients of the input argument _A_.
*/
friend int nb_coeff(const SpMatrix &A) { return A.I.size(); }
//! ### Compute the compression rate
/*!
1 - number of non zero coefficients/(nb_rows*nb_columns)
*/
friend Real CompressionRate(const SpMatrix &A) {
Real comp;
comp = ((double)A.I.size()) / ((double)(A.nr * A.nc)); // number of non zero coefficients/(nb_rows*nb_columns)
return (1 - comp);
}
};
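// Example usage (a sketch; assumes the htool aliases Cplx and vectCplx,
// e.g. Cplx = std::complex<double> and vectCplx = std::vector<Cplx>):
//
//   std::vector<int> I = {0, 1};
//   std::vector<int> J = {0, 1};
//   std::vector<Cplx> K = {Cplx(1., 0.), Cplx(2., 0.)};
//   SpMatrix A(I, J, K, 2, 2);   // 2x2 diagonal sparse matrix
//   vectCplx u(2, Cplx(1., 0.));
//   vectCplx v = A * u;          // v = {1, 2}
//   MvProd(v, A, u);             // accumulates: v += A*u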
} // namespace htool
#endif
|
{"hexsha": "0bf7fdaebea57cce3467b8244021a8f7b5d7abe2", "size": 4988, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/htool/types/sparsematrix.hpp", "max_stars_repo_name": "htool-ddm/htool", "max_stars_repo_head_hexsha": "e4dbec7c08c5008e62344fd0d5ebfdf95ef8863f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15.0, "max_stars_repo_stars_event_min_datetime": "2020-05-06T15:20:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T10:27:56.000Z", "max_issues_repo_path": "include/htool/types/sparsematrix.hpp", "max_issues_repo_name": "htool-ddm/htool", "max_issues_repo_head_hexsha": "e4dbec7c08c5008e62344fd0d5ebfdf95ef8863f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2020-05-25T13:59:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T16:40:45.000Z", "max_forks_repo_path": "include/htool/types/sparsematrix.hpp", "max_forks_repo_name": "PierreMarchand20/htool", "max_forks_repo_head_hexsha": "b6e91690f8d7c20d67dfb3b8db2e7ea674405a37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-04-25T07:44:35.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-05T16:57:00.000Z", "avg_line_length": 28.1807909605, "max_line_length": 166, "alphanum_fraction": 0.5549318364, "num_tokens": 1304}
|
[STATEMENT]
lemma has_field_derivative_powr_right [derivative_intros]:
"w \<noteq> 0 \<Longrightarrow> ((\<lambda>z. w powr z) has_field_derivative Ln w * w powr z) (at z)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<noteq> 0 \<Longrightarrow> ((powr) w has_field_derivative Ln w * w powr z) (at z)
[PROOF STEP]
unfolding powr_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<noteq> 0 \<Longrightarrow> ((\<lambda>z. if w = 0 then 0 else exp (z * Ln w)) has_field_derivative Ln w * (if w = 0 then 0 else exp (z * Ln w))) (at z)
[PROOF STEP]
by (intro derivative_eq_intros | simp)+
|
{"llama_tokens": 258, "file": null, "length": 2}
|
/-
Copyright (c) 2022 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anne Baanen, Alex J. Best
! This file was ported from Lean 3 source module linear_algebra.quotient_pi
! leanprover-community/mathlib commit 398f60f60b43ef42154bd2bdadf5133daf1577a4
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.LinearAlgebra.Pi
import Mathbin.LinearAlgebra.Quotient
/-!
# Submodule quotients and direct sums
This file contains some results on the quotient of a module by a direct sum of submodules,
and the direct sum of quotients of modules by submodules.
# Main definitions
* `submodule.pi_quotient_lift`: create a map out of the direct sum of quotients
* `submodule.quotient_pi_lift`: create a map out of the quotient of a direct sum
* `submodule.quotient_pi`: the quotient of a direct sum is the direct sum of quotients.
-/
namespace Submodule
open LinearMap
variable {ι R : Type _} [CommRing R]
variable {Ms : ι → Type _} [∀ i, AddCommGroup (Ms i)] [∀ i, Module R (Ms i)]
variable {N : Type _} [AddCommGroup N] [Module R N]
variable {Ns : ι → Type _} [∀ i, AddCommGroup (Ns i)] [∀ i, Module R (Ns i)]
/-- Lift a family of maps to the direct sum of quotients. -/
def piQuotientLift [Fintype ι] [DecidableEq ι] (p : ∀ i, Submodule R (Ms i)) (q : Submodule R N)
(f : ∀ i, Ms i →ₗ[R] N) (hf : ∀ i, p i ≤ q.comap (f i)) : (∀ i, Ms i ⧸ p i) →ₗ[R] N ⧸ q :=
lsum R (fun i => Ms i ⧸ p i) R fun i => (p i).mapQ q (f i) (hf i)
#align submodule.pi_quotient_lift Submodule.piQuotientLift
@[simp]
theorem piQuotientLift_mk [Fintype ι] [DecidableEq ι] (p : ∀ i, Submodule R (Ms i))
(q : Submodule R N) (f : ∀ i, Ms i →ₗ[R] N) (hf : ∀ i, p i ≤ q.comap (f i)) (x : ∀ i, Ms i) :
(piQuotientLift p q f hf fun i => Quotient.mk (x i)) = Quotient.mk (lsum _ _ R f x) := by
rw [pi_quotient_lift, lsum_apply, sum_apply, ← mkq_apply, lsum_apply, sum_apply,
_root_.map_sum] <;>
simp only [coe_proj, mapq_apply, mkq_apply, comp_apply]
#align submodule.pi_quotient_lift_mk Submodule.piQuotientLift_mk
@[simp]
theorem piQuotientLift_single [Fintype ι] [DecidableEq ι] (p : ∀ i, Submodule R (Ms i))
(q : Submodule R N) (f : ∀ i, Ms i →ₗ[R] N) (hf : ∀ i, p i ≤ q.comap (f i)) (i)
(x : Ms i ⧸ p i) : piQuotientLift p q f hf (Pi.single i x) = mapQ _ _ (f i) (hf i) x :=
by
simp_rw [pi_quotient_lift, lsum_apply, sum_apply, comp_apply, proj_apply]
rw [Finset.sum_eq_single i]
· rw [Pi.single_eq_same]
· rintro j - hj
rw [Pi.single_eq_of_ne hj, _root_.map_zero]
· intros
have := Finset.mem_univ i
contradiction
#align submodule.pi_quotient_lift_single Submodule.piQuotientLift_single
/-- Lift a family of maps to a quotient of direct sums. -/
def quotientPiLift (p : ∀ i, Submodule R (Ms i)) (f : ∀ i, Ms i →ₗ[R] Ns i)
(hf : ∀ i, p i ≤ ker (f i)) : (∀ i, Ms i) ⧸ pi Set.univ p →ₗ[R] ∀ i, Ns i :=
(pi Set.univ p).liftQ (LinearMap.pi fun i => (f i).comp (proj i)) fun x hx =>
mem_ker.mpr <| by
ext i
simpa using hf i (mem_pi.mp hx i (Set.mem_univ i))
#align submodule.quotient_pi_lift Submodule.quotientPiLift
@[simp]
theorem quotientPiLift_mk (p : ∀ i, Submodule R (Ms i)) (f : ∀ i, Ms i →ₗ[R] Ns i)
(hf : ∀ i, p i ≤ ker (f i)) (x : ∀ i, Ms i) :
quotientPiLift p f hf (Quotient.mk x) = fun i => f i (x i) :=
rfl
#align submodule.quotient_pi_lift_mk Submodule.quotientPiLift_mk
/-- The quotient of a direct sum is the direct sum of quotients. -/
@[simps]
def quotientPi [Fintype ι] [DecidableEq ι] (p : ∀ i, Submodule R (Ms i)) :
((∀ i, Ms i) ⧸ pi Set.univ p) ≃ₗ[R] ∀ i, Ms i ⧸ p i :=
{
quotientPiLift p (fun i => (p i).mkQ) fun i => by
simp with
toFun := quotientPiLift p (fun i => (p i).mkQ) fun i => by simp
invFun := piQuotientLift p (pi Set.univ p) single fun i => le_comap_single_pi p
left_inv := fun x =>
Quotient.inductionOn' x fun x' => by
simp_rw [Quotient.mk''_eq_mk', quotient_pi_lift_mk, mkq_apply, pi_quotient_lift_mk,
lsum_single, id_apply]
right_inv := by
rw [Function.rightInverse_iff_comp, ← coe_comp, ← @id_coe R]
refine' congr_arg _ (pi_ext fun i x => Quotient.inductionOn' x fun x' => funext fun j => _)
rw [comp_apply, pi_quotient_lift_single, Quotient.mk''_eq_mk', mapq_apply,
quotient_pi_lift_mk, id_apply]
by_cases hij : i = j <;> simp only [mkq_apply, coe_single]
· subst hij
simp only [Pi.single_eq_same]
· simp only [Pi.single_eq_of_ne (Ne.symm hij), quotient.mk_zero] }
#align submodule.quotient_pi Submodule.quotientPi
end Submodule
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/LinearAlgebra/QuotientPi.lean"}
|
import os, sys
import numpy as np
execfile('JvM_correction.py')
execfile('reduction_utils.py')
execfile('ImportMS.py')
sys.path.append('../')
import diskdictionary as disk
# read which disk this is about
target = str(np.loadtxt('whichdisk.txt', dtype='str'))
# Perform the imaging
imagename = 'data/deep_'+target+'_data'
for ext in ['.image', '.mask', '.model', '.pb', '.psf', '.residual', '.sumwt']:
os.system('rm -rf '+imagename+ext)
tclean(vis='data/'+target+'_continuum_spavg_tbin30s.ms', imagename=imagename,
specmode='mfs', deconvolver='multiscale',
scales=disk.disk[target]['cscales'], mask=disk.disk[target]['cmask'],
imsize=1024, cell='.006arcsec', gain=disk.disk[target]['cgain'],
cycleniter=disk.disk[target]['ccycleniter'], cyclefactor=1, nterms=1,
weighting='briggs', robust=disk.disk[target]['crobust'],
uvtaper=disk.disk[target]['ctaper'],
niter=50000, threshold=disk.disk[target]['gthresh'], savemodel='none')
# Perform the JvM correction
eps = do_JvM_correction_and_get_epsilon(imagename)
# Estimate map RMS as in DSHARP
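# Noise annulus: from 1.2 x the disk's outer radius out to 4.5 arcsec,
# centred on the mask centre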
coords = str.split(str.split(disk.disk[target]['cmask'], ']')[0], '[[')[1]
noise_ann = "annulus[[%s], ['%.2farcsec', '4.5arcsec']]" % \
(coords, 1.2 * disk.disk[target]['rout'])
estimate_SNR(imagename+'.JvMcorr.image',
disk_mask=disk.disk[target]['cmask'], noise_mask=noise_ann)
print('epsilon = ', eps)
# Export FITS files of the original + JvM-corrected images
exportfits(imagename+'.image', imagename+'.fits', overwrite=True)
exportfits(imagename+'.JvMcorr.image', imagename+'.JvMcorr.fits',
overwrite=True)
|
{"hexsha": "b8d605e8a2c6a895bf0736835079d5163c5091ce", "size": 1648, "ext": "py", "lang": "Python", "max_stars_repo_path": "CSD_modeling/data_imaging.py", "max_stars_repo_name": "seanandrews/DSHARP_CPDs", "max_stars_repo_head_hexsha": "40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-04T15:55:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-04T15:55:03.000Z", "max_issues_repo_path": "CSD_modeling/data_imaging.py", "max_issues_repo_name": "seanandrews/DSHARP_CPDs", "max_issues_repo_head_hexsha": "40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CSD_modeling/data_imaging.py", "max_forks_repo_name": "seanandrews/DSHARP_CPDs", "max_forks_repo_head_hexsha": "40d8f02945e0d412c5c912b050d4a3d8c6dbbfa2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-22T19:04:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-22T19:04:48.000Z", "avg_line_length": 41.2, "max_line_length": 79, "alphanum_fraction": 0.6796116505, "include": true, "reason": "import numpy", "num_tokens": 475}
|
from .base import Strategy, Transform
from .random import LHS
from summit.domain import *
from summit.utils.dataset import DataSet
import botorch
from botorch.models.model import Model
from botorch.acquisition.objective import ScalarizedObjective
import torch
from torch import Tensor
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
import numpy as np
from typing import Type, Tuple, Union, Optional
class MTBO(Strategy):
"""Multitask Bayesian Optimisation
This strategy enables pre-training a model with past reaction data
in order to enable faster optimisation.
Parameters
----------
domain : :class:`~summit.domain.Domain`
The domain of the optimization
transform : :class:`~summit.strategies.base.Transform`, optional
A transform object. By default, no transformation will be done
on the input variables or objectives.
pretraining_data : :class:`~summit.utils.data.DataSet`
A DataSet with pretraining data. Must contain a metadata column named "task"
        that specifies the task for all data.
task : int, optional
The index of the task being optimized. Defaults to 1.
categorical_method : str, optional
The method for transforming categorical variables. Either
"one-hot" or "descriptors". Descriptors must be included in the
categorical variables for the later.
Notes
-----
References
----------
.. [Swersky] K. Swersky et al., in `NIPS Proceedings <http://papers.neurips.cc/paper/5086-multi-task-bayesian-optimization>`_, 2013, pp. 2004–2012.
Examples
--------
>>> from summit.domain import Domain, ContinuousVariable
    >>> from summit.strategies import MTBO
>>> domain = Domain()
>>> domain += ContinuousVariable(name='temperature', description='reaction temperature in celsius', bounds=[0, 1])
>>> domain += ContinuousVariable(name='flowrate_a', description='flow of reactant a in mL/min', bounds=[0, 1])
>>> domain += ContinuousVariable(name='yield', description='relative conversion to xyz', bounds=[0,100], is_objective=True, maximize=True)
    >>> strategy = MTBO(domain, task=1)
    >>> next_experiments = strategy.suggest_experiments(3)
"""
def __init__(
self,
domain: Domain,
pretraining_data=None,
transform: Transform = None,
task: int = 1,
categorical_method: str = "one-hot",
**kwargs
):
Strategy.__init__(self, domain, transform, **kwargs)
self.pretraining_data = pretraining_data
self.task = task
self.categorical_method = categorical_method
if self.categorical_method not in ["one-hot", "descriptors"]:
raise ValueError(
"categorical_method must be one of 'one-hot' or 'descriptors'."
)
self.reset()
def suggest_experiments(self, num_experiments, prev_res: DataSet = None, **kwargs):
# Suggest lhs initial design or append new experiments to previous experiments
if prev_res is None:
lhs = LHS(self.domain)
self.iterations += 1
k = num_experiments if num_experiments > 1 else 2
conditions = lhs.suggest_experiments(k)
conditions[("task", "METADATA")] = self.task
return conditions
elif prev_res is not None and self.all_experiments is None:
self.all_experiments = prev_res
elif prev_res is not None and self.all_experiments is not None:
self.all_experiments = self.all_experiments.append(prev_res)
self.iterations += 1
# Combine pre-training and experiment data
data = self.all_experiments.append(self.pretraining_data)
# Get inputs (decision variables) and outputs (objectives)
inputs, output = self.transform.transform_inputs_outputs(
data,
categorical_method=self.categorical_method,
standardize_inputs=True,
standardize_outputs=True,
)
# Add column to inputs indicating task
task_data = data["task"].to_numpy()
task_data = np.atleast_2d(task_data).T
        inputs_task = np.append(inputs.data_to_numpy(), task_data, axis=1).astype(
            float  # np.float was removed in NumPy 1.24; the builtin float is equivalent
        )
# Train model
model = botorch.models.MultiTaskGP(
torch.tensor(inputs_task).float(),
torch.tensor(output.data_to_numpy()).float(),
task_feature=-1,
output_tasks=[self.task],
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
botorch.fit.fit_gpytorch_model(mll)
# Create acquisition function
objective = self.domain.output_variables[0]
if objective.maximize:
fbest_scaled = data[data["task"] == self.task].max()[objective.name]
maximize = True
else:
fbest_scaled = data[data["task"] == self.task].min()[objective.name]
maximize = False
ei = CategoricalEI(self.domain, model, best_f=fbest_scaled, maximize=maximize)
        # Optimize acquisition function
results, acq_values = botorch.optim.optimize_acqf(
acq_function=ei,
bounds=self._get_bounds(),
num_restarts=20,
q=num_experiments,
raw_samples=100,
)
        # Convert result to dataset
result = DataSet(
results.detach().numpy(),
columns=inputs.data_columns,
)
# Untransform
result = self.transform.un_transform(
result, categorical_method=self.categorical_method, standardize_inputs=True
)
# Add metadata
result[("strategy", "METADATA")] = "MTBO"
result[("task", "METADATA")] = self.task
return result
def _get_bounds(self):
bounds = []
for v in self.domain.input_variables:
if isinstance(v, ContinuousVariable):
mean = self.transform.input_means[v.name]
std = self.transform.input_stds[v.name]
v_bounds = np.array(v.bounds)
v_bounds = (v_bounds - mean) / std
bounds.append(v_bounds)
elif (
isinstance(v, CategoricalVariable)
and self.categorical_method == "one-hot"
):
bounds += [[0, 1] for _ in v.levels]
return torch.tensor(bounds).T.float()
def reset(self):
"""Reset MTBO state"""
self.all_experiments = None
self.iterations = 0
self.fbest = (
float("inf") if self.domain.output_variables[0].maximize else -float("inf")
)
@staticmethod
def standardize(X):
mean, std = X.mean(), X.std()
std[std < 1e-5] = 1e-5
scaled = (X - mean.to_numpy()) / std.to_numpy()
return scaled.to_numpy(), mean, std
def to_dict(self):
ae = (
self.all_experiments.to_dict() if self.all_experiments is not None else None
)
strategy_params = dict(
all_experiments=ae,
categorical_method=self.categorical_method,
task=self.task,
)
return super().to_dict(**strategy_params)
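# Illustrative usage sketch (not part of the library source): the optimisation
# loop alternates suggest_experiments with running the experiment and feeding
# the results back via `prev_res`.  The experiment object `emul` and the task
# index are hypothetical placeholders; pretraining_data must carry a
# ("task", "METADATA") column as described in the docstring above.
def _example_mtbo_loop(domain, pretraining_data, emul, num_rounds=10):
    strategy = MTBO(domain, pretraining_data=pretraining_data, task=1)
    prev_res = None
    for _ in range(num_rounds):
        # First call (prev_res=None) returns an LHS design; later calls
        # fit the multitask GP and optimise expected improvement.
        conditions = strategy.suggest_experiments(1, prev_res=prev_res)
        prev_res = emul.run_experiments(conditions)  # hypothetical runner
    return strategy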
class CategoricalEI(botorch.acquisition.ExpectedImprovement):
def __init__(
self,
domain: Domain,
model: Model,
best_f: Union[float, Tensor],
objective: Optional[ScalarizedObjective] = None,
maximize: bool = True,
) -> None:
super().__init__(model, best_f, objective, maximize)
self._domain = domain
def forward(self, X: Tensor) -> Tensor:
X = self.round_to_one_hot(X, self._domain)
return super().forward(X)
    @staticmethod
    def round_to_one_hot(X: Tensor, domain: Domain):
        """Round all categorical variables to a one-hot encoding"""
        X = X.clone()  # avoid mutating the tensor handed in by the optimizer
        c = 0
        for v in domain.input_variables:
            if isinstance(v, CategoricalVariable):
                n_levels = len(v.levels)
                # Pick the level with the largest value along the last
                # (one-hot) axis; X has shape (batch, q, dim).  The original
                # argmax over axis 1 and the un-offset write were incorrect.
                levels_selected = X[..., c : c + n_levels].argmax(dim=-1)
                one_hot = torch.zeros_like(X[..., c : c + n_levels])
                one_hot.scatter_(-1, levels_selected.unsqueeze(-1), 1.0)
                X[..., c : c + n_levels] = one_hot
                c += n_levels
            else:
                c += 1
        return X
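# Minimal sketch of the rounding step above, assuming one categorical
# variable with three levels occupying columns 0..2 of a (batch, q, dim)
# candidate tensor followed by one continuous column.
def _example_one_hot_rounding():
    X = torch.tensor([[[0.2, 0.7, 0.1, 0.5]]])  # shape (1, 1, 4)
    levels = X[..., 0:3].argmax(dim=-1)         # level 1 has the largest score
    one_hot = torch.zeros_like(X[..., 0:3])
    one_hot.scatter_(-1, levels.unsqueeze(-1), 1.0)
    X[..., 0:3] = one_hot
    return X                                    # tensor([[[0., 1., 0., 0.5]]])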
class STBO(Strategy):
"""Multitask Bayesian Optimisation
This strategy enables pre-training a model with past reaction data
in order to enable faster optimisation.
Parameters
----------
domain : :class:`~summit.domain.Domain`
The domain of the optimization
transform : :class:`~summit.strategies.base.Transform`, optional
        A transform object. By default, no transformation will be done
on the input variables or objectives.
categorical_method : str, optional
The method for transforming categorical variables. Either
"one-hot" or "descriptors". Descriptors must be included in the
        categorical variables for the latter.
Notes
-----
References
----------
.. [Swersky] K. Swersky et al., in `NIPS Proceedings <http://papers.neurips.cc/paper/5086-multi-task-bayesian-optimization>`_, 2013, pp. 2004–2012.
Examples
--------
>>> from summit.domain import Domain, ContinuousVariable
>>> from summit.strategies import NelderMead
>>> domain = Domain()
>>> domain += ContinuousVariable(name='temperature', description='reaction temperature in celsius', bounds=[0, 1])
>>> domain += ContinuousVariable(name='flowrate_a', description='flow of reactant a in mL/min', bounds=[0, 1])
>>> domain += ContinuousVariable(name='yield', description='relative conversion to xyz', bounds=[0,100], is_objective=True, maximize=True)
>>> strategy = NelderMead(domain)
>>> next_experiments = strategy.suggest_experiments()
>>> print(next_experiments)
NAME temperature flowrate_a strategy
TYPE DATA DATA METADATA
0 0.500 0.500 Nelder-Mead Simplex
1 0.625 0.500 Nelder-Mead Simplex
2 0.500 0.625 Nelder-Mead Simplex
"""
def __init__(
self,
domain: Domain,
transform: Transform = None,
categorical_method: str = "one-hot",
**kwargs
):
Strategy.__init__(self, domain, transform, **kwargs)
self.categorical_method = categorical_method
if self.categorical_method not in ["one-hot", "descriptors"]:
raise ValueError(
"categorical_method must be one of 'one-hot' or 'descriptors'."
)
self.reset()
def suggest_experiments(self, num_experiments, prev_res: DataSet = None, **kwargs):
# Suggest lhs initial design or append new experiments to previous experiments
if prev_res is None:
lhs = LHS(self.domain)
self.iterations += 1
k = num_experiments if num_experiments > 1 else 2
conditions = lhs.suggest_experiments(k)
return conditions
elif prev_res is not None and self.all_experiments is None:
self.all_experiments = prev_res
elif prev_res is not None and self.all_experiments is not None:
self.all_experiments = self.all_experiments.append(prev_res)
self.iterations += 1
data = self.all_experiments
# Get inputs (decision variables) and outputs (objectives)
inputs, output = self.transform.transform_inputs_outputs(
data,
categorical_method=self.categorical_method,
standardize_inputs=True,
standardize_outputs=True,
)
# Train model
model = botorch.models.SingleTaskGP(
torch.tensor(inputs.data_to_numpy()).float(),
torch.tensor(output.data_to_numpy()).float(),
)
mll = ExactMarginalLogLikelihood(model.likelihood, model)
botorch.fit.fit_gpytorch_model(mll)
# Create acquisition function
objective = self.domain.output_variables[0]
if objective.maximize:
fbest_scaled = data.max()[objective.name]
maximize = True
else:
fbest_scaled = data.min()[objective.name]
maximize = False
ei = CategoricalEI(self.domain, model, best_f=fbest_scaled, maximize=maximize)
# Optimize acquisition function
results, acq_values = botorch.optim.optimize_acqf(
acq_function=ei,
bounds=self._get_bounds(),
num_restarts=20,
q=num_experiments,
raw_samples=100,
)
        # Convert result to dataset
result = DataSet(
results.detach().numpy(),
columns=inputs.data_columns,
)
# Untransform
result = self.transform.un_transform(
result, categorical_method=self.categorical_method, standardize_inputs=True
)
# Add metadata
result[("strategy", "METADATA")] = "STBO"
return result
def _get_bounds(self):
bounds = []
for v in self.domain.input_variables:
if isinstance(v, ContinuousVariable):
mean = self.transform.input_means[v.name]
std = self.transform.input_stds[v.name]
v_bounds = np.array(v.bounds)
v_bounds = (v_bounds - mean) / std
bounds.append(v_bounds)
elif (
isinstance(v, CategoricalVariable)
and self.categorical_method == "one-hot"
):
bounds += [[0, 1] for _ in v.levels]
return torch.tensor(bounds).T.float()
def reset(self):
"""Reset MTBO state"""
self.all_experiments = None
self.iterations = 0
self.fbest = (
float("inf") if self.domain.output_variables[0].maximize else -float("inf")
)
@staticmethod
def standardize(X):
mean, std = X.mean(), X.std()
std[std < 1e-5] = 1e-5
scaled = (X - mean.to_numpy()) / std.to_numpy()
return scaled.to_numpy(), mean, std
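# Companion sketch to the MTBO example above (not part of the library
# source): STBO is driven the same way but needs no pretraining data or
# task column.  `emul` is again a hypothetical experiment runner.
def _example_stbo_loop(domain, emul, num_rounds=10):
    strategy = STBO(domain)
    prev_res = None
    for _ in range(num_rounds):
        conditions = strategy.suggest_experiments(1, prev_res=prev_res)
        prev_res = emul.run_experiments(conditions)
    return strategy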
|
{"hexsha": "3c3a88030c309bb846fdf059ed9cb55e92c1a42c", "size": 14772, "ext": "py", "lang": "Python", "max_stars_repo_path": "summit/strategies/multitask.py", "max_stars_repo_name": "jezsadler/summit", "max_stars_repo_head_hexsha": "982de7f6424bb94da2084d4d84396b4b2673eeca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "summit/strategies/multitask.py", "max_issues_repo_name": "jezsadler/summit", "max_issues_repo_head_hexsha": "982de7f6424bb94da2084d4d84396b4b2673eeca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "summit/strategies/multitask.py", "max_forks_repo_name": "jezsadler/summit", "max_forks_repo_head_hexsha": "982de7f6424bb94da2084d4d84396b4b2673eeca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3842364532, "max_line_length": 151, "alphanum_fraction": 0.6108177633, "include": true, "reason": "import numpy", "num_tokens": 3294}
|
# -*- coding: UTF-8 -*-
from typing import Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
import numpy as np
from .base import TK2Conv2D, TK2Linear
class TK2LeNet5(nn.Module):
def __init__(self, num_classes: int, rs: Union[list, np.ndarray]):
"""LeNet-5 based on the Tucker-2.
Parameters
----------
num_classes : int
The number of classes
rs : Union[list, numpy.ndarray]
            The ranks of the network; must have length 4.
"""
super(TK2LeNet5, self).__init__()
        assert len(rs) == 4, "rs must contain exactly 4 ranks."
self.c1 = TK2Conv2D(1, 20, [rs[0], rs[0]], 5, padding=2)
self.s2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.c3 = TK2Conv2D(20, 50, [rs[1], rs[1]], 5)
self.s4 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)
self.fc5 = TK2Linear([5, 10, 25], 320, [rs[2], rs[2]])
self.fc6 = TK2Linear([320], num_classes, [rs[3]])
    def forward(self, inputs: Tensor) -> Tensor:
        r"""Forward pass.
        Parameters
        ----------
        inputs : torch.Tensor
            tensor :math:`\in \mathbb{R}^{b \times C \times H \times W}`
        Returns
        -------
        torch.Tensor
            tensor :math:`\in \mathbb{R}^{b \times num\_classes}`
        """
out = self.c1(inputs)
out = F.relu(out)
out = self.s2(out)
out = self.c3(out)
out = F.relu(out)
out = self.s4(out)
out = out.view(inputs.size(0), -1)
out = self.fc5(out)
out = F.relu(out)
out = self.fc6(out)
return out
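# Quick smoke test for the network above (a sketch: the rank values are
# arbitrary and the surrounding tednet package must be importable).
def _example_tk2_lenet():
    model = TK2LeNet5(num_classes=10, rs=[6, 6, 6, 6])
    x = torch.randn(4, 1, 28, 28)  # a batch of MNIST-sized inputs
    out = model(x)
    # 28x28 -> pool 14x14 -> conv 10x10 -> pool 5x5 -> 50*5*5=1250 -> 320 -> 10
    assert out.shape == (4, 10)
    return out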
|
{"hexsha": "7edf1338dff95b2d18b0b6b1304560851e4255dd", "size": 1687, "ext": "py", "lang": "Python", "max_stars_repo_path": "tednet/tnn/tucker2/tk2_lenet.py", "max_stars_repo_name": "perryuu/tednet", "max_stars_repo_head_hexsha": "9e05d7df3b690518921b3d8c2289ab638dbc221c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2021-01-05T16:39:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T11:33:58.000Z", "max_issues_repo_path": "tednet/tnn/tucker2/tk2_lenet.py", "max_issues_repo_name": "perryuu/tednet", "max_issues_repo_head_hexsha": "9e05d7df3b690518921b3d8c2289ab638dbc221c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-13T04:59:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-13T06:58:06.000Z", "max_forks_repo_path": "tednet/tnn/tucker2/tk2_lenet.py", "max_forks_repo_name": "perryuu/tednet", "max_forks_repo_head_hexsha": "9e05d7df3b690518921b3d8c2289ab638dbc221c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-04-20T15:34:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T10:45:20.000Z", "avg_line_length": 25.5606060606, "max_line_length": 79, "alphanum_fraction": 0.5323058684, "include": true, "reason": "import numpy", "num_tokens": 487}
|
(*
Theory: PDF_Compiler.thy
  Author: Manuel Eberl
The concrete compiler that compiles a PDF expression into a target language expression
that describes a density function on the corresponding measure space.
*)
section \<open>Concrete PDF Compiler\<close>
theory PDF_Compiler
imports PDF_Compiler_Pred PDF_Target_Density_Contexts
begin
inductive expr_has_density_cexpr :: "cdens_ctxt \<Rightarrow> expr \<Rightarrow> cexpr \<Rightarrow> bool"
("(1_/ \<turnstile>\<^sub>c/ (_ \<Rightarrow>/ _))" [50,0,50] 50) where
(* edc_equiv: "cexpr_equiv f1 f2 \<Longrightarrow> \<Gamma> \<turnstile> e : t \<Longrightarrow> is_density_expr (vs,vs',\<Gamma>,\<delta>) t f2 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f1 \<Longrightarrow> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f2"*)
edc_val: "countable_type (val_type v) \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Val v \<Rightarrow>
map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>)) *\<^sub>c \<langle>CVar 0 =\<^sub>c CVal v\<rangle>\<^sub>c"
| edc_var: "x \<in> set vs \<Longrightarrow> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Var x \<Rightarrow> marg_dens_cexpr \<Gamma> vs x \<delta>"
| edc_pair: "x \<in> set vs \<Longrightarrow> y \<in> set vs \<Longrightarrow> x \<noteq> y \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c <Var x, Var y> \<Rightarrow> marg_dens2_cexpr \<Gamma> vs x y \<delta>"
| edc_fail: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Fail t \<Rightarrow> CReal 0"
| edc_let: "([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
(shift_vars vs, map Suc vs', the (expr_type \<Gamma> e) \<cdot> \<Gamma>,
map_vars Suc \<delta> *\<^sub>c f) \<turnstile>\<^sub>c e' \<Rightarrow> g \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c LET e IN e' \<Rightarrow> map_vars (\<lambda>x. x - 1) g"
| edc_rand: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Random dst e \<Rightarrow>
\<integral>\<^sub>c map_vars (case_nat 0 (\<lambda>x. x + 2)) f *\<^sub>c
dist_dens_cexpr dst (CVar 0) (CVar 1) \<partial>dist_param_type dst"
| edc_rand_det: "randomfree e \<Longrightarrow> free_vars e \<subseteq> set vs' \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Random dst e \<Rightarrow>
map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>)) *\<^sub>c
dist_dens_cexpr dst (map_vars Suc (expr_rf_to_cexpr e)) (CVar 0)"
| edc_if_det: "randomfree b \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta> *\<^sub>c \<langle>expr_rf_to_cexpr b\<rangle>\<^sub>c) \<turnstile>\<^sub>c e1 \<Rightarrow> f1 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta> *\<^sub>c \<langle>\<not>\<^sub>c expr_rf_to_cexpr b\<rangle>\<^sub>c) \<turnstile>\<^sub>c e2 \<Rightarrow> f2 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c IF b THEN e1 ELSE e2 \<Rightarrow> f1 +\<^sub>c f2"
| edc_if: "([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>c b \<Rightarrow> f \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta> *\<^sub>c cexpr_subst_val f TRUE) \<turnstile>\<^sub>c e1 \<Rightarrow> g1 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta> *\<^sub>c cexpr_subst_val f FALSE) \<turnstile>\<^sub>c e2 \<Rightarrow> g2 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c IF b THEN e1 ELSE e2 \<Rightarrow> g1 +\<^sub>c g2"
| edc_op_discr: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> \<Gamma> \<turnstile> e : t \<Longrightarrow>
op_type oper t = Some t' \<Longrightarrow> countable_type t' \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c oper $$ e \<Rightarrow>
\<integral>\<^sub>c \<langle>(oper $$\<^sub>c (CVar 0)) =\<^sub>c CVar 1\<rangle>\<^sub>c *\<^sub>c map_vars (case_nat 0 (\<lambda>x. x+2)) f \<partial>t"
| edc_fst: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> \<Gamma> \<turnstile> e : PRODUCT t t' \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Fst $$ e \<Rightarrow>
\<integral>\<^sub>c (map_vars (case_nat 0 (\<lambda>x. x + 2)) f \<circ>\<^sub>c <CVar 1, CVar 0>\<^sub>c) \<partial>t'"
| edc_snd: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> \<Gamma> \<turnstile> e : PRODUCT t t' \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Snd $$ e \<Rightarrow>
\<integral>\<^sub>c (map_vars (case_nat 0 (\<lambda>x. x + 2)) f \<circ>\<^sub>c <CVar 0, CVar 1>\<^sub>c) \<partial>t"
| edc_neg: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Minus $$ e \<Rightarrow> f \<circ>\<^sub>c (\<lambda>\<^sub>cx. -\<^sub>c x)"
| edc_addc: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> randomfree e' \<Longrightarrow> free_vars e' \<subseteq> set vs' \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Add $$ <e, e'> \<Rightarrow>
f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x -\<^sub>c map_vars Suc (expr_rf_to_cexpr e'))"
| edc_multc: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> c \<noteq> 0 \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Mult $$ <e, Val (RealVal c)> \<Rightarrow>
(f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x *\<^sub>c CReal (inverse c))) *\<^sub>c CReal (inverse (abs c))"
| edc_add: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow> \<Gamma> \<turnstile> e : PRODUCT t t \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Add $$ e \<Rightarrow>
\<integral>\<^sub>c (map_vars (case_nat 0 (\<lambda>x. x+2)) f \<circ>\<^sub>c (\<lambda>\<^sub>cx. <x, CVar 1 -\<^sub>c x>\<^sub>c)) \<partial>t"
| edc_inv: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Inverse $$ e \<Rightarrow>
(f \<circ>\<^sub>c (\<lambda>\<^sub>cx. inverse\<^sub>c x)) *\<^sub>c (\<lambda>\<^sub>cx. (inverse\<^sub>c x) ^\<^sub>c CInt 2)"
| edc_exp: "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c Exp $$ e \<Rightarrow>
(\<lambda>\<^sub>cx. IF\<^sub>c CReal 0 <\<^sub>c x THEN (f \<circ>\<^sub>c ln\<^sub>c x) *\<^sub>c inverse\<^sub>c x ELSE CReal 0)"
code_pred expr_has_density_cexpr .
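(* Informal reading (a sketch, not part of the formalisation): the judgement
   "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f" states that the target-language expression f
   denotes a density for the result of e, with variable 0 of f standing for
   the result value.  For instance, edc_multc is the usual scalar
   change-of-variables rule: if e has density f and c \<noteq> 0, then
   Mult $$ <e, Val (RealVal c)> has density x \<mapsto> f (x * inverse c) * inverse \<bar>c\<bar>. *)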
text \<open>Auxiliary lemmas\<close>
lemma cdens_ctxt_invar_insert:
assumes inv: "cdens_ctxt_invar vs vs' \<Gamma> \<delta>"
assumes t : "\<Gamma> \<turnstile> e : t'"
assumes free_vars: "free_vars e \<subseteq> set vs \<union> set vs'"
assumes hd: "dens_ctxt_\<alpha> ([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))"
notes invar = cdens_ctxt_invarD[OF inv]
assumes wf1: "is_density_expr ([], vs @ vs', \<Gamma>, CReal 1) t' f"
shows "cdens_ctxt_invar (shift_vars vs) (map Suc vs') (t' \<cdot> \<Gamma>) (map_vars Suc \<delta> *\<^sub>c f)"
proof (intro cdens_ctxt_invarI)
show t': "case_nat t' \<Gamma> \<turnstile>\<^sub>c map_vars Suc \<delta> *\<^sub>c f : REAL" using invar wf1
by (intro cet_op[where t = "PRODUCT REAL REAL"])
(auto intro!: cexpr_typing.intros cexpr_typing_map_vars simp: o_def dest: is_density_exprD)
let ?vs = "shift_var_set (set vs)" and ?vs' = "Suc ` set vs'" and ?\<Gamma> = "case_nat t' \<Gamma>" and
?\<delta> = "insert_dens (set vs) (set vs') (\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> x))
(\<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>)))"
interpret density_context "set vs" "set vs'" \<Gamma> "\<lambda>\<sigma>. extract_real (cexpr_sem \<sigma> \<delta>)"
by (rule density_context_\<alpha>[OF inv])
have dc: "density_context {} (set vs \<union> set vs') \<Gamma> (\<lambda>_. 1)"
by (rule density_context_empty)
hence dens: "has_parametrized_subprob_density (state_measure (set vs \<union> set vs') \<Gamma>)
(\<lambda>\<rho>. dens_ctxt_measure ({}, set vs \<union> set vs', \<Gamma>, \<lambda>_. 1) \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e))
(stock_measure t') (\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> x))"
using hd free_vars by (intro expr_has_density_sound_aux[OF _ t dc])
(auto simp: shift_var_set_def dens_ctxt_\<alpha>_def simp: extract_real_def one_ennreal_def)
from density_context.density_context_insert[OF density_context_\<alpha>[OF inv] this]
have "density_context ?vs ?vs' ?\<Gamma> ?\<delta>" .
have dc: "density_context (shift_var_set (set vs)) (Suc ` set vs') (case_nat t' \<Gamma>)
(\<lambda>\<sigma>. extract_real (cexpr_sem \<sigma> (map_vars Suc \<delta> *\<^sub>c f)))"
proof (rule density_context_equiv)
show "density_context (shift_var_set (set vs)) (Suc ` set vs') (case_nat t' \<Gamma>) ?\<delta>" by fact
show "(\<lambda>x. ennreal (extract_real (cexpr_sem x (map_vars Suc \<delta> *\<^sub>c f))))
\<in> borel_measurable (state_measure (?vs \<union> ?vs') ?\<Gamma>)"
apply (rule measurable_compose[OF _ measurable_ennreal], rule measurable_compose[OF _ measurable_extract_real])
apply (rule measurable_cexpr_sem[OF t'])
apply (insert invar is_density_exprD[OF wf1], auto simp: shift_var_set_def)
done
next
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure (?vs \<union> ?vs') ?\<Gamma>)"
have [simp]: "case_nat (\<sigma> 0) (\<lambda>x. \<sigma> (Suc x)) = \<sigma>" by (intro ext) (simp split: nat.split)
from \<sigma> show "insert_dens (set vs) (set vs') (\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> x))
(\<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) \<sigma> =
ennreal (extract_real (cexpr_sem \<sigma> (map_vars Suc \<delta> *\<^sub>c f)))"
unfolding insert_dens_def using invar is_density_exprD[OF wf1]
apply (subst ennreal_mult'[symmetric])
apply (erule nonneg_cexprD)
apply (rule measurable_space[OF measurable_remove_var[where t=t']])
apply simp
apply (subst cexpr_sem_Mult[of ?\<Gamma> _ _ _ "?vs \<union> ?vs'"])
apply (auto intro!: cexpr_typing_map_vars ennreal_mult'[symmetric]
simp: o_def shift_var_set_def eval_cexpr_def
cexpr_sem_map_vars remove_var_def)
done
qed
from subprob_imp_subprob_cexpr[OF this]
show "subprob_cexpr (set (shift_vars vs)) (set (map Suc vs')) (case_nat t' \<Gamma>)
(map_vars Suc \<delta> *\<^sub>c f)" by simp
have "Suc -` shift_var_set (set vs \<union> set vs') = set vs \<union> set vs'"
by (auto simp: shift_var_set_def)
moreover have "nonneg_cexpr (shift_var_set (set vs \<union> set vs')) (case_nat t' \<Gamma>) f"
using wf1[THEN is_density_exprD_nonneg] by simp
ultimately show "nonneg_cexpr (set (shift_vars vs) \<union> set (map Suc vs')) (case_nat t' \<Gamma>) (map_vars Suc \<delta> *\<^sub>c f)"
using invar is_density_exprD[OF wf1]
by (intro nonneg_cexpr_Mult)
(auto intro!: cexpr_typing_map_vars nonneg_cexpr_map_vars
simp: o_def shift_var_set_def image_Un)
qed (insert invar is_density_exprD[OF wf1],
auto simp: shift_vars_def shift_var_set_def distinct_map intro!: cexpr_typing_map_vars)
lemma cdens_ctxt_invar_insert_bool:
assumes dens: "dens_ctxt_\<alpha> ([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>d b \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))"
assumes wf: "is_density_expr ([], vs @ vs', \<Gamma>, CReal 1) BOOL f"
assumes t: "\<Gamma> \<turnstile> b : BOOL" and vars: "free_vars b \<subseteq> set vs \<union> set vs'"
assumes invar: "cdens_ctxt_invar vs vs' \<Gamma> \<delta>"
shows "cdens_ctxt_invar vs vs' \<Gamma> (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v))"
proof (intro cdens_ctxt_invarI nonneg_cexpr_Mult nonneg_cexpr_subst_val)
note invar' = cdens_ctxt_invarD[OF invar] and wf' = is_density_exprD[OF wf]
show "\<Gamma> \<turnstile>\<^sub>c \<delta> *\<^sub>c cexpr_subst_val f (BoolVal v) : REAL" using invar' wf'
by (intro cet_op[where t = "PRODUCT REAL REAL"] cet_pair cexpr_typing_subst_val) simp_all
let ?M = "\<lambda>\<rho>. dens_ctxt_measure ({}, set vs \<union> set vs', \<Gamma>, \<lambda>_. 1) \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> b)"
have dens': "has_parametrized_subprob_density (state_measure (set vs \<union> set vs') \<Gamma>) ?M
(stock_measure BOOL) (\<lambda>\<sigma> v. ennreal (eval_cexpr f \<sigma> v))"
using density_context_\<alpha>[OF invar] t vars dens unfolding dens_ctxt_\<alpha>_def
by (intro expr_has_density_sound_aux density_context.density_context_empty)
(auto simp: extract_real_def one_ennreal_def)
thus nonneg: "nonneg_cexpr (shift_var_set (set vs \<union> set vs')) (case_nat BOOL \<Gamma>) f"
using wf[THEN is_density_exprD_nonneg] by simp
show "subprob_cexpr (set vs) (set vs') \<Gamma> (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v))"
proof (intro subprob_cexprI)
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
let ?eval = "\<lambda>e \<sigma>. extract_real (cexpr_sem (merge (set vs) (set vs') (\<sigma>, \<rho>)) e)"
{
fix \<sigma> assume \<sigma> : "\<sigma> \<in> space (state_measure (set vs) \<Gamma>)"
have A: "?eval (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v)) \<sigma> =
?eval \<delta> \<sigma> * ?eval (cexpr_subst_val f (BoolVal v)) \<sigma>" using wf' invar' \<sigma> \<rho>
by (subst cexpr_sem_Mult[where \<Gamma> = \<Gamma> and V = "set vs \<union> set vs'"])
(auto intro: merge_in_state_measure simp: shift_var_set_def)
have "?eval \<delta> \<sigma> \<ge> 0" using \<sigma> \<rho> invar'
by (blast dest: nonneg_cexprD intro: merge_in_state_measure)
moreover have "?eval (cexpr_subst_val f (BoolVal v)) \<sigma> \<ge> 0" using \<sigma> \<rho> nonneg
by (intro nonneg_cexprD nonneg_cexpr_subst_val) (auto intro: merge_in_state_measure)
moreover have B: "ennreal (?eval (cexpr_subst_val f (BoolVal v)) \<sigma>) =
ennreal (eval_cexpr f (merge (set vs) (set vs') (\<sigma>, \<rho>)) (BoolVal v))"
(is "_ = ?f (BoolVal v)") by (simp add: eval_cexpr_def)
hence "ennreal (?eval (cexpr_subst_val f (BoolVal v)) \<sigma>) \<le> 1"
using \<sigma> \<rho> dens' unfolding has_parametrized_subprob_density_def
by (subst B, intro subprob_count_space_density_le_1[of _ _ ?f])
(auto intro: merge_in_state_measure simp: stock_measure.simps)
ultimately have "?eval (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v)) \<sigma> \<le> ?eval \<delta> \<sigma>"
by (subst A, intro mult_right_le_one_le) simp_all
}
hence "(\<integral>\<^sup>+\<sigma>. ?eval (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v)) \<sigma> \<partial>state_measure (set vs) \<Gamma>) \<le>
(\<integral>\<^sup>+\<sigma>. ?eval \<delta> \<sigma> \<partial>state_measure (set vs) \<Gamma>)" by (intro nn_integral_mono) (simp add: ennreal_leI)
also have "... \<le> 1" using invar' \<rho> by (intro subprob_cexprD)
finally show "(\<integral>\<^sup>+\<sigma>. ?eval (\<delta> *\<^sub>c cexpr_subst_val f (BoolVal v)) \<sigma> \<partial>state_measure (set vs) \<Gamma>) \<le> 1" .
qed
qed (insert cdens_ctxt_invarD[OF invar] is_density_exprD[OF wf],
auto simp: shift_var_set_def)
lemma space_state_measureD_shift:
"\<sigma> \<in> space (state_measure (shift_var_set V) (case_nat t \<Gamma>)) \<Longrightarrow>
\<exists>x \<sigma>'. x \<in> type_universe t \<and> \<sigma>' \<in> space (state_measure V \<Gamma>) \<and> \<sigma> = case_nat x \<sigma>' "
by (intro exI[of _ "\<sigma> 0"] exI[of _ "\<sigma> \<circ> Suc"])
(auto simp: fun_eq_iff PiE_iff space_state_measure extensional_def split: nat.split)
lemma space_state_measure_shift_iff:
"\<sigma> \<in> space (state_measure (shift_var_set V) (case_nat t \<Gamma>)) \<longleftrightarrow>
(\<exists>x \<sigma>'. x \<in> type_universe t \<and> \<sigma>' \<in> space (state_measure V \<Gamma>) \<and> \<sigma> = case_nat x \<sigma>')"
by (auto dest!: space_state_measureD_shift)
lemma nonneg_cexprI_shift:
assumes "\<And>x \<sigma>. x \<in> type_universe t \<Longrightarrow> \<sigma> \<in> space (state_measure V \<Gamma>) \<Longrightarrow>
0 \<le> extract_real (cexpr_sem (case_nat x \<sigma>) e)"
shows "nonneg_cexpr (shift_var_set V) (case_nat t \<Gamma>) e"
by (auto intro!: nonneg_cexprI assms dest!: space_state_measureD_shift)
lemma nonneg_cexpr_shift_iff:
"nonneg_cexpr (shift_var_set V) (case_nat t \<Gamma>) (map_vars Suc e) \<longleftrightarrow> nonneg_cexpr V \<Gamma> e"
apply (auto simp: cexpr_sem_map_vars o_def nonneg_cexpr_def space_state_measure_shift_iff)
subgoal for \<sigma>
apply (drule bspec[of _ _ "case_nat (SOME x. x \<in> type_universe t) \<sigma>"])
using type_universe_nonempty[of t]
unfolding ex_in_conv[symmetric]
apply (auto intro!: case_nat_in_state_measure intro: someI)
done
done
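(* Note (informal): variables are de Bruijn indices, so "case_nat x \<sigma>" is the
   state mapping variable 0 to x and variable Suc n to \<sigma> n, while
   "shift_var_set V" adjoins the fresh variable 0 to the shifted set Suc ` V.
   The lemmas above transfer nonnegativity of a density expression across
   this shifting of variables. *)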
lemma case_nat_case_nat: "case_nat x n (case_nat y m i) = case_nat (case_nat x n y) (\<lambda>i'. case_nat x n (m i')) i"
by (rule nat.case_distrib)
lemma nonneg_cexpr_shift_iff2:
"nonneg_cexpr (shift_var_set (shift_var_set V))
(case_nat t1 (case_nat t2 \<Gamma>)) (map_vars (case_nat 0 (\<lambda>x. Suc (Suc x))) e) \<longleftrightarrow>
nonneg_cexpr (shift_var_set V) (case_nat t1 \<Gamma>) e"
apply (auto simp: cexpr_sem_map_vars o_def nonneg_cexpr_def space_state_measure_shift_iff)
subgoal for x \<sigma>
apply (drule bspec[of _ _ "case_nat x (case_nat (SOME x. x \<in> type_universe t2) \<sigma>)"])
using type_universe_nonempty[of t2]
unfolding ex_in_conv[symmetric]
apply (auto simp: case_nat_case_nat cong: nat.case_cong
intro!: case_nat_in_state_measure intro: someI_ex someI)
done
apply (erule bspec)
subgoal for x1 x2 \<sigma>
by (auto simp add: space_state_measure_shift_iff fun_eq_iff split: nat.split
intro!: exI[of _ x1] exI[of _ \<sigma>])
done
lemma nonneg_cexpr_Add:
assumes "\<Gamma> \<turnstile>\<^sub>c e1 : REAL" "\<Gamma> \<turnstile>\<^sub>c e2 : REAL"
assumes "free_vars e1 \<subseteq> V" "free_vars e2 \<subseteq> V"
assumes N1: "nonneg_cexpr V \<Gamma> e1" and N2: "nonneg_cexpr V \<Gamma> e2"
shows "nonneg_cexpr V \<Gamma> (e1 +\<^sub>c e2)"
proof (rule nonneg_cexprI)
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure V \<Gamma>)"
hence "extract_real (cexpr_sem \<sigma> (e1 +\<^sub>c e2)) = extract_real (cexpr_sem \<sigma> e1) + extract_real (cexpr_sem \<sigma> e2)"
using assms by (subst cexpr_sem_Add[of \<Gamma> _ _ _ V]) simp_all
also have "... \<ge> 0" using \<sigma> N1 N2 by (intro add_nonneg_nonneg nonneg_cexprD)
finally show "extract_real (cexpr_sem \<sigma> (e1 +\<^sub>c e2)) \<ge> 0" .
qed
lemma expr_has_density_cexpr_sound_aux:
assumes "\<Gamma> \<turnstile> e : t" "(vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>c e \<Rightarrow> f" "cdens_ctxt_invar vs vs' \<Gamma> \<delta>"
"free_vars e \<subseteq> set vs \<union> set vs'"
shows "dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>) \<turnstile>\<^sub>d e \<Rightarrow> eval_cexpr f \<and> is_density_expr (vs,vs',\<Gamma>,\<delta>) t f"
using assms(2,1,3,4)
proof (induction arbitrary: t rule: expr_has_density_cexpr.induct[split_format (complete)])
(* case (edc_equiv f1 f2 \<Gamma> e t vs vs' \<delta> t')
hence [simp]: "t' = t" by (auto intro!: expr_typing_unique)
from edc_equiv have "(\<lambda>\<rho> x. ennreal (eval_cexpr f1 \<rho> x)) = (\<lambda>\<rho> x. ennreal (eval_cexpr f2 \<rho> x))"
unfolding cexpr_equiv_def eval_cexpr_def by (intro ext) auto
with edc_equiv show ?case unfolding dens_ctxt_\<alpha>_def by auto
next*)
case (edc_val v vs vs' \<Gamma> \<delta>)
from edc_val.prems have [simp]: "t = val_type v" by auto
note invar = cdens_ctxt_invarD[OF edc_val.prems(2)]
let ?e1 = "map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>))" and ?e2 = "\<langle>CVar 0 =\<^sub>c CVal v\<rangle>\<^sub>c"
have ctype1: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?e1 : REAL" and ctype2: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?e2: REAL" using invar
by (auto intro!: cexpr_typing.intros cexpr_typing_map_vars simp: o_def)
hence ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?e1 *\<^sub>c ?e2 : REAL" by (auto intro!: cexpr_typing.intros)
{
fix \<rho> x assume x: "x \<in> type_universe (val_type v)"
and \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
hence "case_nat x \<rho> \<in> space (state_measure (shift_var_set (set vs')) (case_nat (val_type v) \<Gamma>))"
by (rule case_nat_in_state_measure)
hence "ennreal (eval_cexpr (?e1 *\<^sub>c ?e2) \<rho> x) =
ennreal (extract_real (cexpr_sem (case_nat x \<rho>)
(map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>))))) *
ennreal (extract_real (RealVal (bool_to_real (x = v))))" (is "_ = ?a * ?b")
using invar unfolding eval_cexpr_def
apply (subst ennreal_mult''[symmetric])
apply (simp add: bool_to_real_def)
apply (subst cexpr_sem_Mult[of "case_nat t \<Gamma>" _ _ _ "shift_var_set (set vs')"])
apply (insert invar ctype1 ctype2)
apply (auto simp: shift_var_set_def)
done
also have "?a = branch_prob (dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>)) \<rho>"
by (subst cexpr_sem_map_vars, subst cexpr_sem_branch_prob) (simp_all add: o_def \<rho> edc_val.prems)
also have "?b = indicator {v} x"
by (simp add: extract_real_def bool_to_real_def split: split_indicator)
finally have "ennreal (eval_cexpr (?e1 *\<^sub>c ?e2) \<rho> x) =
branch_prob (dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>)) \<rho> * indicator {v} x" .
} note e = this
have meas: "(\<lambda>(\<sigma>, x). ennreal (eval_cexpr (?e1 *\<^sub>c ?e2) \<sigma> x))
\<in> borel_measurable (state_measure (set vs') \<Gamma> \<Otimes>\<^sub>M stock_measure (val_type v))"
apply (subst measurable_split_conv, rule measurable_compose[OF _ measurable_ennreal])
apply (subst measurable_split_conv[symmetric], rule measurable_eval_cexpr)
apply (insert ctype invar, auto simp: shift_var_set_def)
done
have *: "Suc -` shift_var_set (set vs') = set vs'" "case_nat (val_type v) \<Gamma> \<circ> Suc = \<Gamma>"
by (auto simp: shift_var_set_def)
have nn: "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>)
(map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>)) *\<^sub>c \<langle>CVar 0 =\<^sub>c CVal v\<rangle>\<^sub>c)"
using invar ctype1 ctype2
by (fastforce intro!: nonneg_cexpr_Mult nonneg_indicator nonneg_cexpr_map_vars
cexpr_typing.intros nonneg_cexpr_sem_integrate_vars'
simp: branch_prob_cexpr_def *)
show ?case unfolding dens_ctxt_\<alpha>_def
apply (simp only: prod.case, intro conjI)
apply (rule hd_AE[OF hd_val et_val AE_I2])
apply (insert edc_val, simp_all add: e dens_ctxt_\<alpha>_def meas) [4]
apply (intro is_density_exprI)
using ctype
apply simp
apply (insert invar nn, auto simp: shift_var_set_def)
done
next
case (edc_var x vs vs' \<Gamma> \<delta> t)
hence t: "t = \<Gamma> x" by auto
note invar = cdens_ctxt_invarD[OF edc_var.prems(2)]
from invar have ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c marg_dens_cexpr \<Gamma> vs x \<delta> : REAL" by (auto simp: t)
show ?case unfolding dens_ctxt_\<alpha>_def
proof (simp only: prod.case, intro conjI is_density_exprI, rule hd_AE[OF hd_var edc_var.prems(1)])
show "case_nat t \<Gamma> \<turnstile>\<^sub>c marg_dens_cexpr \<Gamma> vs x \<delta> : REAL" by fact
next
show "free_vars (marg_dens_cexpr \<Gamma> vs x \<delta>) \<subseteq> shift_var_set (set vs')"
using edc_var.prems(2) by (rule free_vars_marg_dens_cexpr)
next
have free_vars: "free_vars (marg_dens_cexpr \<Gamma> vs x \<delta>) \<subseteq> shift_var_set (set vs')"
using edc_var.prems(2) by (rule free_vars_marg_dens_cexpr)
show "(\<lambda>(\<rho>, y). ennreal (eval_cexpr (marg_dens_cexpr \<Gamma> vs x \<delta>) \<rho> y))
\<in> borel_measurable (state_measure (set vs') \<Gamma> \<Otimes>\<^sub>M stock_measure t)"
apply (subst measurable_split_conv, rule measurable_compose[OF _ measurable_ennreal])
apply (subst measurable_split_conv[symmetric], rule measurable_eval_cexpr)
apply (insert ctype free_vars, auto simp: shift_var_set_def)
done
next
fix \<rho> assume "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
hence "AE y in stock_measure t.
marg_dens (dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>)) x \<rho> y =
ennreal (eval_cexpr (marg_dens_cexpr \<Gamma> vs x \<delta>) \<rho> y)"
using edc_var unfolding eval_cexpr_def by (subst t, subst eq_commute, intro cexpr_sem_marg_dens)
thus "AE y in stock_measure t.
marg_dens (set vs, set vs', \<Gamma>, \<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) x \<rho> y =
ennreal (eval_cexpr (marg_dens_cexpr \<Gamma> vs x \<delta>) \<rho> y)"
by (simp add: dens_ctxt_\<alpha>_def)
next
show "x \<in> set vs"
by (insert edc_var.prems edc_var.hyps, auto simp: eval_cexpr_def intro!: nonneg_cexpr_sem_marg_dens)
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) (marg_dens_cexpr \<Gamma> vs x \<delta>)"
by (intro nonneg_cexprI_shift nonneg_cexpr_sem_marg_dens[OF edc_var.prems(2) \<open>x \<in> set vs\<close>])
(auto simp: t)
qed
next
case (edc_pair x vs y vs' \<Gamma> \<delta> t)
hence t[simp]: "t = PRODUCT (\<Gamma> x) (\<Gamma> y)" by auto
note invar = cdens_ctxt_invarD[OF edc_pair.prems(2)]
from invar have ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c marg_dens2_cexpr \<Gamma> vs x y \<delta> : REAL" by auto
from edc_pair.prems have vars: "free_vars (marg_dens2_cexpr \<Gamma> vs x y \<delta>) \<subseteq> shift_var_set (set vs')"
using free_vars_marg_dens2_cexpr by simp
show ?case unfolding dens_ctxt_\<alpha>_def
proof (simp only: prod.case, intro conjI is_density_exprI, rule hd_AE[OF hd_pair edc_pair.prems(1)])
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
show "AE z in stock_measure t.
marg_dens2 (set vs, set vs', \<Gamma>, \<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) x y \<rho> z =
ennreal (eval_cexpr (marg_dens2_cexpr \<Gamma> vs x y \<delta>) \<rho> z)"
using cexpr_sem_marg_dens2[OF edc_pair.prems(2) edc_pair.hyps \<rho>] unfolding eval_cexpr_def
by (subst t, subst eq_commute) (simp add: dens_ctxt_\<alpha>_def)
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) (marg_dens2_cexpr \<Gamma> vs x y \<delta>)"
by (intro nonneg_cexprI_shift nonneg_cexpr_sem_marg_dens2[OF edc_pair.prems(2) \<open>x \<in> set vs\<close> \<open>y\<in>set vs\<close>])
auto
qed (insert edc_pair invar ctype vars, auto simp: dens_ctxt_\<alpha>_def)
next
case (edc_fail vs vs' \<Gamma> \<delta> t t')
hence [simp]: "t = t'" by auto
have ctype: "case_nat t' \<Gamma> \<turnstile>\<^sub>c CReal 0 : REAL"
by (subst val_type.simps[symmetric]) (rule cexpr_typing.intros)
thus ?case by (auto simp: dens_ctxt_\<alpha>_def eval_cexpr_def extract_real_def
zero_ennreal_def[symmetric] hd_fail
intro!: is_density_exprI nonneg_cexprI)
next
case (edc_let vs vs' \<Gamma> e f \<delta> e' g t)
then obtain t' where t1: "\<Gamma> \<turnstile> e : t'" and t2: "case_nat t' \<Gamma> \<turnstile> e' : t" by auto
note invar = cdens_ctxt_invarD[OF edc_let.prems(2)]
from t1 have t1': "the (expr_type \<Gamma> e) = t'" by (auto simp: expr_type_Some_iff[symmetric])
have dens1: "dens_ctxt_\<alpha> ([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>d e \<Rightarrow>
(\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf1: "is_density_expr ([], vs @ vs', \<Gamma>, CReal 1) t' f"
using edc_let.IH(1)[OF t1] edc_let.prems by (auto dest: cdens_ctxt_invar_empty)
have invf: "cdens_ctxt_invar (shift_vars vs) (map Suc vs') (case_nat t' \<Gamma>) (map_vars Suc \<delta> *\<^sub>c f)"
using edc_let.prems edc_let.hyps dens1 wf1 invar
by (intro cdens_ctxt_invar_insert[OF _ t1]) (auto simp: dens_ctxt_\<alpha>_def)
let ?\<Y> = "(shift_vars vs, map Suc vs', case_nat t' \<Gamma>, map_vars Suc \<delta> *\<^sub>c f)"
have "set (shift_vars vs) \<union> set (map Suc vs') = shift_var_set (set vs \<union> set vs')"
by (simp add: shift_var_set_def image_Un)
hence "dens_ctxt_\<alpha> (shift_vars vs, map Suc vs', case_nat t' \<Gamma>, map_vars Suc \<delta> *\<^sub>c f) \<turnstile>\<^sub>d
e' \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr g x xa)) \<and>
is_density_expr (shift_vars vs, map Suc vs', case_nat t' \<Gamma>, map_vars Suc \<delta> *\<^sub>c f) t g"
using invf t2 edc_let.prems subset_shift_var_set
by (simp only: t1'[symmetric], intro edc_let.IH(2)) simp_all
hence dens2: "dens_ctxt_\<alpha> ?\<Y> \<turnstile>\<^sub>d e' \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr g x xa))" and
wf2: "is_density_expr (shift_vars vs, map Suc vs', case_nat t' \<Gamma>, map_vars Suc \<delta> *\<^sub>c f) t g"
by simp_all
have cexpr_eq: "cexpr_sem (case_nat x \<rho> \<circ> (\<lambda>x. x - Suc 0)) g =
cexpr_sem (case_nat x (case_nat undefined \<rho>)) g" for x \<rho>
using is_density_exprD[OF wf2]
by (intro cexpr_sem_eq_on_vars) (auto split: nat.split simp: shift_var_set_def)
have [simp]: "\<And>\<sigma>. case_nat (\<sigma> 0) (\<lambda>x. \<sigma> (Suc x)) = \<sigma>" by (intro ext) (simp split: nat.split)
hence "(shift_var_set (set vs), Suc ` set vs', case_nat t' \<Gamma>,
insert_dens (set vs) (set vs') (\<lambda>x xa. ennreal (eval_cexpr f x xa))
(\<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))))
\<turnstile>\<^sub>d e' \<Rightarrow> (\<lambda>a aa. ennreal (eval_cexpr g a aa))" using dens2
apply (simp only: dens_ctxt_\<alpha>_def prod.case set_shift_vars set_map)
apply (erule hd_dens_ctxt_cong)
apply (insert invar is_density_exprD[OF wf1])
unfolding insert_dens_def
apply (subst ennreal_mult'[symmetric])
apply (erule nonneg_cexprD)
apply (rule measurable_space[OF measurable_remove_var[where t=t']])
apply simp
apply (simp add: shift_var_set_def image_Un)
apply (subst cexpr_sem_Mult[of "case_nat t' \<Gamma>"])
apply (auto intro!: cexpr_typing_map_vars simp: o_def shift_var_set_def image_Un
cexpr_sem_map_vars insert_dens_def eval_cexpr_def remove_var_def)
done
hence "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d LET e IN e' \<Rightarrow>
(\<lambda>\<rho> x. ennreal (eval_cexpr g (case_nat undefined \<rho>) x))"
unfolding dens_ctxt_\<alpha>_def
by (simp only: prod.case, intro hd_let[where f = "\<lambda>x xa. ennreal (eval_cexpr f x xa)"])
(insert dens1 dens2, simp_all add: dens_ctxt_\<alpha>_def extract_real_def one_ennreal_def t1')
hence "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d LET e IN e' \<Rightarrow>
(\<lambda>\<rho> x. ennreal (eval_cexpr (map_vars (\<lambda>x. x - 1) g) \<rho> x))"
proof (simp only: dens_ctxt_\<alpha>_def prod.case, erule_tac hd_cong[OF _ _ edc_let.prems(1,3)])
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
and x: "x \<in> space (stock_measure t)"
have "eval_cexpr (map_vars (\<lambda>x. x - 1) g) \<rho> x =
extract_real (cexpr_sem (case_nat x \<rho> \<circ> (\<lambda>x. x - Suc 0)) g)"
unfolding eval_cexpr_def by (simp add: cexpr_sem_map_vars)
also note cexpr_eq[of x \<rho>]
finally show "ennreal (eval_cexpr g (case_nat undefined \<rho>) x) =
ennreal (eval_cexpr (map_vars (\<lambda>x. x - 1) g) \<rho> x)"
by (simp add: eval_cexpr_def)
qed (simp_all add: density_context_\<alpha>[OF edc_let.prems(2)])
moreover have "is_density_expr (vs, vs', \<Gamma>, \<delta>) t (map_vars (\<lambda>x. x - 1) g)"
proof (intro is_density_exprI)
note wf = is_density_exprD[OF wf2]
show "case_nat t \<Gamma> \<turnstile>\<^sub>c map_vars (\<lambda>x. x - 1) g : REAL"
by (rule cexpr_typing_map_vars, rule cexpr_typing_cong'[OF wf(1)])
(insert wf(2), auto split: nat.split simp: shift_var_set_def)
from wf(2) show "free_vars (map_vars (\<lambda>x. x - 1) g)
\<subseteq> shift_var_set (set vs')"
by (auto simp: shift_var_set_def)
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) (map_vars (\<lambda>x. x - 1) g)"
apply (intro nonneg_cexprI_shift)
apply (simp add: cexpr_sem_map_vars cexpr_eq)
apply (rule nonneg_cexprD[OF wf2[THEN is_density_exprD_nonneg]])
apply (auto simp: space_state_measure PiE_iff extensional_def split: nat.splits)
done
qed
ultimately show ?case by (rule conjI)
next
case (edc_rand vs vs' \<Gamma> \<delta> e f dst t')
define t where "t = dist_param_type dst"
note invar = cdens_ctxt_invarD[OF edc_rand.prems(2)]
from edc_rand have t1: "\<Gamma> \<turnstile> e : t" and t2: "t' = dist_result_type dst" by (auto simp: t_def)
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) t f" using edc_rand t1 t2 by auto
from wf have tf: "case_nat t \<Gamma> \<turnstile>\<^sub>c f : REAL" and varsf: "free_vars f \<subseteq> shift_var_set (set vs')"
unfolding is_density_expr_def by simp_all
let ?M = "(\<lambda>\<rho>. dens_ctxt_measure (dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>)) \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e))"
have dens': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t)
(\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))" using dens t1 edc_rand.prems
by (simp_all add: dens_ctxt_\<alpha>_def expr_has_density_sound_aux density_context_\<alpha>)
let ?shift = "case_nat 0 (\<lambda>x. Suc (Suc x))"
let ?e1 = "map_vars ?shift f"
let ?e2 = "dist_dens_cexpr dst (CVar 0) (CVar 1)"
let ?e = "(\<integral>\<^sub>c ?e1 *\<^sub>c ?e2 \<partial>t)"
have [simp]: "\<And>t t' \<Gamma>. case_nat t (case_nat t' \<Gamma>) \<circ> ?shift = case_nat t \<Gamma>"
by (intro ext) (simp split: nat.split add: o_def)
have te1: "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c ?e1 : REAL" using tf
by (auto intro!: cexpr_typing.intros cexpr_typing_dist_dens_cexpr cet_var'
cexpr_typing_map_vars simp: t_def t2)
have te2: "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c ?e2 : REAL"
by (intro cexpr_typing_dist_dens_cexpr cet_var') (simp_all add: t_def t2)
have te: "case_nat t' \<Gamma> \<turnstile>\<^sub>c ?e : REAL" using te1 te2
by (intro cet_int cet_op[where t = "PRODUCT REAL REAL"] cet_pair) (simp_all add: t2 t_def)
have vars_e1: "free_vars ?e1 \<subseteq> shift_var_set (shift_var_set (set vs'))"
using varsf by (auto simp: shift_var_set_def)
have "(case_nat 0 (\<lambda>x. Suc (Suc x)) -` shift_var_set (shift_var_set (set vs'))) =
shift_var_set (set vs')" by (auto simp: shift_var_set_def split: nat.split_asm)
have nonneg_e1: "nonneg_cexpr (shift_var_set (shift_var_set (set vs'))) (case_nat t (case_nat t' \<Gamma>)) ?e1"
by (auto intro!: nonneg_cexprI wf[THEN is_density_exprD_nonneg, THEN nonneg_cexprD] case_nat_in_state_measure
dest!: space_state_measureD_shift simp: cexpr_sem_map_vars)
have vars_e2: "free_vars ?e2 \<subseteq> shift_var_set (shift_var_set (set vs'))"
by (intro order.trans[OF free_vars_dist_dens_cexpr]) (auto simp: shift_var_set_def)
have nonneg_e2: "nonneg_cexpr (shift_var_set (shift_var_set (set vs')))
(case_nat t (case_nat t' \<Gamma>)) ?e2"
by (intro nonneg_dist_dens_cexpr cet_var') (auto simp: t2 t_def shift_var_set_def)
let ?f = "\<lambda>\<rho> x. \<integral>\<^sup>+y. ennreal (eval_cexpr f \<rho> y) * dist_dens dst y x\<partial>stock_measure t"
let ?M = "(\<lambda>\<rho>. dens_ctxt_measure (dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>)) \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> (Random dst e)))"
have dens': "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Random dst e \<Rightarrow> ?f" using dens
by (simp only: dens_ctxt_\<alpha>_def prod.case t_def hd_rand[unfolded apply_dist_to_dens_def])
hence dens'': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t') ?f"
using edc_rand.prems invar
by (simp only: dens_ctxt_\<alpha>_def prod.case, intro expr_has_density_sound_aux)
(auto intro!: density_context_\<alpha>)
{
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
fix x assume x: "x \<in> type_universe t'"
fix y assume y: "y \<in> type_universe t"
let ?\<rho>'' = "case_nat y (case_nat x \<rho>)" and ?\<Gamma>'' = "case_nat t (case_nat t' \<Gamma>)"
let ?V'' = "shift_var_set (shift_var_set (set vs'))"
have \<rho>'': "?\<rho>'' \<in> space (state_measure (shift_var_set (shift_var_set (set vs'))) ?\<Gamma>'')"
using \<rho> x y by (intro case_nat_in_state_measure) simp_all
have A: "extract_real (cexpr_sem ?\<rho>'' (?e1 *\<^sub>c ?e2)) =
extract_real (cexpr_sem ?\<rho>'' ?e1) * extract_real (cexpr_sem ?\<rho>'' ?e2)"
by (rule cexpr_sem_Mult[OF te1 te2 \<rho>'' vars_e1 vars_e2])
also have "... \<ge> 0" using nonneg_e1 nonneg_e2 \<rho>''
by (blast intro: mult_nonneg_nonneg dest: nonneg_cexprD)
finally have B: "extract_real (cexpr_sem ?\<rho>'' (?e1 *\<^sub>c ?e2)) \<ge> 0" .
note A
hence "eval_cexpr f \<rho> y * dist_dens dst y x = extract_real (cexpr_sem ?\<rho>'' (?e1 *\<^sub>c ?e2))"
using \<rho>''
apply (subst A)
apply (subst ennreal_mult'')
using nonneg_e2
apply (erule nonneg_cexprD)
apply (subst cexpr_sem_dist_dens_cexpr[of ?\<Gamma>'' _ _ _ ?V''])
apply (force simp: cexpr_sem_map_vars eval_cexpr_def t2 t_def intro!: cet_var')+
done
note this B
} note e1e2 = this
{
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
have "AE x in stock_measure t'.
apply_dist_to_dens dst (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x)) \<rho> x = eval_cexpr ?e \<rho> x"
proof (rule AE_mp[OF _ AE_I2[OF impI]])
from has_parametrized_subprob_density_integral[OF dens'' \<rho>]
have "(\<integral>\<^sup>+x. ?f \<rho> x \<partial>stock_measure t') \<noteq> \<infinity>" by auto
thus "AE x in stock_measure t'. ?f \<rho> x \<noteq> \<infinity>"
using has_parametrized_subprob_densityD(3)[OF dens''] \<rho>
by (intro nn_integral_PInf_AE ) simp_all
next
fix x assume x: "x \<in> space (stock_measure t')" and finite: "?f \<rho> x \<noteq> \<infinity>"
let ?\<rho>' = "case_nat x \<rho>"
have \<rho>': "?\<rho>' \<in> space (state_measure (shift_var_set (set vs')) (case_nat t' \<Gamma>))"
using \<rho> x by (intro case_nat_in_state_measure) simp_all
hence *: "(\<integral>\<^sup>+y. ennreal (eval_cexpr f \<rho> y) * dist_dens dst y x \<partial>stock_measure t) =
\<integral>\<^sup>+y. extract_real (cexpr_sem (case_nat y ?\<rho>') (?e1 *\<^sub>c ?e2)) \<partial>stock_measure t" (is "_ = ?I")
using \<rho> x by (intro nn_integral_cong) (simp add: e1e2)
also from * and finite have finite': "?I < \<infinity>" by (simp add: less_top)
have "?I = ennreal (eval_cexpr ?e \<rho> x)" using \<rho>' te te1 te2 vars_e1 vars_e2 nonneg_e1 nonneg_e2
unfolding eval_cexpr_def
by (subst cexpr_sem_integral_nonneg[OF finite'])
(auto simp: eval_cexpr_def t2 t_def intro!: nonneg_cexpr_Mult)
finally show "apply_dist_to_dens dst (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x)) \<rho> x =
ennreal (eval_cexpr ?e \<rho> x)"
unfolding apply_dist_to_dens_def by (simp add: t_def)
qed
} note AE_eq = this
have meas: "(\<lambda>(\<rho>, x). ennreal (eval_cexpr ?e \<rho> x))
\<in> borel_measurable (state_measure (set vs') \<Gamma> \<Otimes>\<^sub>M stock_measure t')"
apply (subst measurable_split_conv, rule measurable_compose[OF _ measurable_ennreal])
apply (subst measurable_split_conv[symmetric], rule measurable_eval_cexpr[OF te])
apply (insert vars_e1 vars_e2, auto simp: shift_var_set_def)
done
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case,
rule hd_AE[OF hd_rand edc_rand.prems(1)])
from dens show "(set vs, set vs', \<Gamma>, \<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) \<turnstile>\<^sub>d
e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))"
unfolding dens_ctxt_\<alpha>_def by simp
next
have "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>) (\<integral>\<^sub>c ?e1 *\<^sub>c ?e2 \<partial>t)"
by (intro nonneg_cexpr_int nonneg_cexpr_Mult nonneg_dist_dens_cexpr te1 te2 vars_e1 vars_e2 nonneg_e1)
(auto simp: t_def t2 intro!: cet_var')
then show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>)
(\<integral>\<^sub>c map_vars (case_nat 0 (\<lambda>x. x + 2)) f *\<^sub>c ?e2 \<partial>dist_param_type dst)"
by (simp add: t_def)
qed (insert AE_eq meas te vars_e1 vars_e2, auto simp: t_def t2 shift_var_set_def)
next
case (edc_rand_det e vs' vs \<Gamma> \<delta> dst t')
define t where "t = dist_param_type dst"
note invar = cdens_ctxt_invarD[OF edc_rand_det.prems(2)]
from edc_rand_det have t1: "\<Gamma> \<turnstile> e : t" and t2: "t' = dist_result_type dst" by (auto simp: t_def)
let ?e1 = "map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>))" and
?e2 = "dist_dens_cexpr dst (map_vars Suc (expr_rf_to_cexpr e)) (CVar 0)"
have ctype1: "case_nat t' \<Gamma> \<turnstile>\<^sub>c ?e1 : REAL"
using invar by (auto intro!: cexpr_typing_map_vars simp: o_def)
have vars2': "free_vars (map_vars Suc (expr_rf_to_cexpr e)) \<subseteq> shift_var_set (set vs')"
unfolding shift_var_set_def using free_vars_expr_rf_to_cexpr edc_rand_det.hyps by auto
have vars2: "free_vars ?e2 \<subseteq> shift_var_set (free_vars e)"
unfolding shift_var_set_def using free_vars_expr_rf_to_cexpr edc_rand_det.hyps
by (intro order.trans[OF free_vars_dist_dens_cexpr]) auto
have ctype2: "case_nat t' \<Gamma> \<turnstile>\<^sub>c ?e2 : REAL" using t1 edc_rand_det.hyps
by (intro cexpr_typing_dist_dens_cexpr cexpr_typing_map_vars)
(auto simp: o_def t_def t2 intro!: cet_var')
have nonneg_e2: "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>) ?e2"
using t1 \<open>randomfree e\<close> free_vars_expr_rf_to_cexpr[of e] edc_rand_det.hyps
apply (intro nonneg_dist_dens_cexpr cexpr_typing_map_vars)
apply (auto simp add: o_def t_def t2 intro!: cet_var')
done
have nonneg_e1: "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>) ?e1"
using invar
by (auto simp add: branch_prob_cexpr_def nonneg_cexpr_shift_iff intro!: nonneg_cexpr_sem_integrate_vars')
{
fix \<rho> x
assume x: "x \<in> type_universe t'" and \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
hence \<rho>': "case_nat x \<rho> \<in> space (state_measure (shift_var_set (set vs')) (case_nat t' \<Gamma>))"
by (rule case_nat_in_state_measure)
hence "eval_cexpr (?e1 *\<^sub>c ?e2) \<rho> x =
ennreal (extract_real (cexpr_sem (case_nat x \<rho>)
(map_vars Suc (branch_prob_cexpr (vs, vs', \<Gamma>, \<delta>))))) *
ennreal (extract_real (cexpr_sem (case_nat x \<rho>) ?e2))" (is "_ = ?a * ?b")
using invar
apply (subst ennreal_mult''[symmetric])
apply (rule nonneg_cexprD[OF nonneg_e2])
apply simp
unfolding eval_cexpr_def
apply (subst cexpr_sem_Mult[of "case_nat t' \<Gamma>" _ _ _ "shift_var_set (set vs')"])
apply (insert invar ctype1 vars2 ctype2 edc_rand_det.hyps(2))
apply (auto simp: shift_var_set_def)
done
also have "?a = branch_prob (dens_ctxt_\<alpha> (vs,vs',\<Gamma>,\<delta>)) \<rho>" (is "_ = ?c")
by (subst cexpr_sem_map_vars, subst cexpr_sem_branch_prob) (simp_all add: o_def \<rho> edc_rand_det.prems)
also have "?b = dist_dens dst (expr_sem_rf \<rho> e) x" (is "_ = ?d") using t1 edc_rand_det.hyps
by (subst cexpr_sem_dist_dens_cexpr[of "case_nat t' \<Gamma>"], insert \<rho>' vars2')
(auto intro!: cexpr_typing_map_vars cet_var'
simp: o_def t_def t2 cexpr_sem_map_vars cexpr_sem_expr_rf_to_cexpr)
finally have A: "ennreal (eval_cexpr (?e1 *\<^sub>c ?e2) \<rho> x) = ?c * ?d" .
} note A = this
have meas: "(\<lambda>(\<rho>, x). ennreal (eval_cexpr (?e1 *\<^sub>c ?e2) \<rho> x))
\<in> borel_measurable (state_measure (set vs') \<Gamma> \<Otimes>\<^sub>M stock_measure t')"
using ctype1 ctype2 vars2 invar edc_rand_det.hyps
by (subst measurable_split_conv, intro measurable_compose[OF _ measurable_ennreal],
subst measurable_split_conv[symmetric], intro measurable_eval_cexpr)
(auto intro!: cexpr_typing.intros simp: shift_var_set_def)
from ctype1 ctype2 vars2 invar edc_rand_det.hyps
have wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) t' (?e1 *\<^sub>c ?e2)"
proof (intro is_density_exprI)
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>) (?e1 *\<^sub>c ?e2)"
using invar(2)
order_trans[OF free_vars_expr_rf_to_cexpr[OF \<open>randomfree e\<close>] \<open>free_vars e \<subseteq> set vs'\<close>]
by (intro nonneg_cexpr_Mult ctype1 ctype2 nonneg_e2 nonneg_e1
free_vars_dist_dens_cexpr[THEN order_trans])
(auto simp: intro: order_trans)
qed (auto intro!: cexpr_typing.intros simp: shift_var_set_def)
show ?case using edc_rand_det.prems edc_rand_det.hyps meas wf A
apply (intro conjI, simp add: dens_ctxt_\<alpha>_def)
apply (intro hd_AE[OF hd_rand_det[OF edc_rand_det.hyps] edc_rand_det.prems(1) AE_I2])
apply (simp_all add: dens_ctxt_\<alpha>_def)
done
next
case (edc_if_det b vs vs' \<Gamma> \<delta> e1 f1 e2 f2 t)
hence tb: "\<Gamma> \<turnstile> b : BOOL" and t1: "\<Gamma> \<turnstile> e1 : t" and t2: "\<Gamma> \<turnstile> e2 : t" by auto
from edc_if_det have b: "randomfree b" "free_vars b \<subseteq> set vs \<union> set vs'" by simp_all
note invar = cdens_ctxt_invarD[OF edc_if_det.prems(2)]
let ?ind1 = "\<langle>expr_rf_to_cexpr b\<rangle>\<^sub>c" and ?ind2 = "\<langle>\<not>\<^sub>c expr_rf_to_cexpr b\<rangle>\<^sub>c"
have tind1: "\<Gamma> \<turnstile>\<^sub>c ?ind1 : REAL" and tind2: "\<Gamma> \<turnstile>\<^sub>c ?ind2 : REAL"
using edc_if_det.hyps tb by (auto intro!: cexpr_typing.intros)
have t\<delta>1: "\<Gamma> \<turnstile>\<^sub>c \<delta> *\<^sub>c ?ind1 : REAL" and t\<delta>2: "\<Gamma> \<turnstile>\<^sub>c \<delta> *\<^sub>c ?ind2 : REAL"
using invar(3) edc_if_det.hyps tb by (auto intro!: cexpr_typing.intros)
have nonneg_ind1: "nonneg_cexpr (set vs \<union> set vs') \<Gamma> ?ind1" and
nonneg_ind2: "nonneg_cexpr (set vs \<union> set vs') \<Gamma> ?ind2"
using tind1 tind2 edc_if_det.hyps tb
by (auto intro!: nonneg_cexprI simp: cexpr_sem_expr_rf_to_cexpr bool_to_real_def extract_real_def
dest: val_type_expr_sem_rf[OF tb b] elim!: BOOL_E split: if_split)
have subprob1: "subprob_cexpr (set vs) (set vs') \<Gamma> (\<delta> *\<^sub>c ?ind1)" and
subprob2: "subprob_cexpr (set vs) (set vs') \<Gamma> (\<delta> *\<^sub>c ?ind2)"
using invar tb edc_if_det.hyps edc_if_det.prems free_vars_expr_rf_to_cexpr[OF edc_if_det.hyps(1)]
by (auto intro!: subprob_indicator cet_op)
have vars1: "free_vars (\<delta> *\<^sub>c ?ind1) \<subseteq> set vs \<union> set vs'" and
vars2: "free_vars (\<delta> *\<^sub>c ?ind2) \<subseteq> set vs \<union> set vs'"
using invar edc_if_det.hyps edc_if_det.prems free_vars_expr_rf_to_cexpr by auto
have inv1: "cdens_ctxt_invar vs vs' \<Gamma> (\<delta> *\<^sub>c ?ind1)"
using invar edc_if_det.hyps edc_if_det.prems tind1 t\<delta>1 subprob1 nonneg_ind1 vars1
by (intro cdens_ctxt_invarI nonneg_cexpr_Mult) auto
have inv2: "cdens_ctxt_invar vs vs' \<Gamma> (\<delta> *\<^sub>c ?ind2)"
using invar edc_if_det.hyps edc_if_det.prems tind2 t\<delta>2 subprob2 nonneg_ind2 vars2
by (intro cdens_ctxt_invarI nonneg_cexpr_Mult) auto
have dens1: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?ind1) \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr f1 \<rho> x)" and
wf1: "is_density_expr (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?ind1) t f1"
using edc_if_det.IH(1)[OF t1 inv1] edc_if_det.prems by auto
have dens2: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?ind2) \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr f2 \<rho> x)" and
wf2: "is_density_expr (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?ind2) t f2"
using edc_if_det.IH(2)[OF t2 inv2] edc_if_det.prems by auto
show ?case
proof (rule conjI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_cong[OF hd_if_det])
let ?\<Y> = "(set vs, set vs', \<Gamma>, if_dens_det (\<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) b True)"
show "?\<Y> \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr f1 \<rho> x)"
proof (rule hd_dens_ctxt_cong)
let ?\<delta> = "\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?ind1)))"
show "(set vs, set vs', \<Gamma>, ?\<delta>) \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f1 \<rho> x))"
using dens1 by (simp add: dens_ctxt_\<alpha>_def)
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure (set vs \<union> set vs') \<Gamma>)"
have "extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?ind1)) =
extract_real (cexpr_sem \<sigma> \<delta>) * extract_real (cexpr_sem \<sigma> ?ind1)" using invar vars1
by (subst cexpr_sem_Mult[OF invar(3) tind1 \<sigma>]) simp_all
also have "extract_real (cexpr_sem \<sigma> ?ind1) = (if expr_sem_rf \<sigma> b = TRUE then 1 else 0)"
using edc_if_det.hyps val_type_expr_sem_rf[OF tb b \<sigma>]
by (auto simp: cexpr_sem_expr_rf_to_cexpr extract_real_def bool_to_real_def elim!: BOOL_E)
finally show "?\<delta> \<sigma> = if_dens_det (\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> \<delta>))) b True \<sigma>"
by (simp add: if_dens_det_def)
qed
next
let ?\<Y> = "(set vs, set vs', \<Gamma>, if_dens_det (\<lambda>x. ennreal (extract_real (cexpr_sem x \<delta>))) b False)"
show "?\<Y> \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr f2 \<rho> x)"
proof (rule hd_dens_ctxt_cong)
let ?\<delta> = "\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?ind2)))"
show "(set vs, set vs', \<Gamma>, ?\<delta>) \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f2 \<rho> x))"
using dens2 by (simp add: dens_ctxt_\<alpha>_def)
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure (set vs \<union> set vs') \<Gamma>)"
have "extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?ind2)) =
extract_real (cexpr_sem \<sigma> \<delta>) * extract_real (cexpr_sem \<sigma> ?ind2)" using invar vars1
by (subst cexpr_sem_Mult[OF invar(3) tind2 \<sigma>]) simp_all
also have "extract_real (cexpr_sem \<sigma> ?ind2) = (if expr_sem_rf \<sigma> b = FALSE then 1 else 0)"
using edc_if_det.hyps val_type_expr_sem_rf[OF tb b \<sigma>]
by (auto simp: cexpr_sem_expr_rf_to_cexpr extract_real_def bool_to_real_def elim!: BOOL_E)
finally show "?\<delta> \<sigma> = if_dens_det (\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> \<delta>))) b False \<sigma>"
by (simp add: if_dens_det_def)
qed
next
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)" and x: "x \<in> space (stock_measure t)"
hence "eval_cexpr (f1 +\<^sub>c f2) \<rho> x = eval_cexpr f1 \<rho> x + eval_cexpr f2 \<rho> x"
using wf1 wf2 unfolding eval_cexpr_def is_density_expr_def
by (subst cexpr_sem_Add[where \<Gamma> = "case_nat t \<Gamma>" and V = "shift_var_set (set vs')"]) auto
moreover have "0 \<le> eval_cexpr f1 \<rho> x" "0 \<le> eval_cexpr f2 \<rho> x"
unfolding eval_cexpr_def
using \<rho> x wf1[THEN is_density_exprD_nonneg, THEN nonneg_cexprD] wf2[THEN is_density_exprD_nonneg, THEN nonneg_cexprD]
unfolding space_state_measure_shift_iff by auto
ultimately show "ennreal (eval_cexpr f1 \<rho> x) + ennreal (eval_cexpr f2 \<rho> x) = ennreal (eval_cexpr (f1 +\<^sub>c f2) \<rho> x)"
by simp
next
show "is_density_expr (vs, vs', \<Gamma>, \<delta>) t (f1 +\<^sub>c f2)" using wf1 wf2
using wf1[THEN is_density_exprD_nonneg] wf2[THEN is_density_exprD_nonneg]
by (auto simp: is_density_expr_def intro!: cet_op[where t = "PRODUCT REAL REAL"] cet_pair nonneg_cexpr_Add)
qed (insert edc_if_det.prems edc_if_det.hyps, auto intro!: density_context_\<alpha>)
next
case (edc_if vs vs' \<Gamma> b f \<delta> e1 g1 e2 g2 t)
hence tb: "\<Gamma> \<turnstile> b : BOOL" and t1: "\<Gamma> \<turnstile> e1 : t" and t2: "\<Gamma> \<turnstile> e2 : t" by auto
note invar = cdens_ctxt_invarD[OF edc_if.prems(2)]
have densb: "dens_ctxt_\<alpha> ([], vs @ vs', \<Gamma>, CReal 1) \<turnstile>\<^sub>d b \<Rightarrow> (\<lambda>\<rho> b. ennreal (eval_cexpr f \<rho> b))" and
wfb: "is_density_expr ([], vs @ vs', \<Gamma>, CReal 1) BOOL f"
using edc_if.IH(1)[OF tb] edc_if.prems by (simp_all add: cdens_ctxt_invar_empty)
have inv1: "cdens_ctxt_invar vs vs' \<Gamma> (\<delta> *\<^sub>c cexpr_subst_val f TRUE)" and
inv2: "cdens_ctxt_invar vs vs' \<Gamma> (\<delta> *\<^sub>c cexpr_subst_val f FALSE)"
using tb densb wfb edc_if.prems by (auto intro!: cdens_ctxt_invar_insert_bool)
let ?\<delta>1 = "cexpr_subst_val f TRUE" and ?\<delta>2 = "cexpr_subst_val f FALSE"
have t\<delta>1: "\<Gamma> \<turnstile>\<^sub>c \<delta> *\<^sub>c ?\<delta>1 : REAL" and t\<delta>2: "\<Gamma> \<turnstile>\<^sub>c \<delta> *\<^sub>c ?\<delta>2 : REAL"
using is_density_exprD[OF wfb] invar
by (auto intro!: cet_op[where t = "PRODUCT REAL REAL"] cet_pair)
have vars1: "free_vars (\<delta> *\<^sub>c ?\<delta>1) \<subseteq> set vs \<union> set vs'" and
vars2: "free_vars (\<delta> *\<^sub>c ?\<delta>2) \<subseteq> set vs \<union> set vs'"
using invar is_density_exprD[OF wfb] by (auto simp: shift_var_set_def)
have dens1: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?\<delta>1) \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr g1 x xa))" and
wf1: "is_density_expr (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?\<delta>1) t g1" and
dens2: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?\<delta>2) \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr g2 x xa))" and
wf2: "is_density_expr (vs, vs', \<Gamma>, \<delta> *\<^sub>c ?\<delta>2) t g2"
using edc_if.IH(2)[OF t1 inv1] edc_if.IH(3)[OF t2 inv2] edc_if.prems by simp_all
have f_nonneg[simp]: "\<sigma> \<in> space (state_measure (set vs \<union> set vs') \<Gamma>) \<Longrightarrow>
0 \<le> extract_real (cexpr_sem (case_nat (BoolVal b) \<sigma>) f)" for b \<sigma>
using wfb[THEN is_density_exprD_nonneg] by (rule nonneg_cexprD) auto
let ?\<delta>' = "\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> \<delta>))" and ?f = "\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> x)"
show ?case
proof (rule conjI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_cong[OF hd_if])
let ?\<Y> = "(set vs, set vs', \<Gamma>, if_dens ?\<delta>' ?f True)"
show "?\<Y> \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr g1 \<rho> x)"
proof (rule hd_dens_ctxt_cong)
let ?\<delta> = "\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?\<delta>1)))"
show "(set vs, set vs', \<Gamma>, ?\<delta>) \<turnstile>\<^sub>d e1 \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr g1 \<rho> x))"
using dens1 by (simp add: dens_ctxt_\<alpha>_def)
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure (set vs \<union> set vs') \<Gamma>)"
have "extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?\<delta>1)) =
extract_real (cexpr_sem \<sigma> \<delta>) * extract_real (cexpr_sem \<sigma> ?\<delta>1)"
using invar vars1 is_density_exprD[OF wfb] by (subst cexpr_sem_Mult[OF invar(3) _ \<sigma>]) auto
also have "... = if_dens ?\<delta>' ?f True \<sigma>" unfolding if_dens_def by (simp add: eval_cexpr_def ennreal_mult'' \<sigma>)
finally show "?\<delta> \<sigma> = if_dens ?\<delta>' ?f True \<sigma>" by (simp add: if_dens_det_def)
qed
next
let ?\<Y> = "(set vs, set vs', \<Gamma>, if_dens ?\<delta>' ?f False)"
show "?\<Y> \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>\<rho> x. eval_cexpr g2 \<rho> x)"
proof (rule hd_dens_ctxt_cong)
let ?\<delta> = "\<lambda>\<sigma>. ennreal (extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?\<delta>2)))"
show "(set vs, set vs', \<Gamma>, ?\<delta>) \<turnstile>\<^sub>d e2 \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr g2 \<rho> x))"
using dens2 by (simp add: dens_ctxt_\<alpha>_def)
fix \<sigma> assume \<sigma>: "\<sigma> \<in> space (state_measure (set vs \<union> set vs') \<Gamma>)"
have "extract_real (cexpr_sem \<sigma> (\<delta> *\<^sub>c ?\<delta>2)) =
extract_real (cexpr_sem \<sigma> \<delta>) * extract_real (cexpr_sem \<sigma> ?\<delta>2)"
using invar vars1 is_density_exprD[OF wfb] by (subst cexpr_sem_Mult[OF invar(3) _ \<sigma>]) auto
also have "... = if_dens ?\<delta>' ?f False \<sigma>" unfolding if_dens_def by (simp add: eval_cexpr_def ennreal_mult'' \<sigma>)
finally show "?\<delta> \<sigma> = if_dens ?\<delta>' ?f False \<sigma>" by (simp add: if_dens_det_def)
qed
next
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)" and x: "x \<in> space (stock_measure t)"
hence "eval_cexpr (g1 +\<^sub>c g2) \<rho> x = eval_cexpr g1 \<rho> x + eval_cexpr g2 \<rho> x"
using wf1 wf2 unfolding eval_cexpr_def is_density_expr_def
by (subst cexpr_sem_Add[where \<Gamma> = "case_nat t \<Gamma>" and V = "shift_var_set (set vs')"]) auto
moreover have "0 \<le> eval_cexpr g1 \<rho> x" "0 \<le> eval_cexpr g2 \<rho> x"
unfolding eval_cexpr_def
using \<rho> x wf1[THEN is_density_exprD_nonneg, THEN nonneg_cexprD] wf2[THEN is_density_exprD_nonneg, THEN nonneg_cexprD]
unfolding space_state_measure_shift_iff by auto
ultimately show "ennreal (eval_cexpr g1 \<rho> x) + ennreal (eval_cexpr g2 \<rho> x) = ennreal (eval_cexpr (g1 +\<^sub>c g2) \<rho> x)"
by simp
next
show "is_density_expr (vs, vs', \<Gamma>, \<delta>) t (g1 +\<^sub>c g2)" using wf1 wf2
by (auto simp: is_density_expr_def intro!: cet_op[where t = "PRODUCT REAL REAL"] cet_pair nonneg_cexpr_Add)
next
show "({}, set vs \<union> set vs', \<Gamma>, \<lambda>_. 1) \<turnstile>\<^sub>d b \<Rightarrow> (\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> x))"
using densb unfolding dens_ctxt_\<alpha>_def by (simp add: extract_real_def one_ennreal_def)
qed (insert edc_if.prems edc_if.hyps, auto intro!: density_context_\<alpha>)
next
case (edc_op_discr vs vs' \<Gamma> \<delta> e f t oper t' t'')
let ?expr' = "\<langle>(oper $$\<^sub>c (CVar 0)) =\<^sub>c CVar 1\<rangle>\<^sub>c *\<^sub>c map_vars (case_nat 0 (\<lambda>x. x+2)) f"
let ?expr = "\<integral>\<^sub>c ?expr' \<partial>t" and ?shift = "case_nat 0 (\<lambda>x. x + 2)"
from edc_op_discr.prems(1) edc_op_discr.hyps
have t: "\<Gamma> \<turnstile> e : t" by (elim expr_typing_opE, fastforce split: pdf_type.split_asm)
with edc_op_discr.prems(1) and edc_op_discr.hyps have [simp]: "t'' = t'"
by (intro expr_typing_unique) (auto intro: et_op)
from t and edc_op_discr.prems(1)
have the_t1: "the (expr_type \<Gamma> e) = t" and the_t2: "the (expr_type \<Gamma> (oper $$ e)) = t'"
by (simp_all add: expr_type_Some_iff[symmetric])
from edc_op_discr.prems edc_op_discr.IH[OF t]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) t f" by simp_all
note wf' = is_density_exprD[OF wf]
have ctype''': "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c (oper $$\<^sub>c (CVar 0)) =\<^sub>c CVar 1 : BOOL" and
ctype'': "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c \<langle>(oper $$\<^sub>c (CVar 0)) =\<^sub>c CVar 1\<rangle>\<^sub>c : REAL" and
ctype': "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c ?expr' : REAL" using wf' edc_op_discr.hyps
by ((intro cet_op_intros cexpr_typing_map_vars cet_var' cet_pair cet_eq,
auto intro!: cet_op cet_var') [])+
from ctype' have ctype: "case_nat t' \<Gamma> \<turnstile>\<^sub>c ?expr : REAL" by (rule cet_int)
have vars': "free_vars ?expr' \<subseteq> shift_var_set (shift_var_set (set vs'))" using wf'
by (auto split: nat.split simp: shift_var_set_def)
hence vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" by (auto split: nat.split_asm)
let ?\<Y> = "(set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>)))"
let ?M = "\<lambda>\<rho>. dens_ctxt_measure ?\<Y> \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e)"
have "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) f"
using wf[THEN is_density_exprD_nonneg] .
hence nonneg: "nonneg_cexpr (shift_var_set (shift_var_set (set vs')))
(case_nat t (case_nat t' \<Gamma>)) ?expr'"
using wf' vars' ctype''' by (intro nonneg_cexpr_Mult[OF ctype''] cexpr_typing_map_vars
nonneg_cexpr_map_vars nonneg_indicator)
(auto dest: nonneg_cexprD simp: extract_real_def bool_to_real_def)
let ?M = "\<lambda>\<rho>. dens_ctxt_measure ?\<Y> \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> (oper $$ e))"
let ?f = "\<lambda>\<rho> x y. (if op_sem oper y = x then 1 else 0) * ennreal (eval_cexpr f \<rho> y)"
have "?\<Y> \<turnstile>\<^sub>d oper $$ e \<Rightarrow> (\<lambda>\<rho> x. \<integral>\<^sup>+y. ?f \<rho> x y \<partial>stock_measure t)" using dens t edc_op_discr.hyps
by (subst the_t1[symmetric], intro hd_op_discr)
(simp_all add: dens_ctxt_\<alpha>_def the_t1 expr_type_Some_iff[symmetric])
hence dens: "?\<Y> \<turnstile>\<^sub>d oper $$ e \<Rightarrow> (\<lambda>\<rho> x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t)"
proof (rule hd_cong[OF _ _ _ _ nn_integral_cong])
fix \<rho> x y let ?P = "\<lambda>x M. x \<in> space M"
assume A: "?P \<rho> (state_measure (set vs') \<Gamma>)" "?P x (stock_measure t')" "?P y (stock_measure t)"
hence "val_type (cexpr_sem (case_nat y \<rho>) f) = REAL" using wf' by (intro val_type_cexpr_sem) auto
thus "?f \<rho> x y = ennreal (eval_cexpr ?expr' (case_nat x \<rho>) y)"
by (auto simp: eval_cexpr_def extract_real_def lift_RealIntVal2_def
bool_to_real_def cexpr_sem_map_vars elim!: REAL_E)
qed (insert edc_op_discr.prems, auto intro!: density_context_\<alpha>)
hence dens': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t')
(\<lambda>\<rho> x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t)"
using edc_op_discr.prems by (intro expr_has_density_sound_aux density_context_\<alpha>) simp_all
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_AE[OF dens])
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
let ?dens = "\<lambda>x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t"
show "AE x in stock_measure t'. ?dens x = ennreal (eval_cexpr ?expr \<rho> x)"
proof (rule AE_mp[OF _ AE_I2[OF impI]])
from has_parametrized_subprob_density_integral[OF dens' \<rho>] and
has_parametrized_subprob_densityD(3)[OF dens'] and \<rho>
show "AE x in stock_measure t'. ?dens x \<noteq> \<infinity>" by (intro nn_integral_PInf_AE) auto
next
fix x assume x: "x \<in> space (stock_measure t')" and fin: "?dens x \<noteq> \<infinity>"
thus "?dens x = ennreal (eval_cexpr ?expr \<rho> x)"
using \<rho> vars' ctype' nonneg unfolding eval_cexpr_def
by (subst cexpr_sem_integral_nonneg) (auto intro!: nonneg_cexpr_map_vars simp: less_top)
qed
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t'' \<Gamma>) (\<integral>\<^sub>c ?expr' \<partial>t)"
using nonneg by (intro nonneg_cexpr_int) simp
qed (insert vars ctype edc_op_discr.prems, auto)
next
case (edc_fst vs vs' \<Gamma> \<delta> e f t'' t' t)
hence [simp]: "t'' = t" by (auto intro!: expr_typing_unique et_op)
from edc_fst.hyps have t': "the (expr_type \<Gamma> (Snd $$ e)) = t'"
by (simp add: expr_type_Some_iff[symmetric])
let ?shift = "case_nat 0 (\<lambda>x. x + 2)"
have [simp]: "\<And>t t'. case_nat t (case_nat t' \<Gamma>) \<circ> case_nat 0 (\<lambda>x. Suc (Suc x)) = case_nat t \<Gamma>"
by (intro ext) (simp split: nat.split add: o_def)
note invar = cdens_ctxt_invarD[OF edc_fst.prems(2)]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) (PRODUCT t t') f" using edc_fst by auto
let ?M = "\<lambda>\<rho>. dens_ctxt_measure (set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<rho>
\<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e)"
have nonneg: "nonneg_cexpr (shift_var_set (set vs')) (case_nat (PRODUCT t t') \<Gamma>) f"
using wf by (rule is_density_exprD_nonneg)
note wf' = is_density_exprD[OF wf]
let ?expr = "map_vars ?shift f \<circ>\<^sub>c <CVar 1, CVar 0>\<^sub>c"
have ctype: "case_nat t' (case_nat t \<Gamma>) \<turnstile>\<^sub>c ?expr : REAL"
using wf' by (auto intro!: cexpr_typing.intros cexpr_typing_map_vars)
have vars: "free_vars ?expr \<subseteq> shift_var_set (shift_var_set (set vs'))" using free_vars_cexpr_comp wf'
by (intro subset_shift_var_set) (force simp: shift_var_set_def)
let ?M = "\<lambda>\<rho>. dens_ctxt_measure (set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<rho>
\<bind> (\<lambda>\<sigma>. expr_sem \<sigma> (Fst $$ e))"
have A: "\<And>x y \<rho>. ((case_nat x (case_nat y \<rho>))(0 := <|y, x|>)) \<circ> ?shift = case_nat <|y, x|> \<rho>"
by (intro ext) (simp split: nat.split add: o_def)
have dens': "(set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<turnstile>\<^sub>d Fst $$ e \<Rightarrow>
(\<lambda>\<rho> x. (\<integral>\<^sup>+y. eval_cexpr f \<rho> (<|x, y|>) \<partial>stock_measure t'))" (is "?\<Y> \<turnstile>\<^sub>d _ \<Rightarrow> ?f")
using dens by (subst t'[symmetric], intro hd_fst) (simp add: dens_ctxt_\<alpha>_def)
hence dens': "?\<Y> \<turnstile>\<^sub>d Fst $$ e \<Rightarrow> (\<lambda>\<rho> x. (\<integral>\<^sup>+y. eval_cexpr ?expr (case_nat x \<rho>) y \<partial>stock_measure t'))"
(is "_ \<turnstile>\<^sub>d _ \<Rightarrow> ?f") by (rule hd_cong, intro density_context_\<alpha>, insert edc_fst.prems A)
(auto intro!: nn_integral_cong simp: eval_cexpr_def cexpr_sem_cexpr_comp cexpr_sem_map_vars)
hence dens'': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t) ?f"
using edc_fst.prems by (intro expr_has_density_sound_aux density_context_\<alpha>) simp_all
have "\<And>V. ?shift -` shift_var_set (shift_var_set V) = shift_var_set V"
by (auto simp: shift_var_set_def split: nat.split_asm)
hence nonneg': "nonneg_cexpr (shift_var_set (shift_var_set (set vs'))) (case_nat t' (case_nat t \<Gamma>)) ?expr"
by (auto intro!: nonneg_cexpr_comp nonneg_cexpr_map_vars nonneg cexpr_typing.intros cet_var')
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_AE[OF dens'])
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
thus "AE x in stock_measure t. ?f \<rho> x = ennreal (eval_cexpr (\<integral>\<^sub>c ?expr \<partial>t') \<rho> x)"
using ctype vars edc_fst.hyps nonneg'
by (intro has_parametrized_subprob_density_cexpr_sem_integral[OF dens'']) auto
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>)
(\<integral>\<^sub>c (map_vars (case_nat 0 (\<lambda>x. x + 2)) f \<circ>\<^sub>c <CVar 1, CVar 0>\<^sub>c) \<partial>t')"
using nonneg' by (intro nonneg_cexpr_int)
qed (insert edc_fst.prems ctype vars, auto simp: measurable_split_conv
intro!: cet_int measurable_compose[OF _ measurable_ennreal]
measurable_Pair_compose_split[OF measurable_eval_cexpr])
next
case (edc_snd vs vs' \<Gamma> \<delta> e f t t' t'')
hence [simp]: "t'' = t'" by (auto intro!: expr_typing_unique et_op)
from edc_snd.hyps have t': "the (expr_type \<Gamma> (Fst $$ e)) = t"
by (simp add: expr_type_Some_iff[symmetric])
let ?shift = "case_nat 0 (\<lambda>x. x + 2)"
have [simp]: "\<And>t t'. case_nat t (case_nat t' \<Gamma>) \<circ> case_nat 0 (\<lambda>x. Suc (Suc x)) = case_nat t \<Gamma>"
by (intro ext) (simp split: nat.split add: o_def)
note invar = cdens_ctxt_invarD[OF edc_snd.prems(2)]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) (PRODUCT t t') f" using edc_snd by auto
let ?M = "\<lambda>\<rho>. dens_ctxt_measure (set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<rho>
\<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e)"
have nonneg: "nonneg_cexpr (shift_var_set (set vs')) (case_nat (PRODUCT t t') \<Gamma>) f"
using wf by (rule is_density_exprD_nonneg)
note wf' = is_density_exprD[OF wf]
let ?expr = "map_vars ?shift f \<circ>\<^sub>c <CVar 0, CVar 1>\<^sub>c"
have ctype: "case_nat t (case_nat t' \<Gamma>) \<turnstile>\<^sub>c ?expr : REAL"
using wf' by (auto intro!: cexpr_typing.intros cexpr_typing_map_vars)
have vars: "free_vars ?expr \<subseteq> shift_var_set (shift_var_set (set vs'))" using free_vars_cexpr_comp wf'
by (intro subset_shift_var_set) (force simp: shift_var_set_def)
let ?M = "\<lambda>\<rho>. dens_ctxt_measure (set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<rho>
\<bind> (\<lambda>\<sigma>. expr_sem \<sigma> (Snd $$ e))"
have A: "\<And>x y \<rho>. ((case_nat y (case_nat x \<rho>))(0 := <|y, x|>)) \<circ> ?shift = case_nat <|y, x|> \<rho>"
by (intro ext) (simp split: nat.split add: o_def)
have dens': "(set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>))) \<turnstile>\<^sub>d Snd $$ e \<Rightarrow>
(\<lambda>\<rho> y. (\<integral>\<^sup>+x. eval_cexpr f \<rho> (<|x, y|>) \<partial>stock_measure t))" (is "?\<Y> \<turnstile>\<^sub>d _ \<Rightarrow> ?f")
using dens by (subst t'[symmetric], intro hd_snd) (simp add: dens_ctxt_\<alpha>_def)
hence dens': "?\<Y> \<turnstile>\<^sub>d Snd $$ e \<Rightarrow> (\<lambda>\<rho> y. (\<integral>\<^sup>+x. eval_cexpr ?expr (case_nat y \<rho>) x \<partial>stock_measure t))"
(is "_ \<turnstile>\<^sub>d _ \<Rightarrow> ?f") by (rule hd_cong, intro density_context_\<alpha>, insert edc_snd.prems A)
(auto intro!: nn_integral_cong simp: eval_cexpr_def cexpr_sem_cexpr_comp cexpr_sem_map_vars)
hence dens'': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t') ?f"
using edc_snd.prems by (intro expr_has_density_sound_aux density_context_\<alpha>) simp_all
have "\<And>V. ?shift -` shift_var_set (shift_var_set V) = shift_var_set V"
by (auto simp: shift_var_set_def split: nat.split_asm)
hence nonneg': "nonneg_cexpr (shift_var_set (shift_var_set (set vs'))) (case_nat t (case_nat t' \<Gamma>)) ?expr"
by (auto intro!: nonneg_cexpr_comp nonneg_cexpr_map_vars nonneg cexpr_typing.intros cet_var')
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_AE[OF dens'])
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
thus "AE x in stock_measure t'. ?f \<rho> x = ennreal (eval_cexpr (\<integral>\<^sub>c ?expr \<partial>t) \<rho> x)"
using ctype vars edc_snd.hyps nonneg'
by (intro has_parametrized_subprob_density_cexpr_sem_integral[OF dens'']) auto
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t'' \<Gamma>) (\<integral>\<^sub>c ?expr \<partial>t)"
using nonneg' by (intro nonneg_cexpr_int) simp
qed (insert edc_snd.prems ctype vars, auto simp: measurable_split_conv
intro!: cet_int measurable_compose[OF _ measurable_ennreal]
measurable_Pair_compose_split[OF measurable_eval_cexpr])
next
case (edc_neg vs vs' \<Gamma> \<delta> e f t)
from edc_neg.prems(1) have t: "\<Gamma> \<turnstile> e : t" by (cases t) (auto split: pdf_type.split_asm)
from edc_neg.prems(1) have t_disj: "t = REAL \<or> t = INTEG"
by (cases t) (auto split: pdf_type.split_asm)
from edc_neg.prems edc_neg.IH[OF t]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) t f" by simp_all
have "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Minus $$ e \<Rightarrow> (\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> (op_sem Minus x)))"
using dens by (simp only: dens_ctxt_\<alpha>_def prod.case, intro hd_neg) simp_all
also have "(\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> (op_sem Minus x))) =
(\<lambda>\<sigma> x. ennreal (eval_cexpr (f \<circ>\<^sub>c -\<^sub>c CVar 0) \<sigma> x))"
by (intro ext) (auto simp: eval_cexpr_comp)
finally have "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Minus $$ e \<Rightarrow>
(\<lambda>\<sigma> x. ennreal (eval_cexpr (f \<circ>\<^sub>c -\<^sub>c CVar 0) \<sigma> x))" .
moreover have "is_density_expr (vs, vs', \<Gamma>, \<delta>) t (f \<circ>\<^sub>c -\<^sub>c CVar 0)"
proof (intro is_density_exprI)
from t_disj have t_minus: "case_nat t \<Gamma> \<turnstile>\<^sub>c -\<^sub>c CVar 0 : t"
by (intro cet_op[where t = t]) (auto simp: cexpr_type_Some_iff[symmetric])
thus "case_nat t \<Gamma> \<turnstile>\<^sub>c f \<circ>\<^sub>c -\<^sub>c CVar 0 : REAL" using is_density_exprD(1)[OF wf]
by (intro cexpr_typing_cexpr_comp[of _ _ _ t])
show "free_vars (f \<circ>\<^sub>c -\<^sub>c CVar 0) \<subseteq> shift_var_set (set vs')" using is_density_exprD(2)[OF wf]
by (intro order.trans[OF free_vars_cexpr_comp]) (auto simp: shift_var_set_def)
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) (f \<circ>\<^sub>c -\<^sub>c CVar 0)"
using wf[THEN is_density_exprD_nonneg] t_disj
by (intro nonneg_cexpr_comp)
(auto intro!: cet_var' cet_minus_real cet_minus_int)
qed
ultimately show ?case by (rule conjI)
next
case (edc_addc vs vs' \<Gamma> \<delta> e f e' t)
let ?expr = "f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x -\<^sub>c map_vars Suc (expr_rf_to_cexpr e'))"
from edc_addc.prems(1)
have t1: "\<Gamma> \<turnstile> e : t" and t2: "\<Gamma> \<turnstile> e' : t" and t3: "op_type Add (PRODUCT t t) = Some t"
by (elim expr_typing_opE expr_typing_pairE, fastforce split: pdf_type.split_asm)+
from edc_addc.prems(1) have t_disj: "t = REAL \<or> t = INTEG"
by (cases t) (auto split: pdf_type.split_asm)
hence t3': "op_type Minus t = Some t" by auto
from edc_addc.prems edc_addc.IH[OF t1]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) t f" by simp_all
hence ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?expr : REAL" using t1 t2 t3 t3' edc_addc.hyps edc_addc.prems
by (intro cexpr_typing_cexpr_comp cet_op[where t = "PRODUCT t t"] cet_var')
(auto intro!: cet_pair cexpr_typing_map_vars cet_var' cet_op dest: is_density_exprD simp: o_def)
have vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" using edc_addc.prems edc_addc.hyps
using free_vars_expr_rf_to_cexpr is_density_exprD[OF wf]
by (intro order.trans[OF free_vars_cexpr_comp subset_shift_var_set]) auto
have cet_e': "\<Gamma> \<turnstile> e' : t"
using edc_addc.prems(1)
apply (cases)
apply (erule expr_typing.cases)
apply (auto split: pdf_type.splits)
done
have "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Add $$ <e, e'> \<Rightarrow>
(\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> (op_sem Add <|x, expr_sem_rf \<sigma> (Minus $$ e')|>)))"
(is "?\<Y> \<turnstile>\<^sub>d _ \<Rightarrow> ?f") using dens edc_addc.hyps
by (simp only: dens_ctxt_\<alpha>_def prod.case, intro hd_addc) simp_all
also have "?f = (\<lambda>\<sigma> x. ennreal (eval_cexpr ?expr \<sigma> x))" using edc_addc.hyps
by (intro ext) (auto simp: eval_cexpr_comp cexpr_sem_map_vars o_def cexpr_sem_expr_rf_to_cexpr)
finally have "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Add $$ <e, e'> \<Rightarrow>
(\<lambda>\<sigma> x. ennreal (eval_cexpr ?expr \<sigma> x))" .
moreover have "is_density_expr (vs, vs', \<Gamma>, \<delta>) t ?expr" using ctype vars
proof (intro is_density_exprI)
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) ?expr"
using t_disj edc_addc.hyps edc_addc.prems cet_e' free_vars_expr_rf_to_cexpr[of e']
by (intro nonneg_cexpr_comp[OF wf[THEN is_density_exprD_nonneg]])
(auto intro!: cet_add_int cet_add_real cet_minus_int cet_minus_real cet_var' cexpr_typing_map_vars
simp: o_def)
qed auto
ultimately show ?case by (rule conjI)
next
case (edc_multc vs vs' \<Gamma> \<delta> e f c t)
let ?expr = "(f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x *\<^sub>c CReal (inverse c))) *\<^sub>c CReal (inverse (abs c))"
from edc_multc.prems(1) edc_multc.hyps have t1: "\<Gamma> \<turnstile> e : REAL" and [simp]: "t = REAL"
by (elim expr_typing_opE expr_typing_pairE, force split: pdf_type.split_asm)+
from edc_multc.prems edc_multc.IH[OF t1]
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) REAL f" by simp_all
have ctype': "case_nat t \<Gamma> \<turnstile>\<^sub>c f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x *\<^sub>c CReal (inverse c)) : REAL"
using t1 edc_multc.hyps edc_multc.prems is_density_exprD[OF wf]
by (intro cexpr_typing_cexpr_comp)
(auto intro!: cet_pair cexpr_typing_map_vars cet_var' cet_val' cet_op_intros)
hence ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?expr : REAL"
by (auto intro!: cet_op_intros cet_pair cet_val')
have vars': "free_vars (f \<circ>\<^sub>c (\<lambda>\<^sub>cx. x *\<^sub>c CReal (inverse c))) \<subseteq> shift_var_set (set vs')"
using edc_multc.prems edc_multc.hyps free_vars_expr_rf_to_cexpr is_density_exprD[OF wf]
by (intro order.trans[OF free_vars_cexpr_comp subset_shift_var_set]) auto
hence vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" by simp
have "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Mult $$ <e, Val (RealVal c)> \<Rightarrow>
(\<lambda>\<sigma> x. ennreal (eval_cexpr f \<sigma> (op_sem Mult <|x, op_sem Inverse (RealVal c)|>)) *
ennreal (inverse (abs (extract_real (RealVal c)))))"
(is "?\<Y> \<turnstile>\<^sub>d _ \<Rightarrow> ?f") using dens edc_multc.hyps
by (simp only: dens_ctxt_\<alpha>_def prod.case, intro hd_multc) simp_all
hence "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d Mult $$ <e, Val (RealVal c)> \<Rightarrow>
(\<lambda>\<sigma> x. ennreal (eval_cexpr ?expr \<sigma> x))"
proof (simp only: dens_ctxt_\<alpha>_def prod.case, erule_tac hd_cong)
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)" and x: "x \<in> space (stock_measure REAL)"
hence "eval_cexpr ?expr \<rho> x =
extract_real (cexpr_sem (case_nat x \<rho>) (f \<circ>\<^sub>c CVar 0 *\<^sub>c CReal (inverse c))) * inverse \<bar>c\<bar>"
(is "_ = ?a * ?b") unfolding eval_cexpr_def
by (subst cexpr_sem_Mult[OF ctype' cet_val' _ vars'])
(auto simp: extract_real_def simp del: stock_measure.simps)
also hence "?a = eval_cexpr f \<rho> (op_sem Mult <|x, op_sem Inverse (RealVal c)|>)"
by (auto simp: cexpr_sem_cexpr_comp eval_cexpr_def lift_RealVal_def lift_RealIntVal2_def)
finally show "ennreal (eval_cexpr f \<rho> (op_sem Mult <|x, op_sem Inverse (RealVal c)|>)) *
ennreal (inverse \<bar>extract_real (RealVal c)\<bar>) = ennreal (eval_cexpr ?expr \<rho> x)"
by (simp add: extract_real_def ennreal_mult'')
qed (insert edc_multc.prems, auto intro!: density_context_\<alpha>)
moreover have "is_density_expr (vs, vs', \<Gamma>, \<delta>) t ?expr" using ctype vars
proof (intro is_density_exprI)
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) ?expr"
using is_density_exprD[OF wf] vars vars'
by (intro nonneg_cexpr_comp[OF wf[THEN is_density_exprD_nonneg]] nonneg_cexpr_Mult ctype')
(auto intro!: nonneg_cexprI cet_var' cet_val' cet_op_intros)
qed auto
ultimately show ?case by (rule conjI)
next
case (edc_add vs vs' \<Gamma> \<delta> e f t t')
note t = \<open>\<Gamma> \<turnstile> e : PRODUCT t t\<close>
note invar = cdens_ctxt_invarD[OF edc_add.prems(2)]
from edc_add.prems and t have "op_type Add (PRODUCT t t) = Some t'"
by (elim expr_typing_opE) (auto dest: expr_typing_unique)
hence [simp]: "t' = t" and t_disj: "t = INTEG \<or> t = REAL" by (auto split: pdf_type.split_asm)
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>x xa. ennreal (eval_cexpr f x xa))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) (PRODUCT t t) f"
using edc_add by simp_all
note wf' = is_density_exprD[OF wf]
let ?\<Y> = "(set vs, set vs', \<Gamma>, \<lambda>\<rho>. ennreal (extract_real (cexpr_sem \<rho> \<delta>)))"
let ?M = "\<lambda>\<rho>. dens_ctxt_measure ?\<Y> \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> e)"
have nonneg: "nonneg_cexpr (shift_var_set (set vs')) (case_nat (PRODUCT t t) \<Gamma>) f"
using wf by (rule is_density_exprD_nonneg)
let ?shift = "case_nat 0 (\<lambda>x. x + 2)"
let ?expr' = "map_vars ?shift f \<circ>\<^sub>c (\<lambda>\<^sub>cx. <x, CVar 1 -\<^sub>c x>\<^sub>c)"
let ?expr = "\<integral>\<^sub>c ?expr' \<partial>t"
have [simp]: "\<And>t t' \<Gamma>. case_nat t (case_nat t' \<Gamma>) \<circ> case_nat 0 (\<lambda>x. Suc (Suc x)) = case_nat t \<Gamma>"
by (intro ext) (simp split: nat.split add: o_def)
have ctype'': "case_nat t (case_nat t \<Gamma>) \<turnstile>\<^sub>c <CVar 0, CVar 1 -\<^sub>c CVar 0>\<^sub>c : PRODUCT t t"
by (rule cet_pair, simp add: cet_var', rule cet_op[where t = "PRODUCT t t"], rule cet_pair)
(insert t_disj, auto intro!: cet_var' cet_op[where t = t])
hence ctype': "case_nat t (case_nat t \<Gamma>) \<turnstile>\<^sub>c ?expr' : REAL" using wf'
by (intro cexpr_typing_cexpr_comp cexpr_typing_map_vars) simp_all
hence ctype: "case_nat t \<Gamma> \<turnstile>\<^sub>c ?expr : REAL" by (rule cet_int)
have vars': "free_vars ?expr' \<subseteq> shift_var_set (shift_var_set (set vs'))" using wf'
by (intro order.trans[OF free_vars_cexpr_comp]) (auto split: nat.split simp: shift_var_set_def)
hence vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" by auto
let ?M = "\<lambda>\<rho>. dens_ctxt_measure ?\<Y> \<rho> \<bind> (\<lambda>\<sigma>. expr_sem \<sigma> (Add $$ e))"
let ?f = "\<lambda>\<rho> x y. eval_cexpr f \<rho> <|y, op_sem Add <|x, op_sem Minus y|>|>"
have "?\<Y> \<turnstile>\<^sub>d Add $$ e \<Rightarrow> (\<lambda>\<rho> x. \<integral>\<^sup>+y. ?f \<rho> x y \<partial>stock_measure (val_type x))" using dens
by (intro hd_add) (simp add: dens_ctxt_\<alpha>_def)
hence dens: "?\<Y> \<turnstile>\<^sub>d Add $$ e \<Rightarrow> (\<lambda>\<rho> x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t)"
by (rule hd_cong) (insert edc_add.prems, auto intro!: density_context_\<alpha> nn_integral_cong
simp: eval_cexpr_def cexpr_sem_cexpr_comp cexpr_sem_map_vars)
hence dens': "has_parametrized_subprob_density (state_measure (set vs') \<Gamma>) ?M (stock_measure t)
(\<lambda>\<rho> x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t)"
using edc_add.prems by (intro expr_has_density_sound_aux density_context_\<alpha>) simp_all
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_AE[OF dens])
fix \<rho> assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
let ?dens = "\<lambda>x. \<integral>\<^sup>+y. eval_cexpr ?expr' (case_nat x \<rho>) y \<partial>stock_measure t"
show "AE x in stock_measure t. ?dens x = ennreal (eval_cexpr ?expr \<rho> x)"
proof (rule AE_mp[OF _ AE_I2[OF impI]])
from has_parametrized_subprob_density_integral[OF dens' \<rho>] and
has_parametrized_subprob_densityD(3)[OF dens'] and \<rho>
show "AE x in stock_measure t. ?dens x \<noteq> \<infinity>" by (intro nn_integral_PInf_AE) auto
next
fix x assume x: "x \<in> space (stock_measure t)" and fin: "?dens x \<noteq> \<infinity>"
thus "?dens x = ennreal (eval_cexpr ?expr \<rho> x)"
using \<rho> vars' ctype' ctype'' nonneg unfolding eval_cexpr_def
by (subst cexpr_sem_integral_nonneg) (auto intro!: nonneg_cexpr_comp nonneg_cexpr_map_vars simp: less_top)
qed
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t' \<Gamma>) ?expr"
using ctype'' nonneg
by (intro nonneg_cexpr_int nonneg_cexpr_comp[of _ "PRODUCT t t"] nonneg_cexpr_map_vars)
auto
qed (insert vars ctype edc_add.prems, auto)
next
case (edc_inv vs vs' \<Gamma> \<delta> e f t)
hence t: "\<Gamma> \<turnstile> e : t" and [simp]: "t = REAL"
by (elim expr_typing_opE, force split: pdf_type.split_asm)+
note invar = cdens_ctxt_invarD[OF edc_inv.prems(2)]
let ?expr = "(f \<circ>\<^sub>c (\<lambda>\<^sub>cx. inverse\<^sub>c x)) *\<^sub>c (\<lambda>\<^sub>cx. (inverse\<^sub>c x) ^\<^sub>c CInt 2)"
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) REAL f" using edc_inv t by simp_all
note wf' = is_density_exprD[OF wf]
from wf' have ctype: "case_nat REAL \<Gamma> \<turnstile>\<^sub>c ?expr : REAL"
by (auto intro!: cet_op_intros cexpr_typing_cexpr_comp cet_var' cet_val')
from wf' have vars': "free_vars (f \<circ>\<^sub>c (\<lambda>\<^sub>cx. inverse\<^sub>c x)) \<subseteq> shift_var_set (set vs')"
by (intro order.trans[OF free_vars_cexpr_comp]) auto
hence vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" using free_vars_cexpr_comp by simp
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_cong[OF hd_inv])
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
and x: "x \<in> space (stock_measure REAL)"
from x obtain x' where [simp]: "x = RealVal x'" by (auto simp: val_type_eq_REAL)
from \<rho> and wf' have "val_type (cexpr_sem (case_nat (RealVal (inverse x')) \<rho>) f) = REAL"
by (intro val_type_cexpr_sem[OF _ _ case_nat_in_state_measure])
(auto simp: type_universe_def simp del: type_universe_type)
thus "ennreal (eval_cexpr f \<rho> (op_sem Inverse x)) * ennreal ((inverse (extract_real x))\<^sup>2) =
ennreal (eval_cexpr ?expr \<rho> x)"
by (auto simp: eval_cexpr_def lift_RealVal_def lift_RealIntVal2_def ennreal_mult''
extract_real_def cexpr_sem_cexpr_comp elim!: REAL_E)
next
have "nonneg_cexpr (shift_var_set (set vs')) (case_nat REAL \<Gamma>) (inverse\<^sub>c (CVar 0) ^\<^sub>c CInt 2)"
by (auto intro!: nonneg_cexprI simp: space_state_measure_shift_iff val_type_eq_REAL lift_RealVal_eq)
then show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) ?expr"
using wf'
by (intro nonneg_cexpr_Mult nonneg_cexpr_comp vars')
(auto intro!: cet_op_intros cexpr_typing_cexpr_comp cet_var' cet_val')
qed (insert edc_inv.prems ctype vars dens,
auto intro!: density_context_\<alpha> simp: dens_ctxt_\<alpha>_def)
next
case (edc_exp vs vs' \<Gamma> \<delta> e f t)
hence t: "\<Gamma> \<turnstile> e : t" and [simp]: "t = REAL"
by (elim expr_typing_opE, force split: pdf_type.split_asm)+
note invar = cdens_ctxt_invarD[OF edc_exp.prems(2)]
let ?expr = "(\<lambda>\<^sub>cx. IF\<^sub>c CReal 0 <\<^sub>c x THEN (f \<circ>\<^sub>c ln\<^sub>c x) *\<^sub>c inverse\<^sub>c x ELSE CReal 0)"
have dens: "dens_ctxt_\<alpha> (vs, vs', \<Gamma>, \<delta>) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))" and
wf: "is_density_expr (vs, vs', \<Gamma>, \<delta>) REAL f" using edc_exp t by simp_all
note wf' = is_density_exprD[OF wf]
from wf' have ctype: "case_nat REAL \<Gamma> \<turnstile>\<^sub>c ?expr : REAL"
by (auto intro!: cet_if cet_op_intros cet_var' cet_val' cexpr_typing_cexpr_comp)
from wf' have "free_vars (f \<circ>\<^sub>c (\<lambda>\<^sub>cx. ln\<^sub>c x)) \<subseteq> shift_var_set (set vs')"
by (intro order.trans[OF free_vars_cexpr_comp]) auto
hence vars: "free_vars ?expr \<subseteq> shift_var_set (set vs')" using free_vars_cexpr_comp by simp
show ?case
proof (intro conjI is_density_exprI, simp only: dens_ctxt_\<alpha>_def prod.case, rule hd_cong[OF hd_exp])
fix \<rho> x assume \<rho>: "\<rho> \<in> space (state_measure (set vs') \<Gamma>)"
and x: "x \<in> space (stock_measure REAL)"
from x obtain x' where [simp]: "x = RealVal x'" by (auto simp: val_type_eq_REAL)
from \<rho> and wf' have "val_type (cexpr_sem (case_nat (RealVal (ln x')) \<rho>) f) = REAL"
by (intro val_type_cexpr_sem[OF _ _ case_nat_in_state_measure])
(auto simp: type_universe_def simp del: type_universe_type)
thus "(if 0 < extract_real x then ennreal (eval_cexpr f \<rho> (lift_RealVal safe_ln x)) *
ennreal (inverse (extract_real x)) else 0) = ennreal (eval_cexpr ?expr \<rho> x)"
by (auto simp: eval_cexpr_def lift_RealVal_def lift_RealIntVal2_def lift_Comp_def ennreal_mult''
extract_real_def cexpr_sem_cexpr_comp elim!: REAL_E)
next
show "nonneg_cexpr (shift_var_set (set vs')) (case_nat t \<Gamma>) ?expr"
proof (rule nonneg_cexprI_shift)
fix x \<sigma> assume "x \<in> type_universe t" and \<sigma>: "\<sigma> \<in> space (state_measure (set vs') \<Gamma>)"
then obtain r where "x = RealVal r"
by (auto simp: val_type_eq_REAL)
moreover note \<sigma> nonneg_cexprD[OF is_density_exprD_nonneg[OF wf], of "case_nat (RealVal (ln r)) \<sigma>"]
moreover have "val_type (cexpr_sem (case_nat (RealVal (ln r)) \<sigma>) f) = REAL"
using \<sigma> by (intro val_type_cexpr_sem[OF wf'(1,2)] case_nat_in_state_measure) auto
ultimately show "0 \<le> extract_real
(cexpr_sem (case_nat x \<sigma>)
(IF\<^sub>c CReal 0 <\<^sub>c CVar 0 THEN (f \<circ>\<^sub>c ln\<^sub>c (CVar 0)) /\<^sub>c CVar 0 ELSE CReal 0))"
by (auto simp: lift_Comp_def lift_RealVal_eq cexpr_sem_cexpr_comp val_type_eq_REAL
case_nat_in_state_measure lift_RealIntVal2_def)
qed
qed (insert edc_exp.prems ctype vars dens,
auto intro!: density_context_\<alpha> simp: dens_ctxt_\<alpha>_def)
qed
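(* Specialising the auxiliary soundness result above to the empty density
   context ([], [], \<Gamma>, CReal 1) yields soundness for closed expressions. *)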
lemma expr_has_density_cexpr_sound:
assumes "([], [], \<Gamma>, CReal 1) \<turnstile>\<^sub>c e \<Rightarrow> f" "\<Gamma> \<turnstile> e : t" "free_vars e = {}"
shows "has_subprob_density (expr_sem \<sigma> e) (stock_measure t) (\<lambda>x. ennreal (eval_cexpr f \<sigma> x))"
"\<forall>x\<in>type_universe t. 0 \<le> extract_real (cexpr_sem (case_nat x \<sigma>) f)"
"\<Gamma>' 0 = t \<Longrightarrow> \<Gamma>' \<turnstile>\<^sub>c f : REAL"
"free_vars f \<subseteq> {0}"
proof-
have "dens_ctxt_\<alpha> ([], [], \<Gamma>, CReal 1) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x)) \<and>
is_density_expr ([], [], \<Gamma>, CReal 1) t f" using assms
by (intro expr_has_density_cexpr_sound_aux assms cdens_ctxt_invarI nonneg_cexprI subprob_cexprI)
(auto simp: state_measure_def PiM_empty cexpr_type_Some_iff[symmetric] extract_real_def)
hence dens: "dens_ctxt_\<alpha> ([], [], \<Gamma>, CReal 1) \<turnstile>\<^sub>d e \<Rightarrow> (\<lambda>\<rho> x. ennreal (eval_cexpr f \<rho> x))"
and wf: "is_density_expr ([], [], \<Gamma>, CReal 1) t f" using assms by blast+
have "has_subprob_density (expr_sem \<sigma> e) (stock_measure t)
(\<lambda>x. ennreal (eval_cexpr f (\<lambda>_. undefined) x))" (is ?P) using dens assms
by (intro expr_has_density_sound) (auto simp: dens_ctxt_\<alpha>_def extract_real_def one_ennreal_def)
also have "\<And>x. cexpr_sem (case_nat x (\<lambda>_. undefined)) f = cexpr_sem (case_nat x \<sigma>) f"
using is_density_exprD[OF wf]
by (intro cexpr_sem_eq_on_vars) (auto split: nat.split simp: shift_var_set_def)
hence "?P \<longleftrightarrow> has_subprob_density (expr_sem \<sigma> e) (stock_measure t)
(\<lambda>x. ennreal (eval_cexpr f \<sigma> x))"
by (intro has_subprob_density_cong) (simp add: eval_cexpr_def)
finally show "..." .
from is_density_exprD[OF wf] show vars: "free_vars f \<subseteq> {0}" by (auto simp: shift_var_set_def)
show "\<forall>x\<in>type_universe t. 0 \<le> extract_real (cexpr_sem (case_nat x \<sigma>) f)"
proof
fix v assume v: "v \<in> type_universe t"
then have "0 \<le> extract_real (cexpr_sem (case_nat v (\<lambda>_. undefined)) f)"
by (intro nonneg_cexprD[OF wf[THEN is_density_exprD_nonneg]] case_nat_in_state_measure)
(auto simp: space_state_measure)
also have "cexpr_sem (case_nat v (\<lambda>_. undefined)) f = cexpr_sem (case_nat v \<sigma>) f"
using \<open>free_vars f \<subseteq> {0}\<close> by (intro cexpr_sem_eq_on_vars) auto
finally show "0 \<le> extract_real (cexpr_sem (case_nat v \<sigma>) f)" .
qed
assume "\<Gamma>' 0 = t"
thus "\<Gamma>' \<turnstile>\<^sub>c f : REAL"
by (intro cexpr_typing_cong'[OF is_density_exprD(1)[OF wf]])
(insert vars, auto split: nat.split)
qed
inductive expr_compiles_to :: "expr \<Rightarrow> pdf_type \<Rightarrow> cexpr \<Rightarrow> bool" ("_ : _ \<Rightarrow>\<^sub>c _" [10,0,10] 10)
for e t f where
"(\<lambda>_. UNIT) \<turnstile> e : t \<Longrightarrow> free_vars e = {} \<Longrightarrow>
([], [], \<lambda>_. UNIT, CReal 1) \<turnstile>\<^sub>c e \<Rightarrow> f \<Longrightarrow>
e : t \<Rightarrow>\<^sub>c f"
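(* Reading of the judgement: e : t \<Rightarrow>\<^sub>c f states that the closed, well-typed
   expression e of type t compiles to the density expression f; by
   expr_has_density_cexpr_sound, f mentions at most the variable 0, which
   ranges over the values of type t. *)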
code_pred expr_compiles_to .
lemma expr_compiles_to_sound:
assumes "e : t \<Rightarrow>\<^sub>c f"
shows "expr_sem \<sigma> e = density (stock_measure t) (\<lambda>x. ennreal (eval_cexpr f \<sigma>' x))"
"\<forall>x\<in>type_universe t. eval_cexpr f \<sigma>' x \<ge> 0"
"\<Gamma> \<turnstile> e : t"
"t \<cdot> \<Gamma>' \<turnstile>\<^sub>c f : REAL"
"free_vars f \<subseteq> {0}"
proof-
let ?\<Gamma> = "\<lambda>_. UNIT"
from assms have A: "([], [], ?\<Gamma>, CReal 1) \<turnstile>\<^sub>c e \<Rightarrow> f" "?\<Gamma> \<turnstile> e : t" "free_vars e = {}"
by (simp_all add: expr_compiles_to.simps)
hence "expr_sem \<sigma> e = expr_sem \<sigma>' e" by (intro expr_sem_eq_on_vars) simp
with expr_has_density_cexpr_sound[OF A]
show "expr_sem \<sigma> e = density (stock_measure t) (\<lambda>x. ennreal (eval_cexpr f \<sigma>' x))"
"\<forall>x\<in>type_universe t. eval_cexpr f \<sigma>' x \<ge> 0"
"t \<cdot> \<Gamma>' \<turnstile>\<^sub>c f : REAL"
"free_vars f \<subseteq> {0}" unfolding has_subprob_density_def has_density_def eval_cexpr_def
by (auto intro!: nonneg_cexprD case_nat_in_state_measure)
from assms have "(\<lambda>_. UNIT) \<turnstile> e : t" by (simp add: expr_compiles_to.simps)
from this and assms show "\<Gamma> \<turnstile> e : t"
by (subst expr_typing_cong) (auto simp: expr_compiles_to.simps)
qed
section \<open>Tests\<close>
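(* Each values command below uses the equations generated by code_pred to
   enumerate all pairs (t, f) with e : t \<Rightarrow>\<^sub>c f for the given expression e;
   since typing is unique, a well-typed closed expression should produce
   exactly one such pair. *)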
values "{(t, f) |t f. Val (IntVal 42) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. Minus $$ (Val (IntVal 42)) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. Fst $$ (Val <|IntVal 13, IntVal 37|>) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. Random Bernoulli (Val (RealVal 0.5)) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. Add $$ <Val (IntVal 37), Minus $$ (Val (IntVal 13))> : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. LET Val (IntVal 13) IN LET Minus $$ (Val (IntVal 37)) IN
Add $$ <Var 0, Var 1> : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. IF Random Bernoulli (Val (RealVal 0.5)) THEN
Random Bernoulli (Val (RealVal 0.25))
ELSE
Random Bernoulli (Val (RealVal 0.75)) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. LET Random Bernoulli (Val (RealVal 0.5)) IN
IF Var 0 THEN
Random Bernoulli (Val (RealVal 0.25))
ELSE
Random Bernoulli (Val (RealVal 0.75)) : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. LET Random Gaussian <Val (RealVal 0), Val (RealVal 1)> IN
LET Random Gaussian <Val (RealVal 0), Val (RealVal 1)> IN
Add $$ <Var 0, Var 1> : t \<Rightarrow>\<^sub>c f}"
values "{(t, f) |t f. LET Random UniformInt <Val (IntVal 1), Val (IntVal 6)> IN
LET Random UniformInt <Val (IntVal 1), Val (IntVal 6)> IN
Add $$ <Var 0, Var 1> : t \<Rightarrow>\<^sub>c f}"
(* Example from the paper by Bhat et al. *)
values "{(t, f) |t f. LET Random UniformReal <Val (RealVal 0), Val (RealVal 1)> IN
LET Random Bernoulli (Var 0) IN
IF Var 0 THEN Add $$ <Var 1, Val (RealVal 1)> ELSE Var 1 : t \<Rightarrow>\<^sub>c f}"
(* Simplification of the constant expression yields:
\<integral>b. (IF 0 \<le> x - 1 \<and> x - 1 \<le> 1 THEN 1 ELSE 0) *
(IF 0 \<le> x - 1 \<and> x - 1 \<le> 1 THEN IF b THEN x - 1 ELSE 1 - (x - 1) ELSE 0) * \<langle>b\<rangle> +
\<integral>b. (IF 0 \<le> x \<and> x \<le> 1 THEN 1 ELSE 0) *
(IF 0 \<le> x \<and> x \<le> 1 THEN IF b THEN x ELSE 1 - x ELSE 0) * \<langle>\<not>b\<rangle>
*)
(* Further simplification yields:
(\<integral>b. \<langle>0 \<le> x-1 \<le> 1\<rangle> * (IF b THEN x-1 ELSE 2-x) * \<langle>b\<rangle>) +
(\<integral>b. \<langle>0 \<le> x \<le> 1\<rangle> * (IF b THEN x ELSE 1-x) * \<langle>\<not>b\<rangle>)
*)
(* Further simplification yields:
\<langle>1 \<le> x \<le> 2\<rangle>*(x-1) + \<langle>0 \<le> x \<le> 1\<rangle>*(1-x)
*)
(* Mathematica input:
Piecewise[{{x-1, 1 <= x && x <= 2}, {1-x, 0 <= x && x <= 1}}]
*)
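(* Sanity check for the final simplified density: it integrates to 1, since
   the integral of (x-1) over [1,2] plus the integral of (1-x) over [0,1]
   is 1/2 + 1/2 = 1, as expected of a probability density. *)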
definition "cthulhu skill \<equiv>
LET Random UniformInt (Val <|IntVal 1, IntVal 100|>)
IN IF Less $$ <Val (IntVal skill), Var 0> THEN
Val (IntVal skill)
ELSE IF Or $$ <Less $$ <Var 0, Val (IntVal 6)>,
Less $$ <Mult $$ <Var 0, Val (IntVal 5)>,
Add $$ <Val (IntVal skill), Val (IntVal 1)> > > THEN
Add $$ <IF Less $$ <Val (IntVal skill),
Random UniformInt <Val (IntVal 1), Val (IntVal 100)> > THEN
Random UniformInt <Val (IntVal 1), Val (IntVal 10)>
ELSE
Val (IntVal 0),
Val (IntVal skill)>
ELSE Val (IntVal skill)"
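(* cthulhu' below is, presumably, a manual rewrite of cthulhu: the Random
   UniformInt draw that cthulhu nests inside the Less guard is hoisted into
   an outer LET, so that every IF condition becomes a random-free expression
   of the shape the compiler rules for deterministic guards can handle. *)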
definition "cthulhu' (skill :: int) =
LET Random UniformInt (Val <|IntVal 1, IntVal 100|>)
IN IF Less $$ <Val (IntVal skill), Var 0> THEN
Val (IntVal skill)
ELSE IF Or $$ <Less $$ <Var 0, Val (IntVal 6)>,
Less $$ <Mult $$ <Var 0, Val (IntVal 5)>,
Add $$ <Val (IntVal skill), Val (IntVal 1)> > > THEN
LET Random UniformInt (Val <|IntVal 1, IntVal 100|>)
IN Add $$ <IF Less $$ <Val (IntVal skill), Var 1 > THEN
Random UniformInt (Val <|IntVal 1, IntVal 10|>)
ELSE
Val (IntVal 0),
Val (IntVal skill)>
ELSE Val (IntVal skill)"
values "{(t, f) |t f. cthulhu' 42 : t \<Rightarrow>\<^sub>c f}"
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Density_Compiler/PDF_Compiler.thy"}
|
subroutine unpack_contr(contr,svertex,vtx,topo,xlines,nvtx,nj_res)
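! unpack a contraction from its packed integer(8) representation:
! vtx holds the vertices, topo the arc occupations between vertex
! pairs, and xlines the open (external) lines per result vertex;
! all occupations are encoded digit-wise in base pack_base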
implicit none
include 'stdunit.h'
include 'opdim.h'
include 'def_contraction.h'
include 'ifc_operators.h'
integer, intent(in) ::
& nj_res, nvtx
type(contraction), intent(inout) ::
& contr
integer, intent(in) ::
& svertex(nvtx)
integer(8), intent(in) ::
& vtx(nvtx), topo(nvtx,nvtx),
& xlines(nvtx,nj_res)
integer ::
& narc, nxarc, iarc, ivtx, jvtx, ij,
& idx_op, iblk_op, icnt, iadj
integer(8) ::
& avtx, base
integer ::
& occ(ngastp,2)
type(cntr_vtx), pointer ::
& vertex(:)
type(cntr_arc), pointer ::
& arc(:), xarc(:)
integer, external ::
& int8_expand
base = pack_base
! count arcs
narc = 0
do jvtx = 1, nvtx
do ivtx = 1, jvtx
if (topo(ivtx,jvtx).gt.0) narc = narc+1
end do
end do
! count xarcs
nxarc = 0
do ij = 1, nj_res
do ivtx = 1, nvtx
if (xlines(ivtx,ij).gt.0) nxarc = nxarc+1
end do
end do
call resize_contr(contr,nvtx,narc,nxarc,0)
vertex => contr%vertex
arc => contr%arc
xarc => contr%xarc
contr%nvtx = nvtx
contr%narc = narc
contr%nxarc = nxarc
! unpack vertices
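! packed vertex layout (base-pack_base digits, least significant first):
! digits 0-1 hold iblk_op, digits 2-5 hold |idx_op| (the sign of the
! packed word gives the sign of idx_op), digit 6 holds the dagger flag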
do ivtx = 1, nvtx
avtx = abs(vtx(ivtx))
iadj = avtx/(base**6)
avtx = mod(avtx,(base**6))
idx_op = sign(avtx/(base*base),vtx(ivtx))
iblk_op = mod(avtx,base*base)
vertex(ivtx)%idx_op = idx_op
vertex(ivtx)%iblk_op = iblk_op
vertex(ivtx)%dagger = iadj.eq.1
end do
! unpack arcs
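! each nonzero topo entry packs an occupation matrix occ(ngastp,2)
! digit-wise in base; int8_expand decodes it (the return value icnt
! is not used here)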
iarc = 0
do jvtx = 1, nvtx
do ivtx = 1, jvtx
if (topo(ivtx,jvtx).gt.0) then
iarc = iarc+1
occ = 0
icnt = int8_expand(topo(ivtx,jvtx),base,occ)
arc(iarc)%link(1) = ivtx
arc(iarc)%link(2) = jvtx
arc(iarc)%occ_cnt = occ
end if
end do
end do
! unpack xarcs
iarc = 0
do ij = 1, nj_res
do ivtx = 1, nvtx
if (xlines(ivtx,ij).gt.0) then
iarc = iarc+1
occ = 0
icnt = int8_expand(xlines(ivtx,ij),base,occ)
xarc(iarc)%link(1) = ivtx
xarc(iarc)%link(2) = ij
xarc(iarc)%occ_cnt = occ
end if
end do
end do
! store svertex
do ivtx = 1, nvtx
contr%svertex(ivtx) = svertex(ivtx)
end do
! update other svertex info
call update_svtx4contr(contr)
return
end
|
{"hexsha": "3eb5cb60c86f56a73fd46e2c48fc90ec22b18fc4", "size": 2765, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "formula/unpack_contr.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "formula/unpack_contr.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "formula/unpack_contr.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6324786325, "max_line_length": 72, "alphanum_fraction": 0.4911392405, "num_tokens": 909}
|
#include "root_path.hpp"
#include "../common/content_format.hpp"
#include <pxp-agent/external_module.hpp>
#include <pxp-agent/module_type.hpp>
#include <pxp-agent/configuration.hpp>
#include <pxp-agent/util/process.hpp>
#include <cpp-pcp-client/protocol/chunks.hpp> // ParsedChunks
#include <leatherman/json_container/json_container.hpp>
#include <leatherman/util/scope_exit.hpp>
#include <leatherman/file_util/file.hpp>
#include <boost/filesystem/operations.hpp>
#include <catch.hpp>
#include <string>
#include <vector>
#include <unistd.h>
#ifdef _WIN32
#define EXTENSION ".bat"
#else
#define EXTENSION ""
#endif
namespace PXPAgent {
namespace fs = boost::filesystem;
namespace lth_jc = leatherman::json_container;
namespace lth_util = leatherman::util;
namespace lth_file = leatherman::file_util;
static const std::string SPOOL_DIR { std::string { PXP_AGENT_ROOT_PATH }
+ "/lib/tests/resources/test_spool" };
static const std::string REVERSE_TXT {
(DATA_FORMAT % "\"0987\""
% "\"reverse\""
% "\"string\""
% "{\"argument\" : \"maradona\"}").str() };
static const std::string NON_BLOCKING_REVERSE_TXT {
(NON_BLOCKING_DATA_FORMAT % "\"1988\""
% "\"reverse\""
% "\"string\""
% "{\"argument\" : \"zico\"}"
% "false").str() };
static const std::vector<lth_jc::JsonContainer> NO_DEBUG {};
static const PCPClient::ParsedChunks CONTENT {
lth_jc::JsonContainer(ENVELOPE_TXT), // envelope
lth_jc::JsonContainer(REVERSE_TXT), // data
NO_DEBUG, // debug
0 }; // num invalid debug chunks
static const PCPClient::ParsedChunks NON_BLOCKING_CONTENT {
lth_jc::JsonContainer(ENVELOPE_TXT), // envelope
lth_jc::JsonContainer(NON_BLOCKING_REVERSE_TXT), // data
NO_DEBUG, // debug
0 }; // num invalid debug chunks
TEST_CASE("ExternalModule::ExternalModule", "[modules]") {
SECTION("can successfully instantiate from a valid external module") {
REQUIRE_NOTHROW(ExternalModule(PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/reverse_valid"
EXTENSION,
SPOOL_DIR));
}
SECTION("all actions are successfully loaded from a valid external module") {
ExternalModule mod { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/failures_test"
EXTENSION,
SPOOL_DIR };
REQUIRE(mod.actions.size() == 2u);
}
SECTION("throw a Module::LoadingError in case the module has an invalid "
"metadata schema") {
REQUIRE_THROWS_AS(
ExternalModule(PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules_broken/reverse_broken"
EXTENSION,
SPOOL_DIR),
Module::LoadingError);
}
}
TEST_CASE("ExternalModule::type", "[modules]") {
ExternalModule mod { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/reverse_valid"
EXTENSION,
SPOOL_DIR };
SECTION("correctly reports its type") {
REQUIRE(mod.type() == ModuleType::External);
}
}
TEST_CASE("ExternalModule::hasAction", "[modules]") {
ExternalModule mod { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/reverse_valid"
EXTENSION,
SPOOL_DIR };
SECTION("correctly reports false") {
REQUIRE(!mod.hasAction("foo"));
}
SECTION("correctly reports true") {
REQUIRE(mod.hasAction("string"));
}
}
static void configureTest() {
if (!fs::exists(SPOOL_DIR) && !fs::create_directories(SPOOL_DIR)) {
FAIL("Failed to create the results directory");
}
Configuration::Instance().initialize(
[](std::vector<std::string>) {
return EXIT_SUCCESS;
});
}
static void resetTest() {
if (fs::exists(SPOOL_DIR)) {
fs::remove_all(SPOOL_DIR);
}
}
TEST_CASE("ExternalModule::callAction - blocking", "[modules]") {
configureTest();
lth_util::scope_exit config_cleaner { resetTest };
SECTION("the shipped 'reverse' module works correctly") {
ExternalModule reverse_module { PXP_AGENT_ROOT_PATH
"/lib/tests/resources//modules/reverse_valid"
EXTENSION,
SPOOL_DIR };
SECTION("correctly call the reverse module") {
ActionRequest request { RequestType::Blocking, CONTENT };
auto response = reverse_module.executeAction(request);
REQUIRE(response.output.std_out.find("anodaram") != std::string::npos);
REQUIRE(response.output.std_err.empty());
}
}
SECTION("it should handle module failures") {
ExternalModule test_reverse_module { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/failures_test"
EXTENSION,
SPOOL_DIR };
SECTION("mark the results as invalid if the module returns an invalid result") {
std::string failure_txt { (DATA_FORMAT % "\"1234987\""
% "\"failures_test\""
% "\"get_an_invalid_result\""
% "\"maradona\"").str() };
PCPClient::ParsedChunks failure_content {
lth_jc::JsonContainer(ENVELOPE_TXT),
lth_jc::JsonContainer(failure_txt),
NO_DEBUG,
0 };
ActionRequest request { RequestType::Blocking, failure_content };
auto response = test_reverse_module.executeAction(request);
REQUIRE_FALSE(response.action_metadata.includes("results"));
REQUIRE(response.action_metadata.includes("results_are_valid"));
REQUIRE_FALSE(response.action_metadata.get<bool>("results_are_valid"));
}
SECTION("it should include error output in response") {
std::string failure_txt { (DATA_FORMAT % "\"1234987\""
% "\"failures_test\""
% "\"broken_action\""
% "\"maradona\"").str() };
PCPClient::ParsedChunks failure_content {
lth_jc::JsonContainer(ENVELOPE_TXT),
lth_jc::JsonContainer(failure_txt),
NO_DEBUG,
0 };
ActionRequest request { RequestType::Blocking, failure_content };
auto response = test_reverse_module.executeAction(request);
REQUIRE(response.action_metadata.includes("results"));
REQUIRE(response.action_metadata.get<std::string>("results").empty());
REQUIRE(response.action_metadata.includes("results_are_valid"));
REQUIRE_FALSE(response.action_metadata.get<bool>("results_are_valid"));
REQUIRE(response.action_metadata.includes("execution_error"));
REQUIRE(response.action_metadata.get<std::string>("execution_error").find("we failed, sorry ☹") != std::string::npos);
REQUIRE(response.output.std_out.empty());
REQUIRE(response.output.std_err.find("we failed, sorry ☹") != std::string::npos);
}
}
}
TEST_CASE("ExternalModule::callAction - non blocking", "[modules]") {
configureTest();
lth_util::scope_exit config_cleaner { resetTest };
SECTION("the pid is written to file") {
ExternalModule e_m { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/reverse_valid"
EXTENSION,
SPOOL_DIR };
ActionRequest request { RequestType::NonBlocking, NON_BLOCKING_CONTENT };
fs::path spool_path { SPOOL_DIR };
auto results_dir = (spool_path / request.transactionId()).string();
fs::create_directories(results_dir);
request.setResultsDir(results_dir);
auto pid_path = spool_path / request.transactionId() / "pid";
REQUIRE_NOTHROW(e_m.executeAction(request));
REQUIRE(fs::exists(pid_path));
        try {
            auto pid_txt = lth_file::read(pid_path.string());
            auto pid = std::stoi(pid_txt);
            REQUIRE(pid > 0);
        } catch (const std::exception&) {
            FAIL("failed to get pid");
        }
}
}
TEST_CASE("ExternalModule::getModuleMetadata", "[modules][metadata]") {
configureTest();
lth_util::scope_exit config_cleaner { resetTest };
SECTION("gets a list of actions when module returns valid metadata") {
try {
ExternalModule e_m { PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/reverse_valid"
EXTENSION,
SPOOL_DIR };
std::vector<std::string> expected_actions { "string", "hash" };
REQUIRE(e_m.actions == expected_actions);
} catch (const std::exception& e) {
FAIL(std::string { "failed to initialize: " } + e.what());
}
}
SECTION("throws Module::LoadingError when module returns no metadata") {
REQUIRE_THROWS_AS(
ExternalModule(PXP_AGENT_ROOT_PATH
"/lib/tests/resources/broken_modules/reverse_no_metadata"
EXTENSION,
SPOOL_DIR),
Module::LoadingError);
}
SECTION("throws Module::LoadingError when module returns invalid JSON") {
REQUIRE_THROWS_AS(
ExternalModule(PXP_AGENT_ROOT_PATH
"/lib/tests/resources/broken_modules/reverse_bad_json_format"
EXTENSION,
SPOOL_DIR),
Module::LoadingError);
}
SECTION("throws Module::LoadingError when module returns invalid metadata") {
REQUIRE_THROWS_AS(
ExternalModule(PXP_AGENT_ROOT_PATH
"/lib/tests/resources/broken_modules/reverse_broken"
EXTENSION,
SPOOL_DIR),
Module::LoadingError);
}
}
TEST_CASE("ExternalModule::validateConfiguration", "[modules][configuration]") {
configureTest();
lth_util::scope_exit config_cleaner { resetTest };
SECTION("succeeds when presented with a valid configuration") {
ExternalModule e_m {
PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/convert_test"
EXTENSION,
lth_jc::JsonContainer(
"{ \"rate\" : 1.1, \"fee_percent\" : 0.1, \"fee_max\" : 10 }"),
SPOOL_DIR };
REQUIRE_NOTHROW(e_m.validateConfiguration());
}
SECTION("throws exception when presented with an invalid configuration") {
ExternalModule e_m {
PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/convert_test"
EXTENSION,
lth_jc::JsonContainer(
"{ \"rate\" : 1.1, \"fee_percent\" : 0.1, \"foo_max\" : 10 }"),
SPOOL_DIR };
REQUIRE_THROWS_AS(e_m.validateConfiguration(),
PCPClient::validation_error);
}
}
TEST_CASE("ExternalModule::executeAction", "[modules][output]") {
configureTest();
lth_util::scope_exit config_cleaner { resetTest };
SECTION("gets the action output") {
ExternalModule e_m {
PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/convert_test"
EXTENSION,
lth_jc::JsonContainer(
"{ \"rate\" : 1.1, \"fee_percent\" : 0.1, \"fee_max\" : 10 }"),
SPOOL_DIR };
auto convert_txt = (DATA_FORMAT % "\"0632\""
% "\"convert_test\""
% "\"convert\""
% "{\"amount\" : 1000}").str();
PCPClient::ParsedChunks convert_content {
lth_jc::JsonContainer(ENVELOPE_TXT),
lth_jc::JsonContainer(convert_txt),
NO_DEBUG,
0 };
ActionRequest request { RequestType::Blocking, convert_content };
try {
REQUIRE(e_m.executeAction(request)
.action_metadata
.get<double>({ "results", "amount" }) == 1098.9);
} catch (...) {
FAIL("failed to execute the action");
}
}
SECTION("validates the action output") {
ExternalModule e_m {
PXP_AGENT_ROOT_PATH
"/lib/tests/resources/modules/convert_test"
EXTENSION,
lth_jc::JsonContainer(
"{ \"rate\" : 1.1, \"fee_percent\" : 0.1, \"fee_max\" : 10 }"),
SPOOL_DIR };
auto convert_txt = (DATA_FORMAT % "\"0633\""
% "\"convert_test\""
% "\"convert2\""
% "{\"amount\" : 10000}").str();
PCPClient::ParsedChunks convert_content {
lth_jc::JsonContainer(ENVELOPE_TXT),
lth_jc::JsonContainer(convert_txt),
NO_DEBUG,
0 };
ActionRequest request { RequestType::Blocking, convert_content };
auto response = e_m.executeAction(request);
REQUIRE(response.action_metadata.includes("results_are_valid"));
REQUIRE_FALSE(response.action_metadata.get<bool>("results_are_valid"));
}
}
} // namespace PXPAgent
|
{"hexsha": "b3d6ff9fcc4b56850c29b8aa012ecfe98ea9cd7b", "size": 14150, "ext": "cc", "lang": "C++", "max_stars_repo_path": "lib/tests/unit/external_module_test.cc", "max_stars_repo_name": "nicklewis/pxp-agent", "max_stars_repo_head_hexsha": "12a383fb3403760524008ec81e5c1fcfd9178452", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/tests/unit/external_module_test.cc", "max_issues_repo_name": "nicklewis/pxp-agent", "max_issues_repo_head_hexsha": "12a383fb3403760524008ec81e5c1fcfd9178452", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/tests/unit/external_module_test.cc", "max_forks_repo_name": "nicklewis/pxp-agent", "max_forks_repo_head_hexsha": "12a383fb3403760524008ec81e5c1fcfd9178452", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7671232877, "max_line_length": 130, "alphanum_fraction": 0.5488339223, "num_tokens": 2766}
|
\section{The dataset}
We downloaded the data from Kaggle\footnote{\url{https://www.kaggle.com/c/random-acts-of-pizza/download/train.json.zip}}. The dataset comprises 4040 requests collected from the Reddit community Random Acts of Pizza between December 8, 2010 and September 29, 2013. Each data object is a request for a free pizza; 994 requests received a pizza, while 3046 did not. \\
The dataset is in \texttt{JSON} format, with each entry representing one pizza request. Data fields include information about the request, such as its id, text, and requester name, as well as meta-data such as the time of the request and the activity and community age of the requester. Several fields are collected only after a request was posted; since their values cannot affect whether the request receives a pizza, we removed those fields from the dataset. Table \ref{fields} lists the important attributes, with explanations, that we consider in our analysis.
\begin{table}[ht]
\sffamily\scriptsize
\centering
\caption{Important attributes used in our analysis}
\label{fields}
\begin{tabular}{lp{10cm}}
\toprule
Important Attributes & Explanations \\
\midrule
\multicolumn{2}{l}{Request text} \\
\midrule
request\_title & Title of the request \\
request\_text\_edit\_aware & Request text after removing comments indicating the success of the request \\
\midrule
\multicolumn{2}{l}{Requester information} \\
\midrule
requester\_account\_age\_in\_days\_at\_request & The age of the requester (in days) at time of request. \\
requester\_number\_of\_comments\_at\_request & The number of comments on Reddit by the requester at time of request. \\
requester\_number\_of\_posts\_at\_request & The number of posts on Reddit by the requester at time of request. \\
requester\_subreddits\_at\_request & The number of subreddits the requester had posted in at time of request. \\
requester\_upvotes\_minus\_downvotes\_at\_request & Difference of the requester's upvotes and downvotes at time of request. \\
requester\_upvotes\_plus\_downvotes\_at\_request & Sum of the requester's upvotes and downvotes at time of request. \\
\bottomrule
\end{tabular}
\end{table}
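For reference, the following Python sketch (ours, and assuming the label field is named \texttt{requester\_received\_pizza}, as in the Kaggle release) shows how the entries can be loaded and the attributes of Table \ref{fields} extracted:
\begin{verbatim}
import json

# train.json is a JSON array of request objects
with open("train.json") as f:
    requests = json.load(f)

fields = ["request_title",
          "request_text_edit_aware",
          "requester_account_age_in_days_at_request",
          "requester_number_of_comments_at_request",
          "requester_number_of_posts_at_request",
          "requester_subreddits_at_request",
          "requester_upvotes_minus_downvotes_at_request",
          "requester_upvotes_plus_downvotes_at_request"]

X = [{k: r[k] for k in fields} for r in requests]
y = [int(r["requester_received_pizza"]) for r in requests]
\end{verbatim}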
|
{"hexsha": "ac4c0c85f83862871576673677696e8e11f8a351", "size": 2820, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "project/proposal/data.tex", "max_stars_repo_name": "tamnguyenthe/CS5890Project", "max_stars_repo_head_hexsha": "74446d536074d369af73bf6d88e38f917c7a3ee6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/proposal/data.tex", "max_issues_repo_name": "tamnguyenthe/CS5890Project", "max_issues_repo_head_hexsha": "74446d536074d369af73bf6d88e38f917c7a3ee6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/proposal/data.tex", "max_forks_repo_name": "tamnguyenthe/CS5890Project", "max_forks_repo_head_hexsha": "74446d536074d369af73bf6d88e38f917c7a3ee6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 94.0, "max_line_length": 590, "alphanum_fraction": 0.6046099291, "num_tokens": 600}
|
#include <glipf/processors/foreground-coverage-processor.h>
#include <glipf/gles-utils/shader-builder.h>
#include <glipf/gles-utils/glsl-program-builder.h>
#include <boost/variant/get.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <cstring>
#define BASE_TEXTURE_WIDTH 320
#define BASE_TEXTURE_HEIGHT 240
using std::pair;
using std::string;
using std::tuple;
using std::vector;
namespace glipf {
namespace processors {
enum VertexAttributeLocations : GLuint {
kPosition = 0,
kColor = 1,
kCellOffset = 2
};
ForegroundCoverageProcessor::ForegroundCoverageProcessor(const sources::FrameProperties& frameProperties,
const vector<ModelData>& models,
const glm::mat4& mvpMatrix)
: GlesProcessor(frameProperties)
, mPixelCountingGlslProgram(0)
, mModelVertexBuffer(0)
, mModelIndexBuffer(0)
{
setupReductionGlslPrograms(mvpMatrix);
setupModelGeometry(models);
mResultSet["model_coverage"] = vector<float>();
}
ForegroundCoverageProcessor::~ForegroundCoverageProcessor() {
glDeleteProgram(mPixelCountingGlslProgram);
glDeleteBuffers(1, &mModelVertexBuffer);
glDeleteBuffers(1, &mModelIndexBuffer);
for (auto reductionFboSpec : mReductionFboSpecs)
glDeleteProgram(std::get<0>(reductionFboSpec));
for (const auto& reductionFboSet : mReductionFboSets) {
for (auto modelTextureFboPair : std::get<3>(reductionFboSet)) {
glDeleteFramebuffers(1, &std::get<1>(modelTextureFboPair));
glDeleteTextures(1, &std::get<0>(modelTextureFboPair));
}
}
}
void ForegroundCoverageProcessor::setupReductionGlslPrograms(const glm::mat4& mvpMatrix)
{
GLuint mainGlslProgram = gles_utils::GlslProgramBuilder()
.attachShader(gles_utils::ShaderBuilder(GL_VERTEX_SHADER)
.appendSourceFile("glsl/transformation.vert")
.compile())
.attachShader(gles_utils::ShaderBuilder(GL_FRAGMENT_SHADER)
.appendSourceFile("glsl/foreground-coverage.frag")
.compile())
.bindAttribLocation(VertexAttributeLocations::kPosition, "vertex")
.bindAttribLocation(VertexAttributeLocations::kColor, "vertexColor")
.bindAttribLocation(VertexAttributeLocations::kCellOffset, "cellOffset")
.link();
glUseProgram(mainGlslProgram);
glUniform2f(glGetUniformLocation(mainGlslProgram, "viewportDimensions"),
mFrameProperties.dimensions().first,
mFrameProperties.dimensions().second);
glUniformMatrix4fv(glGetUniformLocation(mainGlslProgram, "projectionMatrix"),
1, GL_FALSE, glm::value_ptr(mvpMatrix));
glUniform1i(glGetUniformLocation(mainGlslProgram, "tex"), 0);
assertNoGlError();
mReductionFboSpecs.push_back(std::make_tuple(mainGlslProgram,
BASE_TEXTURE_WIDTH,
BASE_TEXTURE_HEIGHT));
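  // Layout note: each reduction target is a 4x4 grid of cells, and every
  // cell packs two models (R/G channels for one, B/A for the other), so a
  // single FBO set covers up to 32 models.  The cascade below averages
  // each cell down from 320x240 to 80x60, 20x15 and finally 4x3 texels.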
vector<tuple<uint16_t, uint16_t, uint16_t, uint16_t>> reductionFboSpecs = {
std::make_tuple(BASE_TEXTURE_WIDTH / 4, BASE_TEXTURE_HEIGHT / 4, 4, 4),
std::make_tuple(BASE_TEXTURE_WIDTH / 16, BASE_TEXTURE_HEIGHT / 16, 4, 4),
std::make_tuple(BASE_TEXTURE_WIDTH / 80, BASE_TEXTURE_HEIGHT / 80, 5, 5)
};
for (auto& reductionFboSpec : reductionFboSpecs) {
uint16_t fboWidth, fboHeight, texelWidth, texelHeight;
std::tie(fboWidth, fboHeight, texelWidth, texelHeight) = reductionFboSpec;
GLuint reductionGlslProgram = gles_utils::GlslProgramBuilder()
.attachShader(gles_utils::ShaderBuilder(GL_VERTEX_SHADER)
.appendSourceFile("glsl/active-pixel-count.vert")
.compile())
.attachShader(gles_utils::ShaderBuilder(GL_FRAGMENT_SHADER)
.appendSourceString("#define TEXEL_WIDTH " +
std::to_string(texelWidth) + ".0\n")
.appendSourceString("#define TEXEL_HEIGHT " +
std::to_string(texelHeight) + ".0\n")
.appendSourceFile("glsl/active-pixel-count.frag")
.compile())
.bindAttribLocation(VertexAttributeLocations::kPosition, "vertex")
.link();
glUseProgram(reductionGlslProgram);
glUniform1i(glGetUniformLocation(reductionGlslProgram, "tex"), 2);
glUniform2f(glGetUniformLocation(reductionGlslProgram, "stepSize"),
0.5f / (4 * texelWidth * fboWidth),
0.5f / (4 * texelHeight * fboHeight));
assertNoGlError();
mReductionFboSpecs.push_back(std::make_tuple(reductionGlslProgram,
fboWidth,
fboHeight));
}
}
void ForegroundCoverageProcessor::addReductionFboSet(size_t modelCount,
GLuint indexOffset,
GLuint indexCount)
{
vector<TextureFboPair> reductionObjects;
// Prepare a texture to store the average foreground coverage of the
// model
for (auto& spec : mReductionFboSpecs) {
GLuint averageTexture;
glActiveTexture(GL_TEXTURE3);
glGenTextures(1, &averageTexture);
glBindTexture(GL_TEXTURE_2D, averageTexture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 4 * std::get<1>(spec),
4 * std::get<2>(spec), 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
assertNoGlError();
// Prepare an FBO to store the average foreground coverage of the
// model
GLuint averageFbo;
glGenFramebuffers(1, &averageFbo);
glBindFramebuffer(GL_FRAMEBUFFER, averageFbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, averageTexture, 0);
assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
assertNoGlError();
reductionObjects.push_back(std::make_pair(averageTexture, averageFbo));
}
mReductionFboSets.push_back(std::make_tuple(modelCount,
indexOffset * sizeof(GLushort),
indexCount, reductionObjects));
}
void ForegroundCoverageProcessor::setupModelGeometry(const std::vector<ModelData>& models) {
size_t vertexCount = 0, indexCount = 0, fboIndexCount = 0;
ptrdiff_t vertexOffset = 0;
ptrdiff_t indexOffset = 0;
unsigned short int modelNumber = 0;
for (auto& model : models) {
vertexCount += model.first.size();
indexCount += model.second.size();
fboIndexCount += model.second.size();
if (++modelNumber % 32 == 0) {
addReductionFboSet(modelNumber, indexOffset, fboIndexCount);
indexOffset += fboIndexCount;
fboIndexCount = 0;
modelNumber = 0;
}
}
if (fboIndexCount > 0)
addReductionFboSet(modelNumber, indexOffset, fboIndexCount);
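  // Interleaved vertex layout, 9 floats per vertex: position (3),
  // per-model color mask (4) and cell offset within the 4x4 grid (2).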
GLfloat vertexData[vertexCount * 3];
GLushort indexData[indexCount];
memset(vertexData, 0, sizeof(vertexData));
indexOffset = 0;
modelNumber = 0;
for (auto& model : models) {
uint_fast8_t modelColorChannel = 2 * (modelNumber % 2);
for (size_t i = 0; i < model.first.size(); i += 3) {
memcpy(vertexData + vertexOffset + i * 3, model.first.data() + i,
3 * sizeof(GLfloat));
vertexData[vertexOffset + i * 3 + 3 + modelColorChannel] = 1.0;
vertexData[vertexOffset + i * 3 + 4 + modelColorChannel] = 1.0;
vertexData[vertexOffset + i * 3 + 7] = ((modelNumber / 2) % 16) % 4;
vertexData[vertexOffset + i * 3 + 8] = ((modelNumber / 2) % 16) / 4;
}
for (size_t i = 0; i < model.second.size(); ++i)
indexData[indexOffset + i] = model.second[i] + vertexOffset / 9;
modelNumber++;
vertexOffset += model.first.size() * 3;
indexOffset += model.second.size();
}
glGenBuffers(1, &mModelVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, mModelVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData), vertexData,
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
assertNoGlError();
glGenBuffers(1, &mModelIndexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mModelIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indexData), indexData,
GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
assertNoGlError();
}
const ProcessingResultSet& ForegroundCoverageProcessor::process(GLuint frameTexture) {
vector<float>& modelCoverageSet =
boost::get<vector<float>>(mResultSet["model_coverage"]);
modelCoverageSet.clear();
auto reductionSpecIter = std::begin(mReductionFboSpecs);
GLuint reductionGlslProgram;
uint_fast16_t fboWidth, fboHeight;
std::tie(reductionGlslProgram, fboWidth, fboHeight) = *reductionSpecIter;
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glBlendEquation(GL_FUNC_ADD);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, frameTexture);
glEnableVertexAttribArray(VertexAttributeLocations::kPosition);
glEnableVertexAttribArray(VertexAttributeLocations::kColor);
glEnableVertexAttribArray(VertexAttributeLocations::kCellOffset);
// Step 1: preprocessing
glViewport(0, 0, 4 * fboWidth, 4 * fboHeight);
glUseProgram(reductionGlslProgram);
glBindBuffer(GL_ARRAY_BUFFER, mModelVertexBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mModelIndexBuffer);
glVertexAttribPointer(VertexAttributeLocations::kPosition, 3, GL_FLOAT,
GL_FALSE, 9 * sizeof(GLfloat), 0);
glVertexAttribPointer(VertexAttributeLocations::kColor, 4, GL_FLOAT,
GL_FALSE, 9 * sizeof(GLfloat),
(GLvoid*)(3 * sizeof(GLfloat)));
glVertexAttribPointer(VertexAttributeLocations::kCellOffset, 2, GL_FLOAT,
GL_FALSE, 9 * sizeof(GLfloat),
(GLvoid*)(7 * sizeof(GLfloat)));
for (auto& reductionFboSet : mReductionFboSets) {
const auto& modelTextureFboPair = std::get<3>(reductionFboSet).front();
glBindFramebuffer(GL_FRAMEBUFFER, std::get<1>(modelTextureFboPair));
glClear(GL_COLOR_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, std::get<2>(reductionFboSet),
GL_UNSIGNED_SHORT, (GLvoid*)std::get<1>(reductionFboSet));
assertNoGlError();
}
glDisable(GL_BLEND);
glDisableVertexAttribArray(VertexAttributeLocations::kColor);
glDisableVertexAttribArray(VertexAttributeLocations::kCellOffset);
// Step 2: average coverage by mipmapping
glActiveTexture(GL_TEXTURE2);
size_t reductionFboIndex = 1;
while (++reductionSpecIter != std::end(mReductionFboSpecs)) {
std::tie(reductionGlslProgram, fboWidth, fboHeight) = *reductionSpecIter;
glViewport(0, 0, 4 * fboWidth, 4 * fboHeight);
glUseProgram(reductionGlslProgram);
for (auto& reductionFboSet : mReductionFboSets) {
const auto& textureFboPairList = std::get<3>(reductionFboSet);
glBindTexture(GL_TEXTURE_2D,
std::get<0>(textureFboPairList[reductionFboIndex - 1]));
glBindFramebuffer(GL_FRAMEBUFFER,
std::get<1>(textureFboPairList[reductionFboIndex]));
glClear(GL_COLOR_BUFFER_BIT);
drawFullscreenQuad(VertexAttributeLocations::kPosition);
}
reductionFboIndex++;
}
glDisableVertexAttribArray(VertexAttributeLocations::kPosition);
// Step 3: extract coverage
for (auto& reductionFboSet : mReductionFboSets) {
glBindFramebuffer(GL_FRAMEBUFFER,
std::get<1>(std::get<3>(reductionFboSet).back()));
GLubyte pixelData[fboWidth * fboHeight * 64];
glReadPixels(0, 0, 4 * fboWidth, 4 * fboHeight, GL_RGBA, GL_UNSIGNED_BYTE,
pixelData);
uint_fast16_t offset = 0;
size_t modelCount = std::get<0>(reductionFboSet);
for (uint_fast16_t i = 0; i < 4; ++i) {
uint_fast32_t modelCoverage[] = {0, 0, 0, 0, 0, 0, 0, 0};
uint_fast32_t foregroundCoverage[] = {0, 0, 0, 0, 0, 0, 0, 0};
for (uint_fast16_t j = 0; j < fboHeight; ++j) {
for (uint_fast16_t k = 0; k < 4 * fboWidth; ++k) {
uint_fast8_t modelIndex = (k / fboWidth) * 2;
modelCoverage[modelIndex] += pixelData[offset++];
foregroundCoverage[modelIndex] += pixelData[offset++];
modelCoverage[modelIndex + 1] += pixelData[offset++];
foregroundCoverage[modelIndex + 1] += pixelData[offset++];
}
}
      for (uint_fast16_t j = 0; j < std::min<size_t>(modelCount, 8); ++j)
modelCoverageSet.push_back(foregroundCoverage[j] / (float)modelCoverage[j]);
if (modelCount < 8)
return mResultSet;
modelCount -= 8;
}
}
return mResultSet;
}
} // end namespace processors
} // end namespace glipf
|
{"hexsha": "01c55455167fc5e5a013cae2353439b8b17bb124", "size": 13013, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "glipf/src/processors/foreground-coverage-processor.cpp", "max_stars_repo_name": "cognitivesystems/smartcamera", "max_stars_repo_head_hexsha": "5374193260e6385becfe8086a70d21d650314beb", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-03-27T16:14:59.000Z", "max_stars_repo_stars_event_max_datetime": "2017-03-27T16:14:59.000Z", "max_issues_repo_path": "glipf/src/processors/foreground-coverage-processor.cpp", "max_issues_repo_name": "cognitivesystems/smartcamera", "max_issues_repo_head_hexsha": "5374193260e6385becfe8086a70d21d650314beb", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glipf/src/processors/foreground-coverage-processor.cpp", "max_forks_repo_name": "cognitivesystems/smartcamera", "max_forks_repo_head_hexsha": "5374193260e6385becfe8086a70d21d650314beb", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.18, "max_line_length": 105, "alphanum_fraction": 0.6692538231, "num_tokens": 3295}
|
import challenge_notations
import challenge_prerequisites
/-!
# Liquid Tensor Experiment
## The main challenge
The main challenge of the liquid tensor experiment is
a formalisation of the first theorem in Peter Scholze's blogpost
https://xenaproject.wordpress.com/2020/12/05/liquid-tensor-experiment/
Theorem 1.1 (Clausen--Scholze)
Let `0 < p' < p ≤ 1` be real numbers, let `S` be a profinite set, and let `V` be a `p`-Banach space.
Let `ℳ p' S` be the space of `p'`-measures on `S`. Then
$$ Ext^i (ℳ p' S, V) = 0 $$
for `i ≥ 1`.
-/
noncomputable theory
open_locale liquid_tensor_experiment nnreal zero_object
open liquid_tensor_experiment category_theory category_theory.limits
variables (p' p : ℝ≥0) [fact (0 < p')] [fact (p' < p)] [fact (p ≤ 1)]
theorem liquid_tensor_experiment (S : Profinite.{0}) (V : pBanach.{0} p) :
∀ i > 0, Ext i (ℳ_{p'} S) V ≅ 0 :=
begin
intros i hi,
apply is_zero.iso_zero,
revert i,
haveI : fact (0 < (p:ℝ)) := ⟨lt_trans (fact.out _ : 0 < p') (fact.out _)⟩,
haveI : fact (p' < 1) := ⟨lt_of_lt_of_le (fact.out _ : p' < p) (fact.out _)⟩,
erw is_zero_iff_epi_and_is_iso _ _ (V : Condensed.{0 1 2} Ab) (laurent_measures.short_exact p' S),
let := pBanach.choose_seminormed_add_comm_group V,
let := pBanach.choose_normed_with_aut V 2⁻¹,
haveI : fact (0 < (2⁻¹ : ℝ≥0) ^ (p : ℝ)) := r_pos',
convert laurent_measures.epi_and_is_iso p' p S ⟨V⟩ _ using 1,
intro v,
rw [pBanach.choose_normed_with_aut_T_inv, inv_inv, two_smul, two_nsmul],
end
|
{"author": "leanprover-community", "repo": "lean-liquid", "sha": "92f188bd17f34dbfefc92a83069577f708851aec", "save_path": "github-repos/lean/leanprover-community-lean-liquid", "path": "github-repos/lean/leanprover-community-lean-liquid/lean-liquid-92f188bd17f34dbfefc92a83069577f708851aec/src/challenge.lean"}
|
using Test
using TestSetExtensions
using LinearAlgebra
using Qaintessent
using SparseArrays
using StatsBase
##==----------------------------------------------------------------------------------------------------------------------
isunitary(cg::CircuitGate) = (sparse_matrix(cg) * sparse_matrix(Base.adjoint(cg)) ≈ I)
@testset ExtendedTestSet "circuit gates" begin
θ = 0.7 * π
ϕ = 0.4 * π
n = randn(3); n /= norm(n)
# single qubit gates
@testset "single qubit circuit gates" begin
for g in [X, Y, Z, HadamardGate(), SGate(), TGate(), RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)]
cg = CircuitGate((2,), g)
cgadj = adjoint(cg)
            @test Qaintessent.sparse_matrix(cgadj.gate) ≈ adjoint(Qaintessent.sparse_matrix(cg.gate))
@test LinearAlgebra.ishermitian(cg) == (Qaintessent.sparse_matrix(cg) == Qaintessent.sparse_matrix(adjoint(cg)))
end
cgs = circuit_gate.((2,), [X, Y, Z, HadamardGate(), SGate(), TGate(), RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)])
@test all(sparse_matrix(adjoint(cgs)) .≈ adjoint(sparse_matrix(cgs)))
end
# two qubit gates
@testset "two qubit circuit gates" begin
for g in [EntanglementXXGate(θ), EntanglementYYGate(θ), EntanglementZZGate(θ), controlled_not(), SwapGate()]
cg = CircuitGate((2, 3), g)
cgadj = adjoint(cg)
            @test Qaintessent.sparse_matrix(cgadj.gate) ≈ adjoint(Qaintessent.sparse_matrix(cg.gate))
@test LinearAlgebra.ishermitian(cg) == (Qaintessent.sparse_matrix(cg) == Qaintessent.sparse_matrix(adjoint(cg)))
end
end
# Y acting on second wire
@testset "apply circuit gate to second wire" begin
cg = CircuitGate((2,), Y)
@test Qaintessent.sparse_matrix(cg, 3) ≈ kron(Matrix(I, 2, 2), Qaintessent.matrix(Y), Matrix(I, 2, 2))
@test isunitary(cg)
end
# flip control and target
@testset "flip control and target circuit gate" begin
cg = CircuitGate((2, 1), controlled_not())
@test Qaintessent.sparse_matrix(cg) ≈ [1 0 0 0; 0 0 0 1; 0 0 1 0; 0 1 0 0]
@test isunitary(cg)
end
# third qubit as control and first qubit as target
@testset "shift control and target circuit gate" begin
cg = circuit_gate(1, HadamardGate(), 3)
@test Qaintessent.sparse_matrix(cg) ≈ [
Matrix(I, 4, 4) fill(0, 4, 2) fill(0, 4, 2);
fill(0, 2, 4) Qaintessent.matrix(HadamardGate()) fill(0, 2, 2);
fill(0, 2, 6) Qaintessent.matrix(HadamardGate())]
@test isunitary(cg)
end
@testset "circuit gate exceptions" begin
H = HadamardGate()
S = SwapGate()
N = 2
@test_throws ErrorException("SwapGate affects 2 wires but 0 wires, (), were passed.") CircuitGate{0,SwapGate}(NTuple{0,Int}(), S)
@test_throws ErrorException("Wire indices must be unique.") CircuitGate{2,SwapGate}((1, 1), S)
@test_throws ErrorException("Wire index cannot be smaller than 1.") CircuitGate{2,SwapGate}((1, -1), S)
end
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gate isapprox" begin
θ = 0.7 * π
ϕ = 0.4 * π
n = randn(3); n /= norm(n)
ϵ = 3*sqrt(eps())
sqg = [RxGate(θ), RyGate(θ), RzGate(θ), RotationGate(θ, n), PhaseShiftGate(ϕ)]
sqḡ = [RxGate(θ + eps()), RyGate(θ + eps()), RzGate(θ + eps()), RotationGate(θ + eps(), n), PhaseShiftGate(ϕ + eps())]
sqĝ = [RxGate(θ + ϵ), RyGate(θ + ϵ), RzGate(θ + ϵ), RotationGate(θ + ϵ, n), PhaseShiftGate(ϕ + ϵ)]
for (i, g) in enumerate(sqg)
cg1 = CircuitGate((2,), sqg[i])
cg2 = CircuitGate((2,), sqḡ[i])
cg3 = CircuitGate((2,), sqĝ[i])
@test cg1 ≈ cg2
@test !(cg1 ≈ cg3)
end
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gate helper functions" begin
N = 5
@testset ExtendedTestSet "circuit gate single qubit helper function" begin
iwire = rand(1:N)
g = XGate()
@test circuit_gate(iwire, g) ≈ CircuitGate((iwire,), g)
end
@testset "circuit gate two qubit helper function" begin
iwire1 = rand(1:N)
iwire2 = rand(vcat(1:iwire1 - 1..., iwire1 + 1:N...))
g2 = SwapGate()
@test circuit_gate(iwire1, iwire2, g2) ≈ CircuitGate((iwire1, iwire2), g2)
end
@testset "circuit gate controlled gate helper function" begin
cntrl_iwire1, cntrl_iwire2, targ_iwire1, targ_iwire2 = sample(1:N, 4, replace=false)
g = YGate()
ref_cg = CircuitGate((targ_iwire1, cntrl_iwire1), ControlledGate(g, 1))
@test circuit_gate(targ_iwire1, g, cntrl_iwire1) ≈ ref_cg
@test circuit_gate(targ_iwire1, g, (cntrl_iwire1,)) ≈ ref_cg
@test circuit_gate((targ_iwire1,), g, cntrl_iwire1) ≈ ref_cg
@test circuit_gate((targ_iwire1,), g, (cntrl_iwire1,)) ≈ ref_cg
g = SwapGate()
ref_cg2 = CircuitGate((targ_iwire1, targ_iwire2, cntrl_iwire1), ControlledGate(g, 1))
@test circuit_gate(targ_iwire1, targ_iwire2, g, cntrl_iwire1) ≈ ref_cg2
@test circuit_gate(targ_iwire1, targ_iwire2, g, (cntrl_iwire1,)) ≈ ref_cg2
@test circuit_gate((targ_iwire1, targ_iwire2), g, cntrl_iwire1) ≈ ref_cg2
@test circuit_gate((targ_iwire1, targ_iwire2), g, (cntrl_iwire1,)) ≈ ref_cg2
ref_cg3 = CircuitGate((targ_iwire1, targ_iwire2, cntrl_iwire1, cntrl_iwire2), ControlledGate(g, 2))
@test circuit_gate(targ_iwire1, targ_iwire2, g, cntrl_iwire1, cntrl_iwire2) ≈ ref_cg3
@test circuit_gate(targ_iwire1, targ_iwire2, g, (cntrl_iwire1, cntrl_iwire2)) ≈ ref_cg3
@test circuit_gate((targ_iwire1, targ_iwire2), g, cntrl_iwire1, cntrl_iwire2) ≈ ref_cg3
@test circuit_gate((targ_iwire1, targ_iwire2), g, (cntrl_iwire1, cntrl_iwire2)) ≈ ref_cg3
end
# test sparse_matrix
@testset "circuit gates sparse matrix" begin
cgs = [circuit_gate(1, X), circuit_gate(2, X), circuit_gate(3, X)]
m = sparse_matrix(cgs)
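        # X applied to every wire flips all three qubit bits, giving the
        # anti-diagonal permutation matrix checked below.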
@test m ≈ sparse([8, 7, 6, 5, 4, 3, 2, 1], [1, 2, 3, 4, 5, 6, 7, 8], Float64[1, 1, 1, 1, 1, 1, 1, 1])
end
# test sparse_matrix
@testset "circuit gates sparse matrix exceptions" begin
cgs = [circuit_gate(1, X), circuit_gate(2, X), circuit_gate(3, X)]
@test_throws ErrorException("Circuit size `2` too small; vector of CircuitGate requires 3 wires.") sparse_matrix(cgs, 2)
end
end
|
{"hexsha": "dbf0b7302b07683535f8e2d20bdd656dc9451eb8", "size": 6714, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_circuitgate.jl", "max_stars_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_stars_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-05-25T11:43:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:34:12.000Z", "max_issues_repo_path": "test/test_circuitgate.jl", "max_issues_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_issues_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2020-04-09T17:15:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T12:46:52.000Z", "max_forks_repo_path": "test/test_circuitgate.jl", "max_forks_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_forks_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-12-16T13:25:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T15:49:00.000Z", "avg_line_length": 40.9390243902, "max_line_length": 150, "alphanum_fraction": 0.5953232052, "num_tokens": 2212}
|
# -*- coding: utf-8 -*-
""" Class average finetuning functions. Before using any of these finetuning
functions, ensure that the model is set up with nb_classes=2.
"""
from __future__ import print_function
import uuid
from time import sleep
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchMoji.torchmoji.global_variables import (
FINETUNING_METHODS,
WEIGHTS_DIR)
from torchMoji.torchmoji.finetuning import (
freeze_layers,
get_data_loader,
fit_model,
train_by_chain_thaw,
find_f1_threshold)
def relabel(y, current_label_nr, nb_classes):
""" Makes a binary classification for a specific class in a
multi-class dataset.
# Arguments:
y: Outputs to be relabelled.
current_label_nr: Current label number.
nb_classes: Total number of classes.
# Returns:
Relabelled outputs of a given multi-class dataset into a binary
classification dataset.
"""
# Handling binary classification
if nb_classes == 2 and len(y.shape) == 1:
return y
y_new = np.zeros(len(y))
y_cut = y[:, current_label_nr]
label_pos = np.where(y_cut == 1)[0]
y_new[label_pos] = 1
return y_new
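# Illustrative example (ours, not part of the original API): for one-hot
# labels y = np.array([[1, 0, 0], [0, 1, 0], [0, 1, 0]]),
# relabel(y, 1, 3) returns array([0., 1., 1.]).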
def class_avg_finetune(model, texts, labels, nb_classes, batch_size,
method, epoch_size=5000, nb_epochs=1000, embed_l2=1E-6,
verbose=True):
""" Compiles and finetunes the given model.
# Arguments:
model: Model to be finetuned
texts: List of three lists, containing tokenized inputs for training,
validation and testing (in that order).
labels: List of three lists, containing labels for training,
validation and testing (in that order).
nb_classes: Number of classes in the dataset.
batch_size: Batch size.
method: Finetuning method to be used. For available methods, see
FINETUNING_METHODS in global_variables.py. Note that the model
should be defined accordingly (see docstring for torchmoji_transfer())
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs. Doesn't matter much as early stopping is used.
embed_l2: L2 regularization for the embedding layer.
verbose: Verbosity flag.
# Returns:
Model after finetuning,
score after finetuning using the class average F1 metric.
"""
if method not in FINETUNING_METHODS:
raise ValueError('ERROR (class_avg_tune_trainable): '
'Invalid method parameter. '
'Available options: {}'.format(FINETUNING_METHODS))
(X_train, y_train) = (texts[0], labels[0])
(X_val, y_val) = (texts[1], labels[1])
(X_test, y_test) = (texts[2], labels[2])
checkpoint_path = '{}/torchmoji-checkpoint-{}.bin' \
.format(WEIGHTS_DIR, str(uuid.uuid4()))
f1_init_path = '{}/torchmoji-f1-init-{}.bin' \
.format(WEIGHTS_DIR, str(uuid.uuid4()))
if method in ['last', 'new']:
lr = 0.001
elif method in ['full', 'chain-thaw']:
lr = 0.0001
loss_op = nn.BCEWithLogitsLoss()
# Freeze layers if using last
if method == 'last':
model = freeze_layers(model, unfrozen_keyword='output_layer')
# Define optimizer, for chain-thaw we define it later (after freezing)
if method == 'last':
adam = optim.Adam((p for p in model.parameters() if p.requires_grad), lr=lr)
elif method in ['full', 'new']:
# Add L2 regulation on embeddings only
special_params = [id(p) for p in model.embed.parameters()]
base_params = [p for p in model.parameters() if id(p) not in special_params and p.requires_grad]
embed_parameters = [p for p in model.parameters() if id(p) in special_params and p.requires_grad]
adam = optim.Adam([
{'params': base_params},
{'params': embed_parameters, 'weight_decay': embed_l2},
], lr=lr)
# Training
if verbose:
print('Method: {}'.format(method))
print('Classes: {}'.format(nb_classes))
if method == 'chain-thaw':
result = class_avg_chainthaw(model, nb_classes=nb_classes,
loss_op=loss_op,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
batch_size=batch_size,
epoch_size=epoch_size,
nb_epochs=nb_epochs,
checkpoint_weight_path=checkpoint_path,
f1_init_weight_path=f1_init_path,
verbose=verbose)
else:
result = class_avg_tune_trainable(model, nb_classes=nb_classes,
loss_op=loss_op,
optim_op=adam,
train=(X_train, y_train),
val=(X_val, y_val),
test=(X_test, y_test),
epoch_size=epoch_size,
nb_epochs=nb_epochs,
batch_size=batch_size,
init_weight_path=f1_init_path,
checkpoint_weight_path=checkpoint_path,
verbose=verbose)
return model, result
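# Hedged usage sketch (paths and variable names are illustrative, not taken
# from this file):
#
#     from torchMoji.torchmoji.model_def import torchmoji_transfer
#     model = torchmoji_transfer(nb_classes=2, weight_path=PRETRAINED_PATH)
#     model, f1 = class_avg_finetune(model, [X_train, X_val, X_test],
#                                    [y_train, y_val, y_test], nb_classes=2,
#                                    batch_size=32, method='last')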
def prepare_labels(y_train, y_val, y_test, iter_i, nb_classes):
# Relabel into binary classification
y_train_new = relabel(y_train, iter_i, nb_classes)
y_val_new = relabel(y_val, iter_i, nb_classes)
y_test_new = relabel(y_test, iter_i, nb_classes)
return y_train_new, y_val_new, y_test_new
def prepare_generators(X_train, y_train_new, X_val, y_val_new, batch_size, epoch_size):
# Create sample generators
# Make a fixed validation set to avoid fluctuations in validation
train_gen = get_data_loader(X_train, y_train_new, batch_size,
extended_batch_sampler=True)
val_gen = get_data_loader(X_val, y_val_new, epoch_size,
extended_batch_sampler=True)
X_val_resamp, y_val_resamp = next(iter(val_gen))
return train_gen, X_val_resamp, y_val_resamp
def class_avg_tune_trainable(model, nb_classes, loss_op, optim_op, train, val, test,
epoch_size, nb_epochs, batch_size,
init_weight_path, checkpoint_weight_path, patience=5,
verbose=True):
""" Finetunes the given model using the F1 measure.
# Arguments:
model: Model to be finetuned.
        nb_classes: Number of classes in the given dataset.
        loss_op: Loss function to be used during training.
        optim_op: Optimizer to be used during training.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
batch_size: Batch size.
init_weight_path: Filepath where weights will be initially saved before
training each class. This file will be rewritten by the function.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
verbose: Verbosity flag.
# Returns:
F1 score of the trained model
"""
total_f1 = 0
nb_iter = nb_classes if nb_classes > 2 else 1
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
# Save and reload initial weights after running for
# each class to avoid learning across classes
torch.save(model.state_dict(), init_weight_path)
for i in range(nb_iter):
if verbose:
print('Iteration number {}/{}'.format(i+1, nb_iter))
model.load_state_dict(torch.load(init_weight_path))
y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
y_test, i, nb_classes)
train_gen, X_val_resamp, y_val_resamp = \
prepare_generators(X_train, y_train_new, X_val, y_val_new,
batch_size, epoch_size)
if verbose:
print("Training..")
fit_model(model, loss_op, optim_op, train_gen, [(X_val_resamp, y_val_resamp)],
nb_epochs, checkpoint_weight_path, patience, verbose=0)
# Reload the best weights found to avoid overfitting
# Wait a bit to allow proper closing of weights file
sleep(1)
model.load_state_dict(torch.load(checkpoint_weight_path))
# Evaluate
y_pred_val = model(X_val).cpu().numpy()
y_pred_test = model(X_test).cpu().numpy()
f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
y_test_new, y_pred_test)
if verbose:
print('f1_test: {}'.format(f1_test))
print('best_t: {}'.format(best_t))
total_f1 += f1_test
return total_f1 / nb_iter
def class_avg_chainthaw(model, nb_classes, loss_op, train, val, test, batch_size,
epoch_size, nb_epochs, checkpoint_weight_path,
f1_init_weight_path, patience=5,
initial_lr=0.001, next_lr=0.0001, verbose=True):
""" Finetunes given model using chain-thaw and evaluates using F1.
For a dataset with multiple classes, the model is trained once for
each class, relabeling those classes into a binary classification task.
The result is an average of all F1 scores for each class.
# Arguments:
model: Model to be finetuned.
nb_classes: Number of classes in the given dataset.
train: Training data, given as a tuple of (inputs, outputs)
val: Validation data, given as a tuple of (inputs, outputs)
test: Testing data, given as a tuple of (inputs, outputs)
batch_size: Batch size.
        loss_op: Loss function to be used during training.
epoch_size: Number of samples in an epoch.
nb_epochs: Number of epochs.
checkpoint_weight_path: Filepath where weights will be checkpointed to
during training. This file will be rewritten by the function.
f1_init_weight_path: Filepath where weights will be saved to and
reloaded from before training each class. This ensures that
each class is trained independently. This file will be rewritten.
initial_lr: Initial learning rate. Will only be used for the first
training step (i.e. the softmax layer)
next_lr: Learning rate for every subsequent step.
verbose: Verbosity flag.
# Returns:
Averaged F1 score.
"""
# Unpack args
X_train, y_train = train
X_val, y_val = val
X_test, y_test = test
total_f1 = 0
nb_iter = nb_classes if nb_classes > 2 else 1
torch.save(model.state_dict(), f1_init_weight_path)
for i in range(nb_iter):
if verbose:
print('Iteration number {}/{}'.format(i+1, nb_iter))
model.load_state_dict(torch.load(f1_init_weight_path))
y_train_new, y_val_new, y_test_new = prepare_labels(y_train, y_val,
y_test, i, nb_classes)
train_gen, X_val_resamp, y_val_resamp = \
prepare_generators(X_train, y_train_new, X_val, y_val_new,
batch_size, epoch_size)
if verbose:
print("Training..")
# Train using chain-thaw
train_by_chain_thaw(model=model, train_gen=train_gen,
val_gen=[(X_val_resamp, y_val_resamp)],
loss_op=loss_op, patience=patience,
nb_epochs=nb_epochs,
checkpoint_path=checkpoint_weight_path,
initial_lr=initial_lr, next_lr=next_lr,
verbose=verbose)
# Evaluate
y_pred_val = model(X_val).cpu().numpy()
y_pred_test = model(X_test).cpu().numpy()
f1_test, best_t = find_f1_threshold(y_val_new, y_pred_val,
y_test_new, y_pred_test)
if verbose:
print('f1_test: {}'.format(f1_test))
print('best_t: {}'.format(best_t))
total_f1 += f1_test
return total_f1 / nb_iter
|
{"hexsha": "919ac68a8a354a3b0626fad1b6ec465c54dda01a", "size": 12853, "ext": "py", "lang": "Python", "max_stars_repo_path": "torchMoji/torchmoji/class_avg_finetuning.py", "max_stars_repo_name": "UmaTaru/run", "max_stars_repo_head_hexsha": "be29e4d41a4de3dee27cd6796801bfe51382d294", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 163, "max_stars_repo_stars_event_min_datetime": "2019-06-23T14:07:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T23:06:07.000Z", "max_issues_repo_path": "torchMoji/torchmoji/class_avg_finetuning.py", "max_issues_repo_name": "UmaTaru/run", "max_issues_repo_head_hexsha": "be29e4d41a4de3dee27cd6796801bfe51382d294", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-24T12:41:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:17:20.000Z", "max_forks_repo_path": "torchMoji/torchmoji/class_avg_finetuning.py", "max_forks_repo_name": "UmaTaru/run", "max_forks_repo_head_hexsha": "be29e4d41a4de3dee27cd6796801bfe51382d294", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2019-06-26T01:21:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T17:23:24.000Z", "avg_line_length": 40.6740506329, "max_line_length": 105, "alphanum_fraction": 0.5976036723, "include": true, "reason": "import numpy", "num_tokens": 2813}
|
using LocalRegistry
using LocalRegistry: find_package_path, find_registry_path
using Test
using Random
using Pkg
const TEST_GITCONFIG = Dict(
"user.name" => "LocalRegistryTests",
"user.email" => "localregistrytests@example.com",
)
include("utils.jl")
# Since these tests will need to modify active registries and we don't
# want interference from, e.g. the General registry, use a temporary
# DEPOT_PATH. But first add some packages while we have the General
# registry available. These will be used for some tests later.
pkg"add AutoHashEquals"
pkg"dev --local Multibreak"
empty!(DEPOT_PATH)
depot_path = mktempdir(@__DIR__)
push!(DEPOT_PATH, depot_path)
# We don't want Pkg to try to update our local registries since they
# contain fake URLs.
Pkg.UPDATED_REGISTRY_THIS_SESSION[] = true
# The following tests are primarily regression tests - checking that
# the results are the same as when the tests were written, regardless
# of correctness.
#
# The general strategy is that a number of project files have been
# stored. These are read and expanded to minimal stub packages, which
# are then registered in a newly created registry. The resulting
# registries after some selections of packages have been added have
# also been stored and are compared against.
#
# The project files have been extracted from the git history of the
# Flux and Images packages. The patch numbers have been faked to get
# more version samples out of their git histories. The FirstTest1
# project file is derived from this package.
# Set up packages and registry directories in a temporary location.
if VERSION >= v"1.2"
testdir = mktempdir(prefix = "LocalRegistryTests")
else
testdir = mktempdir()
end
packages_dir = joinpath(testdir, "packages")
if packages_dir ∉ LOAD_PATH
push!(LOAD_PATH, packages_dir)
end
registry_dir = joinpath(testdir, "TestRegistry")
# Create a new registry.
create_registry(registry_dir, "git@example.com:Julia/TestRegistry.git",
description = "For testing purposes only.",
uuid = "ed6ca2f6-392d-11ea-3224-d3daf7fee369",
gitconfig = TEST_GITCONFIG, push = false)
# Add the FirstTest1 package and check against the stored `registry1`.
prepare_package(packages_dir, "FirstTest1.toml")
using FirstTest
register(FirstTest, registry_dir, gitconfig = TEST_GITCONFIG, push = false)
@test check_result(registry_dir, "registry1")
# Reregister the same version of FirstTest to verify that nothing
# happens,
@test_logs (:info, "This version has already been registered and is unchanged.") register(FirstTest, registry_dir, gitconfig = TEST_GITCONFIG, push = false)
@test check_result(registry_dir, "registry1")
# Add 29 versions of the Flux project files and check against `registry2`.
for n = 1:29
prepare_package(packages_dir, "Flux$(n).toml")
using Flux
register(Flux, registry_dir, gitconfig = TEST_GITCONFIG, push = false)
end
@test check_result(registry_dir, "registry2")
# Add 15 versions of the Images project files and check against `registry3`.
for n = 1:15
prepare_package(packages_dir, "Images$(n).toml")
using Images
register(Images, registry_dir, gitconfig = TEST_GITCONFIG, push = false)
end
@test check_result(registry_dir, "registry3")
# Start over with a fresh registry and add all 46 project files but in
# shuffled order. Check that this also matches `registry3`.
registry_dir = joinpath(testdir, "test2", "TestRegistry")
create_registry(registry_dir, "git@example.com:Julia/TestRegistry.git",
description = "For testing purposes only.",
uuid = "ed6ca2f6-392d-11ea-3224-d3daf7fee369",
gitconfig = TEST_GITCONFIG, push = false)
project_files = vcat("FirstTest1.toml",
["Flux$(n).toml" for n = 1:29],
["Images$(n).toml" for n = 1:15])
Random.seed!(13)
shuffle!(project_files)
for project_file in project_files
prepare_package(packages_dir, project_file)
package = match(r"[a-zA-Z]+", project_file).match
# Register by path instead of module in this test.
register(joinpath(packages_dir, package), registry_dir,
gitconfig = TEST_GITCONFIG, push = false)
end
@test check_result(registry_dir, "registry3")
# Trying to register an already existing version with different content.
prepare_package(packages_dir, "Flux30.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Flux"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# Parse error in compat section.
prepare_package(packages_dir, "Broken1.toml")
if VERSION < v"1.2"
@test_throws ErrorException register(joinpath(packages_dir, "Broken"),
registry_dir,
gitconfig = TEST_GITCONFIG,
push = false)
else
@test_throws Pkg.Types.PkgError register(joinpath(packages_dir, "Broken"),
registry_dir,
gitconfig = TEST_GITCONFIG,
push = false)
end
# Trying to change name (UUID remains).
prepare_package(packages_dir, "Fluxx1.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Fluxx"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# Trying to change UUID.
prepare_package(packages_dir, "Flux31.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Flux"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# Depends on itself.
prepare_package(packages_dir, "Broken2.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Broken"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# Incorrect name of dependency.
prepare_package(packages_dir, "Broken3.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Broken"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# TODO: This should really be an error but RegistryTools 1.3.0 doesn't catch it.
# Incorrect UUID of dependency.
prepare_package(packages_dir, "Broken4.toml")
register(joinpath(packages_dir, "Broken"), registry_dir,
gitconfig = TEST_GITCONFIG, push = false)
# Incorrect UUID of stdlib.
prepare_package(packages_dir, "Broken5.toml")
@test_throws ErrorException register(joinpath(packages_dir, "Broken"),
registry_dir, gitconfig = TEST_GITCONFIG,
push = false)
# Change the git remote before registration and verify that the
# registered repo is not changed.
prepare_package(packages_dir, "Flux32.toml")
package_dir = joinpath(packages_dir, "Flux")
git = gitcmd(package_dir, TEST_GITCONFIG)
package_file = joinpath(registry_dir, "F", "Flux", "Package.toml")
old_repo = TOML.parsefile(package_file)["repo"]
new_repo = "https://example.com/Julia/Flux.jl.git"
run(`$git remote set-url origin $(new_repo)`)
register(joinpath(packages_dir, "Flux"), registry_dir,
gitconfig = TEST_GITCONFIG, push = false)
@test TOML.parsefile(package_file)["repo"] == old_repo
# Register with explicit repo argument and verify that the registered
# repo is updated.
prepare_package(packages_dir, "Flux33.toml")
register(joinpath(packages_dir, "Flux"), registry_dir, repo = new_repo,
gitconfig = TEST_GITCONFIG, push = false)
@test TOML.parsefile(package_file)["repo"] == new_repo
pop!(LOAD_PATH)
# Register a package in a subdirectory of a git repository. Also add
# some dirt outside the subdirectory to verify that it is ignored.
prepare_package(packages_dir, "SubdirTest1.toml", "subdir")
write(joinpath(packages_dir, "SubdirTest", "README.md"), "dirty")
register(joinpath(packages_dir, "SubdirTest", "subdir"), registry_dir,
gitconfig = TEST_GITCONFIG, push = false)
package_file = joinpath(registry_dir, "S", "SubdirTest", "Package.toml")
@test TOML.parsefile(package_file)["subdir"] == "subdir"
# Register a package with a JuliaProject.toml rather than a Project.toml.
prepare_package(packages_dir, "JuliaProjectTest1.toml",
use_julia_project = true)
register(joinpath(packages_dir, "JuliaProjectTest"), registry_dir,
gitconfig = TEST_GITCONFIG, push = false)
@test isfile(joinpath(registry_dir, "J", "JuliaProjectTest", "Package.toml"))
# Test automatic push functionality. The sequence of events is:
# 1. Create a bare "upstream" repository.
# 2. Create a new registry with the upstream as repo and `push = true`.
# 3. Register a package with `push = true`.
# 4. Verify that the registry and the upstream repo has the same two commits.
upstream_dir = joinpath(testdir, "upstream")
mkpath(upstream_dir)
upstream_git = gitcmd(upstream_dir, TEST_GITCONFIG)
run(`$(upstream_git) init --bare`)
registry_push_dir = joinpath(testdir, "TestRegistryPush")
create_registry(registry_push_dir, "file://$(upstream_dir)", push = true,
gitconfig = TEST_GITCONFIG)
downstream_git = gitcmd(registry_push_dir, TEST_GITCONFIG)
register(joinpath(packages_dir, "FirstTest"), registry_push_dir,
push = true, gitconfig = TEST_GITCONFIG)
@test readchomp(`$(downstream_git) log`) == readchomp(`$(upstream_git) log`)
@test length(readlines(`$(upstream_git) log --format=oneline`)) == 2
# Additional tests of `find_package_path` and `find_registry_path`.
# Many of these have the purpose to cover error cases, making them
# somewhat contrived. Another complicating factor is that some of the
# call variants have to interact with the package environment,
# including registries, of the running Julia process.
# Prepare by adding the registry used in previous tests.
Pkg.Registry.add(RegistrySpec(path = registry_dir))
# Use Multibreak as guinea pig. The sleep is a Travis workaround. See
# a later comment.
sleep(1)
register("Multibreak", "TestRegistry",
push = false, gitconfig = TEST_GITCONFIG)
# Directory already exists. Also tests code handling a trailing slash.
create_registry("TestRegistry2", "", gitconfig = TEST_GITCONFIG, push = false)
@test_throws ErrorException create_registry("TestRegistry2/", "",
gitconfig = TEST_GITCONFIG,
push = false)
# Not a developed package.
@test_throws ErrorException find_package_path("AutoHashEquals")
# Not a registered package.
pkg = Pkg.Types.Project(Dict("name" => "UUIDs",
"uuid" => "cf7118a7-6976-5b1a-9a39-7adc72f591a4"))
@test_throws ErrorException find_registry_path(nothing, pkg)
# Find package by module and path.
using Multibreak
package_path = find_package_path(Multibreak)
@test find_package_path(package_path) == package_path
# Find package by name.
if !Base.Sys.isapple()
@test find_package_path("Multibreak") == package_path
else
# Workaround a Travis macOS failure where presumably the same path
# is obtained but prefixed by `/private`.
@test occursin(package_path, find_package_path("Multibreak"))
end
# Not a package path.
corrupt_path = joinpath(package_path, "no_such_dir")
@test_throws ErrorException find_package_path(corrupt_path)
# Unknown package.
@test_throws ErrorException find_package_path("ZeroethTest")
# Find a registry by name.
pkg = Pkg.Types.read_project(joinpath(package_path, "Project.toml"))
@test find_registry_path("TestRegistry") == joinpath(first(DEPOT_PATH),
"registries",
"TestRegistry")
# The named registry does not exist.
@test_throws ErrorException find_registry_path("General", pkg)
# Find which registry contains a package.
@test find_registry_path(nothing, pkg) == joinpath(first(DEPOT_PATH),
"registries", "TestRegistry")
# Workaround for bad `mtime` resolution of 1 second on MacOS workers
# on Travis.
#
# The issue is that `read_registry` caches its results with respect to
# the file `mtime`. Since `read_registry` is called from within
# `register`, the old data will be read into the cache. If the new
# data is written close enough to the previous registry update so that
# `mtime` does not change, subsequent `read_registry` will keep using
# the old data from the cache.
sleep(1)
# More than one registry contains the package.
register("Multibreak", "TestRegistry2",
repo = "file://$(packages_dir)/FirstTest",
gitconfig = TEST_GITCONFIG, push = false)
@test_throws ErrorException find_registry_path(nothing, pkg)
# Dirty the registry repository and try to register a package.
registry_path = find_registry_path("TestRegistry2")
filename = joinpath(registry_path, "Registry.toml")
open(filename, "a") do io
write(io, "\n")
end
@test_throws ErrorException register("Multibreak", "TestRegistry2",
gitconfig = TEST_GITCONFIG, push = false)
# Dirty the package repository and try to register the package.
package_path = find_package_path("Multibreak")
filename = joinpath(package_path, "README.md")
open(filename, "a") do io
write(io, "\n")
end
@test_throws ErrorException register("Multibreak", "TestRegistry2",
gitconfig = TEST_GITCONFIG, push = false)
# Remove Project.toml from a package and try to register.
mv(joinpath(package_path, "Project.toml"),
joinpath(package_path, "Project.txt"))
@test_throws ErrorException register("Multibreak", "TestRegistry2",
gitconfig = TEST_GITCONFIG, push = false)
mv(joinpath(package_path, "Project.txt"),
joinpath(package_path, "Project.toml"))
if VERSION < v"1.2"
rm(depot_path, recursive = true)
end
|
{"hexsha": "df7f053ecf856ec6179cf8672e433e1f08a7ceeb", "size": 14036, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "EricForgy/LocalRegistry.jl", "max_stars_repo_head_hexsha": "76a02ca3ca126a81f67a0db1d686d3f39d3dbdfe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "EricForgy/LocalRegistry.jl", "max_issues_repo_head_hexsha": "76a02ca3ca126a81f67a0db1d686d3f39d3dbdfe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "EricForgy/LocalRegistry.jl", "max_forks_repo_head_hexsha": "76a02ca3ca126a81f67a0db1d686d3f39d3dbdfe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4048338369, "max_line_length": 156, "alphanum_fraction": 0.6982046167, "num_tokens": 3259}
|
# https://github.com/SciML/GalacticOptim.jl/blob/master/test/rosenbrock.jl#L30
using GalacticOptim
using ForwardDiff
using Ipopt
rosenbrock(x,p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
x0 = zeros(2)
p = Dict(1 => 1.0, 2 => 100.0)
function con2_c(x,p)
[x[1]^2 + x[2]^2, x[2]*sin(x[1])-x[1]]
end
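# Note: with lcons = [-Inf, -Inf] and ucons = [Inf, Inf] below, both values
# returned by con2_c are unbounded, so the constraints are effectively
# inactive; tighten the bounds to exercise the constrained solve.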
optprob = OptimizationFunction(rosenbrock, GalacticOptim.AutoForwardDiff(); cons=con2_c)
prob = OptimizationProblem(optprob, x0, p, lcons=[-Inf,-Inf], ucons=[Inf,Inf])
sol = solve(prob, Ipopt.Optimizer())
println(sol.minimum)
println(sol.u)
|
{"hexsha": "15d9b10d9b838cd7d2dfb95930cfd606d9da799f", "size": 551, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "debug/galacticoptim-mwe.jl", "max_stars_repo_name": "lanl-ansi/rosetta-opf", "max_stars_repo_head_hexsha": "09e76f505c04cc788256a4f3f479033ba6abe1f0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2022-03-25T19:09:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:42:04.000Z", "max_issues_repo_path": "debug/galacticoptim-mwe.jl", "max_issues_repo_name": "lanl-ansi/rosetta-opf", "max_issues_repo_head_hexsha": "09e76f505c04cc788256a4f3f479033ba6abe1f0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2022-03-28T01:10:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:44:10.000Z", "max_forks_repo_path": "debug/galacticoptim-mwe.jl", "max_forks_repo_name": "lanl-ansi/rosetta-opf", "max_forks_repo_head_hexsha": "09e76f505c04cc788256a4f3f479033ba6abe1f0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0454545455, "max_line_length": 88, "alphanum_fraction": 0.6733212341, "num_tokens": 215}
|
import pandas as pd
import numpy as np
import sys
import os
import configparser
import skimage
from skimage import io
sys.path.append(os.path.abspath('../../utils'))
import preprocessing
config = configparser.ConfigParser()
config.read('../../config.ini')
vg_json = config['PATHS']['vg-json']
data_dir = config['PATHS']['data']
image_dir = config['PATHS']['vg-images']
model_dir = data_dir + 'models/'
output_dir = data_dir+'prediction_arrays/'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
print('created dir {path}'.format(path=output_dir))
source_dir = data_dir+'extracted_data/'
w2c_datasets = np.load(model_dir+'pixelwise_lookup_tables.npz')
def color_pixelwise(img_path, w2c, bb=False):
    # cf. file im2c.m from http://lear.inrialpes.fr/people/vandeweijer/code/ColorNaming.tar
    # Input: image path and w2c matrix (RGB values -> colour names)
    img = io.imread(img_path)
# grayscale images
if len(img.shape) < 3:
        img = skimage.color.gray2rgb(img)
if bb:
        if isinstance(bb, list):
bb = {
'h': bb[0],
'w': bb[1],
'x': bb[2],
'y': bb[3]
}
img = img[bb['y']:bb['y']+bb['h'], bb['x']:bb['x']+bb['w']]
# split rgb channels
RR = img[:, :, 0]
GG = img[:, :, 1]
BB = img[:, :, 2]
    index_img = np.array(
        # R values (32 bins)
        np.floor(RR / 8) +
        # G values (32 bins)
        32 * np.floor(GG / 8) +
        # B values (32 bins)
        32 * 32 * np.floor(BB / 8)
    )
# initialize array for probability distribution over colour terms
clr_distribution = np.zeros(11)
# add probability distribution for every index to clr_distribution
for pxl_index in index_img.ravel():
clr_distribution = clr_distribution + w2c[int(pxl_index)][3:]
# normalize clr_distribution
clr_distribution = clr_distribution / len(index_img.ravel())
return clr_distribution
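# Illustrative helper (hypothetical, not used by the pipeline): maps one RGB
# triple to its w2c lookup row. E.g. a pure-red pixel (255, 0, 0) lands in bin
# floor(255/8) + 32*0 + 32*32*0 = 31, the last bin along the R axis.
def _index_of_rgb(r, g, b):
    """Return the 32x32x32 lookup-table row for a single RGB triple."""
    return int(np.floor(r / 8) + 32 * np.floor(g / 8) + 32 * 32 * np.floor(b / 8))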
def rows_pixelwise_classification(row, w2c):
bb = {
'h': row.bb_h,
'w': row.bb_w,
'x': row.bb_x,
'y': row.bb_y
}
img_path = image_dir + str(row.image_id) + '.jpg'
return (np.append(row.Index, color_pixelwise(img_path, w2c, bb)))
def dataframe_pixelwise_classification(df, w2c):
results = np.empty((0, 12))
for row in df.itertuples():
res = rows_pixelwise_classification(row, w2c)
res = np.reshape(res, (1, 12))
results = np.append(results, res, axis=0)
if results.shape[0] % 1000 == 0:
print(results.shape[0], '/', len(df))
return results
if __name__ == "__main__":
test_df = pd.read_csv(source_dir+"test_df.csv", index_col=0)
dev_df = pd.read_csv(source_dir+"dev_df.csv", index_col=0)
print('Test set shape:', test_df.shape)
print('Dev set shape:', dev_df.shape)
print('Starting Classification')
dev_w2c = dataframe_pixelwise_classification(dev_df, w2c_datasets['w2c'])
test_w2c = dataframe_pixelwise_classification(test_df, w2c_datasets['w2c'])
# dev_chip_w2c = dataframe_pixelwise_classification(dev_df, w2c_datasets['chip_w2c'])
# test_chip_w2c = dataframe_pixelwise_classification(test_df, w2c_datasets['chip_w2c'])
    # Export arrays to a file
print('write numpy arrays to file')
export_filename = 'results_pixelwise.npz'
np.savez_compressed(
output_dir+export_filename,
test_w2c=test_w2c,
dev_w2c=dev_w2c,
# test_chip_w2c = test_chip_w2c,
# dev_chip_w2c = dev_chip_w2c
)
print('shapes:')
print('test_w2c:', test_w2c.shape)
print('dev_w2c:', dev_w2c.shape)
# print('test_chip_w2c', test_chip_w2c.shape)
# print('dev_chip_w2c', dev_chip_w2c.shape)
|
{"hexsha": "9601ee6934abf598cc7206c9aedd593e318bf6a7", "size": 3744, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/pixelwise/pixelwise_predict.py", "max_stars_repo_name": "clause-jena/colour-term-grounding", "max_stars_repo_head_hexsha": "af5a32e1511b588a07a5b478885120d271aa7546", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-04T13:23:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-30T12:08:02.000Z", "max_issues_repo_path": "models/pixelwise/pixelwise_predict.py", "max_issues_repo_name": "clause-jena/colour-term-grounding", "max_issues_repo_head_hexsha": "af5a32e1511b588a07a5b478885120d271aa7546", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/pixelwise/pixelwise_predict.py", "max_forks_repo_name": "clause-jena/colour-term-grounding", "max_forks_repo_head_hexsha": "af5a32e1511b588a07a5b478885120d271aa7546", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7142857143, "max_line_length": 91, "alphanum_fraction": 0.6386217949, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
cat('\n---Polygenic Burden (PB) - Large Effect Variant (LEV) SCAN (PB-LEV-SCAN)---\n')
cat('---version 1.0---\n')
library('ggplot2')
library("optparse")
options(scipen = 10)
option_list = list(
#path
make_option("--plink_path", action="store", default="plink", type='character',
help="Path to plink1.9 [%default]"),
make_option("--plink2_path", action="store", default="plink2", type='character',
help="Path to plink2 [%default]"),
make_option("--maf_distribution_path", action="store", default=NA, type='character',
help="Path to the maf distribution file (estimated from empirical data) [required]"),
make_option("--ldsc_path", action="store", default=NA, type='character',
help="Path to the ldsc file (estimated from empirical data) [required]"),
make_option("--tmp_folder", action="store", default=NA, type='character',
              help="tmp folder for intermediate files [required]. Please vary the tmp folder names (e.g., tmp_1, tmp_2, tmp_3, etc.) when running multiple jobs in parallel; otherwise they will overwrite each other"),
make_option("--out_folder", action="store", default=NA, type='character',
help="path for output folder [required]"),
make_option("--out_prefix", action="store", default=NA, type='character',
help="prefix for output [required]"),
#model
make_option("--genetic_architecture", action="store", default='polygenic', type='character',
help="polygenic=polygenic architecture \nNegativeSelection=with negative selection \nLDAK=LD-adjusted kinship"),
make_option("--disease_model", action="store", default='LTM', type='character',
help="LTM=liability threshold model\nlogit=logit model"),
#parameter
make_option("--n_simu", action="store", default=500, type='integer',
help="simulation times"),
make_option("--sample_size", action="store", default=10000, type='integer',
help="number of sample size"),
make_option("--h2_PB", action="store", default=0.3, type='double',
help="heritability due to common variant based polygenicity"),
make_option("--h2_LEV", action="store", default=NA, type='double',
help="heritability due to large effect variant(s). Use h2_LEV or OR_LEV. If both h2_LEV and OR_LEV are provided, OR_LEV will be ignored"),
make_option("--OR_LEV", action="store", default=NA, type='double',
help="OR of large effect variant. Use h2_LEV or OR_LEV. If both h2_LEV and OR_LEV are provided, OR_LEV will be ignored"),
make_option("--freq_LEV", action="store", default=0.01, type='double',
help="allele frequency of large effect variant(s)"),
make_option("--prevalence", action="store", default=0.01, type='double',
help="disease prevalence in population"),
make_option("--pi0", action="store", default=0, type='double',
help="proportion of non-causal common, small effect variants"),
make_option("--seed", action="store", default=1, type='integer',
help="seed for sampling"),
#other options
make_option("--generate_a_figure", action="store_true", default=TRUE,
              help="Generate a figure comparing the number of LEV-carriers among patients with different polygenic burdens [default: %default]"),
make_option("--clean_tmp", action="store_true", default=TRUE,
help="Delete the tmp folder and all temporary files [default: %default]")
)
opt = parse_args(OptionParser(option_list=option_list))
#input
plink_path = opt$plink_path
plink2_path = opt$plink2_path
maf_distribution_path = opt$maf_distribution_path
ldsc_path = opt$ldsc_path
tmp_folder = opt$tmp_folder
out_folder = opt$out_folder
out_prefix = opt$out_prefix
genetic_architecture = opt$genetic_architecture
disease_model = opt$disease_model
N_simulation = opt$n_simu
N_samples = opt$sample_size
h2_PB = opt$h2_PB
freq_LEV = opt$freq_LEV
k = opt$prevalence
seed = opt$seed
pi0 = opt$pi0
generate_a_figure = opt$generate_a_figure
clean_tmp = opt$clean_tmp
if(is.na(opt$h2_LEV) & is.na(opt$OR_LEV)){
h2_LEV = 0.01
beta_LEV = sqrt(h2_LEV/2/freq_LEV/(1-freq_LEV))
}else if(!is.na(opt$h2_LEV) & is.na(opt$OR_LEV)){
h2_LEV = opt$h2_LEV
beta_LEV = sqrt(h2_LEV/2/freq_LEV/(1-freq_LEV))
}else if(is.na(opt$h2_LEV) & !is.na(opt$OR_LEV)){
#r2=var(ln(OR)*G)/(var(ln(OR)*G)+3.29) ref Hong S. Lee, Gen Epi
  h2_LEV = (log(opt$OR_LEV)^2 * 2 * freq_LEV * (1-freq_LEV)) / (log(opt$OR_LEV)^2 * 2 * freq_LEV * (1-freq_LEV) + 3.29)
beta_LEV = sqrt(h2_LEV/2/freq_LEV/(1-freq_LEV))
}else if(!is.na(opt$h2_LEV) & !is.na(opt$OR_LEV)){
h2_LEV = opt$h2_LEV
beta_LEV = sqrt(h2_LEV/2/freq_LEV/(1-freq_LEV))
  warning('Please provide only h2_LEV or OR_LEV; when both are given, OR_LEV is ignored')
}
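#Quick numeric check of the OR -> h2 conversion above (illustrative only):
#for OR_LEV = 3 and freq_LEV = 0.01,
#  log(3)^2 * 2 * 0.01 * 0.99 ~ 0.0239
#so h2_LEV ~ 0.0239 / (0.0239 + 3.29) ~ 0.0072 on the liability scale.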
#load empirical ldsc
cat('INFO loading ldsc from empirical dataset\n')
ldsc_raw<-read.table(ldsc_path,header = T,stringsAsFactors = F)
#make tmp dir
if(!dir.exists(tmp_folder)){dir.create(tmp_folder)}
#make output dir
if(!dir.exists(out_folder)){dir.create(out_folder)}
#-----------------------------------------------
#df for simulation results
simu<-data.frame(n_simu=seq(1,N_simulation),wilcox_p=NA,p1=NA,p2=NA,p3=NA,p4=NA,p5=NA,p6=NA,p7=NA,p8=NA,p9=NA,p10=NA)
cat('INFO start simulation\n')
for (i in 1:N_simulation){
#if(i %in% c(1,seq(1,1000)*10+1)){
cat(paste0('INFO ',i,' / ',N_simulation,' \n'))
#}
###------simulate genotype for common causal variants------
  #use plink to simulate genotype data. Note: N_samples/2 doesn't mean half cases and half controls. See the plink website for details.
  cmd=paste0(plink_path,' --simulate ',maf_distribution_path,' --simulate-ncases ',N_samples/2,' --simulate-ncontrols ',N_samples/2,' --seed ',seed,' --silent --make-bed --out ',tmp_folder,'/tmp_geno; ',plink_path,' --bfile ',tmp_folder,'/tmp_geno --silent --freq --out ',tmp_folder,'/tmp_freq')
system(cmd,wait = T,ignore.stdout=T,ignore.stderr=T)
#10k samples ~30 secs
#100k samples ~3 mins
#df of effect size for small effect common variants
df<-read.table(paste0(tmp_folder,'/tmp_freq.frq'),stringsAsFactors = F,header = T)
N_causal_SNPs=nrow(df)
#ldsc, matching the correlation between ldsc and MAF from empirical data
df$id=seq(1,nrow(df))
df<-df[order(df$MAF),]
ldsc<-ldsc_raw[sample(nrow(ldsc_raw),nrow(df),replace = T),]
ldsc<-ldsc[order(ldsc$MAF),]
df$ldsc=ldsc$ldscore
  # a) Infinitesimal architecture
  # beta_poly~N(0,h2_PB/N_causal_SNPs)
if(genetic_architecture=='polygenic'){
set.seed(i)
df$effect=(1-pi0)*rnorm(N_causal_SNPs,0,(h2_PB/N_causal_SNPs)^0.5)
  # b) Negative selection
  # beta_poly~N(0,k_constant*[f(1-f)]^(1+alpha))
}else if(genetic_architecture=='NegativeSelection'){
set.seed(i)
    df[,'effect']<-sapply(df$MAF,function(f) rnorm(1,mean=0,sd=((f*(1-f))^0.63)^0.5))
#find k constant
k_constant=(h2_PB/N_causal_SNPs/var(df[,'effect']))
#multiply by k constant
df[,'effect']<-df[,'effect']*k_constant^0.5*(1-pi0)
  # c) LD-adjusted kinship
  # beta_poly~N(0,k_constant*[f(1-f)]^(1+alpha)*(1/(1+ldsc)))
}else if(genetic_architecture=='LDAK'){
set.seed(i)
    df[,'effect']<-mapply(function(f,ld) rnorm(1,mean=0,sd=(((f*(1-f))^0.75)*1/(1+ld))^0.5), df$MAF, df$ldsc)
#find k constant
k_constant=(h2_PB/N_causal_SNPs/var(df[,'effect']))
#multiply by k constant
df[,'effect']<-df[,'effect']*k_constant^0.5*(1-pi0)
}
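  #Note: the exponents 0.63 and 0.75 above make the per-SNP variance
  #proportional to [f(1-f)]^(1+alpha), i.e. alpha ~ -0.37 and -0.25;
  #k_constant then rescales the variances so they sum to h2_PB.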
#write weight file
write.table(df[,c('SNP','A1','effect')],paste0(tmp_folder,'/tmp.weight'),row.names = F,col.names = F,sep='\t',quote = F)
  #use plink2 to calculate the PB (polygenic burden, i.e., PRS of common variants)
  cmd=paste0(plink2_path,' --bfile ',tmp_folder,'/tmp_geno --score ',tmp_folder,'/tmp.weight variance-standardize --out ',tmp_folder,'/tmp.score')
system(cmd,wait = T,ignore.stdout=T,ignore.stderr=T)
#load PB results (A)
PB=read.table(paste0(tmp_folder,'/tmp.score.sscore'),stringsAsFactors = F)
PB$IID=PB$V1
PB$A=PB$V6*2*N_causal_SNPs
PB<-PB[,c('IID','A')]
###------simulate genotype for the large effect variants (LEV)------
set.seed(seed+10)
R_raw_genotype = rbinom(n = N_samples, size = 2, prob = freq_LEV)
#genetic risk due to LEV (R)
R<- R_raw_genotype * beta_LEV
#merge A and R
LEV<-data.frame(IID=PB$IID,R=R,R_raw_genotype=R_raw_genotype)
combined<-merge(PB,LEV,by='IID')
#x=A+R total genetic risk
combined$x=combined$A+combined$R
#h2_e error/environment
h2_e=max(1-h2_PB-var(combined$R),0)
# #
# print(h2_PB)
# print(h2_e)
# print(sum(combined$x))
# print(nrow(combined))
#disease liability (L) L=A+R+e
set.seed(i+100)
combined$L=combined$x+rnorm(nrow(combined),0,h2_e^0.5)
#threshold for liability-threshold model (LTM)
t=quantile(combined$L,1-k)[[1]]
#disease probability
combined$prob=pnorm(-(t-combined$x)/h2_e^0.5)
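  #equivalently, prob = P(L > t | x) with L = x + e and e ~ N(0, h2_e)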
#generate binary trait
if(disease_model=='LTM'){
combined$y_binary<-ifelse(combined$L>t,1,0)
}else if (disease_model=='logit'){
set.seed(i*10)
combined$y_binary<-sapply(combined$prob,function(x) rbinom(1,1,x))
}else{
stop('unknown disease model, please provide "LTM" or "logit"')
}
#LEV carrier
combined$carrier<-ifelse(combined$R_raw_genotype!=0,1,0)
#check the 2 by 2 table
table_check<-table(combined$y_binary,combined$carrier)
#simulated cases (patients)
cases<-combined[combined$y_binary==1,]
#test PB-LEV in cases only
if(length(which(combined$y_binary==1 & combined$carrier==1))>0 & length(which(combined$y_binary==1 & combined$carrier==0))>0){
#test PB-LEV correlation, one side
ans_inverse<-wilcox.test(cases[cases$carrier==1,'A'],cases[cases$carrier==0,'A'],alternative = c("less"))
simu[i,'wilcox_p']<-ans_inverse$p.value
}else{
#not enough samples in the 2 by 2 table
simu[i,'wilcox_p']<-1
}
  #group cases into 10 equal-width bins based on their PB risk
cases$group <- as.numeric(cut(cases$A, 10))
#pool cases from each simulation
if(i==1){
cases_pool=cases
}else{
cases_pool=rbind(cases_pool,cases)
}
#number of LEV-carriers for each bin
tmp_c<-data.frame(Group.1=seq(1,10))
tmp_c<-merge(tmp_c,as.data.frame(aggregate(cases$carrier,list(cases$group),sum)),all=T)
#number of samples in each bin
tmp_n<-as.data.frame(table(cases$group))
#merge
tmp_c<-merge(tmp_c,tmp_n,by=1,all=T)
tmp_c[is.na(tmp_c)]<-0
colnames(tmp_c)<-c('group','x','n') #'group'; # of carriers; # of cases
#number of LEV-carriers per 1000 cases
simu[i,3:12]<-round(tmp_c$x/tmp_c$n*1000,2)
}
cat(paste0('INFO simulation finished\n'))
cat(paste0('INFO saving results\n'))
#save cases results
write.table(cases_pool,paste0(out_folder,'/',out_prefix,'_cases.txt'),quote = F,sep='\t',row.names = F)
#save simu results
write.table(simu,paste0(out_folder,'/',out_prefix,'_simu.txt'),quote = F,sep='\t',row.names = F)
#result df
output<-data.frame(genetic_architecture=genetic_architecture,
disease_model=disease_model,
h2_PB=h2_PB,
                   OR_LEV=exp(beta_LEV),
freq_LEV=freq_LEV,
prevalence=k,
pi0=pi0,
N_samples=N_samples,
N_simulation=N_simulation,
seed=seed,
utility=length(which(simu[,'wilcox_p']<0.05))/nrow(simu)
)
#mean and sd for the number of LEV-carriers per 1000 cases
for(j in 1:10){
output[1,paste0('mean_p',j)]<-mean(simu[,paste0('p',j)],na.rm = T)
output[1,paste0('sd_p',j)]<-sd(simu[,paste0('p',j)],na.rm = T)
}
#save result
write.table(output,paste0(out_folder,'/',out_prefix,'.txt'),quote = F,sep='\t',row.names = F)
#---figure generator (LEV_carriers_per_1000_cases_comparison)---
if(generate_a_figure){
cat(paste0('INFO figure generating\n'))
  #get the x-coordinates for each cSEV-PB bin
p_tmp<-ggplot(data=cases_pool,aes(x=A))+ #use the A from last simulation
geom_histogram(bins = 10)
#df for # carriers in each bin
carrier_info<-data.frame(pos=ggplot_build(p_tmp)$data[[1]]$x,
mean=as.numeric(output[1,paste0('mean_p',seq(1,10))]),
sd=as.numeric(output[1,paste0('sd_p',seq(1,10))]))
carrier_info$lower=carrier_info$mean-carrier_info$sd
carrier_info$upper=carrier_info$mean+carrier_info$sd
carrier_info$lower=ifelse(carrier_info$lower<0,0,carrier_info$lower)
carrier_info$upper=ifelse(carrier_info$upper>1000,1000,carrier_info$upper)
#find ylim (the highest density)
y_density_max<-max(ggplot_build(p_tmp)$data[[1]]$density)*1.2
#coeff for adjusting dual y-axis
coeff=max(carrier_info$upper,na.rm = T)/y_density_max
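  #carrier counts are divided by coeff before plotting on the density axis;
  #sec_axis(~ . * coeff) below maps them back to counts on the right axis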
#plot
p<-ggplot(data=carrier_info)+
#cSEV-PB distribution
geom_histogram(data=cases_pool,aes(x=A,y=..density..), colour="black", fill="white",bins=10)+
geom_density(data=cases_pool,aes(x=A),alpha=.2, fill="#FF6666",bw=0.2)+
#mean of 'number of LEV-carriers per 1000 cases'
geom_point(aes(x=carrier_info$pos,y=carrier_info$mean/coeff),color='dodgerblue3')+
#error bar for 'number of LEV-carriers per 1000 cases' (+- 1sd)
geom_pointrange(aes(x=carrier_info$pos,y=carrier_info$mean/coeff,ymin=carrier_info$lower/coeff, ymax=carrier_info$upper/coeff),color='dodgerblue3')+
scale_y_continuous(
# Features of the first axis
name = "density",
#limits for density
limits = c(0,y_density_max),
# Add a second axis and specify its features
sec.axis = sec_axis(~.*coeff, name="number of LEV-carriers per 1000 cases")
)+
xlab('polygenic burden (PB)')+
theme(legend.title = element_blank(),
panel.grid =element_blank(),
panel.background = element_blank(),
panel.border = element_blank(),
axis.line = element_line(colour = "black"),
legend.position = 'none',
axis.title.y.right = element_text(color = 'dodgerblue3'),
axis.text.y.right = element_text(color = 'dodgerblue3'))
#figure
pdf(paste0(out_folder,'/',out_prefix,'_LEV_carriers_per_1000_cases_comparison_figure.pdf'),height = 5,width = 5)
suppressWarnings(print(p))
dev.off()
}
if(clean_tmp){
cat(paste0('INFO cleaning up tmp folder\n'))
cmd=paste0('rm -r ',tmp_folder)
system(cmd,wait = F)
}
cat(paste0('INFO finished.\n'))
|
{"hexsha": "3f5f03505d692bb6446bfae026ab34b204d03601", "size": 14895, "ext": "r", "lang": "R", "max_stars_repo_path": "src/PB-LEV-SCAN_1.0.r", "max_stars_repo_name": "gamazonlab/Polygenic_Background_Rare_Variant_Axis", "max_stars_repo_head_hexsha": "f86f3e385ae458e738ce9dcd0c0dbaa6e376fac4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-08-05T14:31:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:58:29.000Z", "max_issues_repo_path": "src/PB-LEV-SCAN_1.0.r", "max_issues_repo_name": "gamazonlab/Polygenic_Background_Rare_Variant_Axis", "max_issues_repo_head_hexsha": "f86f3e385ae458e738ce9dcd0c0dbaa6e376fac4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PB-LEV-SCAN_1.0.r", "max_forks_repo_name": "gamazonlab/Polygenic_Background_Rare_Variant_Axis", "max_forks_repo_head_hexsha": "f86f3e385ae458e738ce9dcd0c0dbaa6e376fac4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-10T16:27:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-10T16:27:27.000Z", "avg_line_length": 37.9974489796, "max_line_length": 281, "alphanum_fraction": 0.6504196039, "num_tokens": 4486}
|
[STATEMENT]
lemma guarantees_imp: "(Y = UNIV guarantees Y) ==> ex_prop(Y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Y = UNIV guarantees Y \<Longrightarrow> ex_prop Y
[PROOF STEP]
by (auto simp add: guar_def ex_prop_equiv component_of_def dest: sym)
|
{"llama_tokens": 98, "file": null, "length": 1}
|
[STATEMENT]
lemma iT_Div_assoc:"I \<oslash> a \<oslash> b = I \<oslash> (a * b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. I \<oslash> a \<oslash> b = I \<oslash> a * b
[PROOF STEP]
by (simp add: iT_Div_def image_image div_mult2_eq)
|
{"llama_tokens": 115, "file": "Nat-Interval-Logic_IL_IntervalOperators", "length": 1}
|
import sys
import pandas as pd
import numpy as np
import nltk
from joblib import dump
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
"""
Load and generate datasets for fitting along with message categories list
Parameters
-----------
database_filepath : str
SQLite database file path
Returns
----------
X : DataFrame
Contains messages for generating features
Y : DataFrame
Contains binary labels for various message categories
category_names : list
List of different message categories
"""
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table("DisasterResponseData", con=engine)
X = df["message"]
Y = df[[col for col in df.columns.tolist() if col not in ["id", "message", "original", "genre"]]]
category_names = Y.columns.tolist()
return X, Y, category_names
def tokenize(text):
"""
Passed string is normalized, lemmatized, and tokenized
Parameters
-----------
text : str
text to be tokenized
Returns
----------
clean_tokens : list
Contains generated tokens
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
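# Illustrative example (exact output depends on the installed WordNet data):
#   tokenize("Water needed in Dhaka!") -> ['water', 'needed', 'in', 'dhaka', '!']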
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
This transformer class extract the starting verb of a sentence
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            # skip empty sentences, which produce no POS tags
            if not pos_tags:
                continue
            first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
def build_model(useGridSearch=False):
"""
Creates scikit Pipeline object for processing text messages and fitting a classifier.
Parameters
-----------
useGridSearch: bool
        Whether grid search should be used for model training
Returns
----------
pipeline : Pipeline
Pipeline object
"""
pipeline = Pipeline([
("features", FeatureUnion([
('text_pipeline', Pipeline([
('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('scaler', StandardScaler(with_mean=False))
])),
('tfidf_transformer', TfidfVectorizer()),
('starting_verb_extr', StartingVerbExtractor())
])),
("clf", MultiOutputClassifier(AdaBoostClassifier()))
])
if useGridSearch:
parameters = {
'features__text_pipeline__count_vectorizer__max_df': (0.5, 1.0),
'features__tfidf_transformer__use_idf': (True, False),
'features__transformer_weights': (
                {'text_pipeline': 1, 'tfidf_transformer': 1, 'starting_verb_extr': 1},
                {'text_pipeline': 0.5, 'tfidf_transformer': 1, 'starting_verb_extr': 0.5},
)
}
        cv = GridSearchCV(pipeline, param_grid=parameters, cv=3, verbose=2)
return cv
return pipeline
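# Hypothetical usage sketch (variable names assumed from this script):
#   model = build_model(useGridSearch=True)
#   model.fit(X_train, Y_train)  # GridSearchCV then exposes model.best_params_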
def evaluate_model(model, X_test, Y_test, category_names):
"""
    Applies the scikit-learn pipeline to the test set and prints the model performance (precision, recall and f1-score)
Parameters
-----------
model : Pipeline
fit pipeline
X_test : ndarray
test features
Y_test : ndarray
test labels
category_names : list
List of different message categories
Returns
----------
None
"""
Y_pred = model.predict(X_test)
print(classification_report(Y_test, Y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
Save trained model
Parameters
-----------
model : Pipeline
fit pipeline
model_filepath : str
path with dump format
Returns
----------
None
"""
    dump(model, model_filepath)
def main():
"""
Runner function
This function:
1) Extract data from SQLite db
2) Train ML model on training set
3) Estimate model performance on test set
4) Save trained model
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X.values, Y.values, test_size=0.2, random_state=42)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
{"hexsha": "7942980b6aa13cebcca6b812f2901652441fa6ae", "size": 6483, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/train_classifier.py", "max_stars_repo_name": "jeena72/disaster-response-pipeline", "max_stars_repo_head_hexsha": "4621425a29e7fa2f162c725555787b6fc24f8010", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/train_classifier.py", "max_issues_repo_name": "jeena72/disaster-response-pipeline", "max_issues_repo_head_hexsha": "4621425a29e7fa2f162c725555787b6fc24f8010", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/train_classifier.py", "max_forks_repo_name": "jeena72/disaster-response-pipeline", "max_forks_repo_head_hexsha": "4621425a29e7fa2f162c725555787b6fc24f8010", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2943925234, "max_line_length": 111, "alphanum_fraction": 0.6296467685, "include": true, "reason": "import numpy", "num_tokens": 1383}
|
# https://arxiv.org/pdf/1301.1071.pdf "Direct TSQR"
import os
os.environ["OMP_NUM_THREADS"] = "24" # This is the default on my machine (Zemaitis)
import sys
import argparse
import numpy as np
import scipy.linalg
from time import perf_counter as time
# Accepts a matrix and returns a list of its blocks
# block_size rows are grouped together
def make_blocked(A, block_size):
nrows = A.shape[0]
nblocks = (nrows + block_size - 1) // block_size # ceiling division
block_list = []
for i in range(0, nblocks):
        lower = i * block_size  # first row in block, inclusive
upper = (i + 1) * block_size # last row in block, exclusive
if upper > nrows:
upper = nrows
block_list.append(A[lower:upper])
return block_list, nblocks
# Get back to original matrix form
def unblock(A):
return np.concatenate(A)
def tsqr_blocked(A):
if NCOLS > BLOCK_SIZE:
print('Block size must be greater than or equal to the number of columns in the input matrix', file=sys.stderr)
exit(1)
A_blocked, nblocks = make_blocked(A, BLOCK_SIZE)
Q1 = []
R1 = []
for block in A_blocked:
# Use numpy's built in qr for the base factorization
block_Q, block_R = scipy.linalg.qr(block, mode='economic')
Q1.append(block_Q)
R1.append(block_R)
R1 = unblock(R1)
# R here is the final R result
Q2, R = scipy.linalg.qr(R1, mode='economic')
# Q1 and Q2 must have an equal number of blocks, where Q1 blocks' ncols = Q2 blocks' nrows
# Q2 is currently an (ncols * nblocks) x ncols matrix. Need nblocks of ncols rows each
Q2 = make_blocked(Q2, A.shape[1])[0]
Q = [np.matmul(Q1[i], Q2[i]) for i in range(nblocks)]
Q = unblock(Q)
return Q, R
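# Shape bookkeeping for an m x n input with m >> n: each of the nblocks base
# factorizations yields a (block rows) x n Q and an n x n R, so the stacked R1
# is (nblocks * n) x n. The second QR gives Q2 of shape (nblocks * n) x n plus
# the final n x n R, and the block-wise products Q1[i] @ Q2[i] concatenate to
# the full m x n Q.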
def check_result(A, Q, R):
# Check product
is_correct_prod = np.allclose(np.matmul(Q, R), A)
# Check orthonormal
Q_check = np.matmul(Q.transpose(), Q)
is_ortho_Q = np.allclose(Q_check, np.identity(NCOLS))
# Check upper
is_upper_R = np.allclose(R, np.triu(R))
return is_correct_prod and is_ortho_Q and is_upper_R
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rows", help="Number of rows for input matrix; must be >> cols", type=int, default=5000)
parser.add_argument("-c", "--cols", help="Number of columns for input matrix", type=int, default=100)
parser.add_argument("-b", "--block_size", help="Block size to break up input matrix; must be >= cols", type=int, default=500)
parser.add_argument("-i", "--iterations", help="Number of iterations to run experiment", type=int, default=1)
parser.add_argument("-w", "--warmup", help="Number of warmup runs to perform before the experiment", type=int, default=0)
parser.add_argument("-K", "--check_result", help="Checks final result on CPU", action="store_true")
parser.add_argument("--csv", help="Prints stats in csv format", action="store_true")
args = parser.parse_args()
# Set global config variables
NROWS = args.rows
NCOLS = args.cols
BLOCK_SIZE = args.block_size
ITERS = args.iterations
WARMUP = args.warmup
CHECK_RESULT = args.check_result
CSV = args.csv
print('%**********************************************************************************************%\n')
print('Config: rows=', NROWS, ' cols=', NCOLS, ' block_size=', BLOCK_SIZE, ' iterations=', ITERS, ' warmup=', WARMUP, \
' check_result=', CHECK_RESULT, ' csv=', CSV, sep='')
for i in range(WARMUP + ITERS):
# Original matrix
np.random.seed(i)
A = np.random.rand(NROWS, NCOLS)
# Multithreaded blocked version with VECs
start = time()
Q, R = tsqr_blocked(A)
end = time()
if (i >= WARMUP):
print(end - start)
if CHECK_RESULT:
print(check_result(A, Q, R))
print('\n%**********************************************************************************************%\n')
|
{"hexsha": "24df71e491b021cae5be9c31517d6f7bc44a55cb", "size": 4037, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/TSQR/qr_simple_blocked.py", "max_stars_repo_name": "UTexas-PSAAP/Parla.py", "max_stars_repo_head_hexsha": "3d92a156be41983e32576940ef6baffd78d4eb84", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-06-22T16:20:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T22:54:40.000Z", "max_issues_repo_path": "examples/TSQR/qr_simple_blocked.py", "max_issues_repo_name": "UTexas-PSAAP/Parla.py", "max_issues_repo_head_hexsha": "3d92a156be41983e32576940ef6baffd78d4eb84", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 86, "max_issues_repo_issues_event_min_datetime": "2019-10-24T07:43:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T05:20:17.000Z", "max_forks_repo_path": "examples/TSQR/qr_simple_blocked.py", "max_forks_repo_name": "UTexas-PSAAP/Parla.py", "max_forks_repo_head_hexsha": "3d92a156be41983e32576940ef6baffd78d4eb84", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-10-24T05:46:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T19:47:58.000Z", "avg_line_length": 34.8017241379, "max_line_length": 129, "alphanum_fraction": 0.6130790191, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1043}
|
[STATEMENT]
lemma trunc_ell2_union_disjoint: \<open>M\<inter>N = {} \<Longrightarrow> trunc_ell2 (M \<union> N) \<psi> = trunc_ell2 M \<psi> + trunc_ell2 N \<psi>\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M \<inter> N = {} \<Longrightarrow> trunc_ell2 (M \<union> N) \<psi> = trunc_ell2 M \<psi> + trunc_ell2 N \<psi>
[PROOF STEP]
by (simp add: trunc_ell2_union)
|
{"llama_tokens": 150, "file": "Complex_Bounded_Operators_Complex_L2", "length": 1}
|
import numpy as np
from util import util
from config.atlas_config import PnCConfig, WBCConfig
from pnc.wbc.wbc import WBC
from pnc.wbc.joint_integrator import JointIntegrator
class AtlasController(object):
def __init__(self, tf_container, robot):
self._tf_container = tf_container
self._robot = robot
# Initialize WBC
act_list = [False] * robot.n_floating + [True] * robot.n_a
self._wbc = WBC(act_list, PnCConfig.SAVE_DATA)
if WBCConfig.B_TRQ_LIMIT:
self._wbc.trq_limit = self._robot.joint_trq_limit
else:
self._wbc.trq_limit = None
self._wbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT
self._wbc.lambda_rf = WBCConfig.LAMBDA_RF
# Initialize Joint Integrator
self._joint_integrator = JointIntegrator(robot.n_a,
PnCConfig.CONTROLLER_DT)
self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ
self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ
self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR
self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit
self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit
self._b_first_visit = True
def get_command(self):
if self._b_first_visit:
self.first_visit()
# Dynamics properties
mass_matrix = self._robot.get_mass_matrix()
mass_matrix_inv = np.linalg.inv(mass_matrix)
coriolis = self._robot.get_coriolis()
gravity = self._robot.get_gravity()
self._wbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,
gravity)
# Task and Contact Setup
w_hierarchy_list = []
for task in self._tf_container.task_list:
task.update_jacobian()
task.update_cmd()
w_hierarchy_list.append(task.w_hierarchy)
self._wbc.w_hierarchy = np.array(w_hierarchy_list)
for contact in self._tf_container.contact_list:
contact.update_contact()
# WBC commands
joint_trq_cmd, joint_acc_cmd, rf_cmd = self._wbc.solve(
self._tf_container.task_list, self._tf_container.contact_list,
False)
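        # The WBC returns feedforward torques, joint accelerations, and
        # reaction forces; the accelerations are integrated below into
        # velocity and position setpoints for the joint-level controllers.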
# Double integration
joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(
joint_acc_cmd, self._robot.joint_velocities,
self._robot.joint_positions)
command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,
joint_vel_cmd,
joint_trq_cmd)
return command
def first_visit(self):
joint_pos_ini = self._robot.joint_positions
self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),
joint_pos_ini)
self._b_first_visit = False
|
{"hexsha": "cbdffaacd157c16433b8f98f575f33558350c26c", "size": 2994, "ext": "py", "lang": "Python", "max_stars_repo_path": "pnc/atlas_pnc/atlas_controller.py", "max_stars_repo_name": "junhyeokahn/ASE389", "max_stars_repo_head_hexsha": "a57d668f968da1db56f0dfe8dadad548ad631f33", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pnc/atlas_pnc/atlas_controller.py", "max_issues_repo_name": "junhyeokahn/ASE389", "max_issues_repo_head_hexsha": "a57d668f968da1db56f0dfe8dadad548ad631f33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pnc/atlas_pnc/atlas_controller.py", "max_forks_repo_name": "junhyeokahn/ASE389", "max_forks_repo_head_hexsha": "a57d668f968da1db56f0dfe8dadad548ad631f33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-05T06:59:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T20:14:02.000Z", "avg_line_length": 39.92, "max_line_length": 76, "alphanum_fraction": 0.6406145625, "include": true, "reason": "import numpy", "num_tokens": 670}
|
# -*- coding:utf8 -*-
r"""Namescope Transformer
Transformers that get rid of namescope/nodes which are not needed
for inference
"""
import re
from collections import defaultdict
from copy import deepcopy
import numpy as np
import tensorflow as tf
from utensor_cgen.frontend.tensorflow import GraphDefParser
from utensor_cgen.ir import OperationInfo, uTensorGraph
from utensor_cgen.logger import logger
from utensor_cgen.matcher import uTensorGraphMatcher
from utensor_cgen.utils import (parse_tensor_name, prune_graph,
topologic_order_graph)
from .base import Transformer
from .pipeline import TransformerPipeline
__all__ = ["DropoutTransformer", "BatchNormTransformer", "InlineTransformer", "TensorLifeProbe"]
@TransformerPipeline.register_transformer
class TensorLifeProbe(Transformer):
METHOD_NAME = 'tensorlife'
KWARGS_NAMESCOPE = '_utensor_utlife'
DATA_NAME = 'address'
def __init__(
self,
      buff_size=100000, # 100k bytes
unit_size=4
):
self.buff_size = buff_size
self.unit_size = unit_size
def transform(self, ugraph):
new_ugraph = deepcopy(ugraph)
new_ugraph.setup_data_manager({self.DATA_NAME: " "})
# use_def_table: dict, tensor_name -> {'start': op_idx, 'end': op_idx}
use_def_table = self._create_resource_table(new_ugraph)
allocate_table = dict()
allocate_success = self.allocate_graph(new_ugraph, allocate_table, use_def_table, self.buff_size, self.unit_size)
if allocate_success:
for node_name in new_ugraph.topo_order:
in_t_infos = new_ugraph.ops_info[node_name].input_tensors
for in_o in in_t_infos:
if in_o.name in allocate_table:
new_ugraph.data_manager.address = (in_o.name, allocate_table[in_o.name]['offsetstart'])
out_t_infos = new_ugraph.ops_info[node_name].output_tensors
for out_o in out_t_infos:
if out_o.name in allocate_table:
new_ugraph.data_manager.address = (out_o.name, allocate_table[out_o.name]['offsetstart'])
return new_ugraph
return ugraph
def _query_offset_fromallocate_table(self, allocate_table, start, end):
new_start = start
new_end = end
for key in allocate_table:
if allocate_table[key]['offsetstart'] >= start and allocate_table[key]['offsetend'] <= end:
continue
elif allocate_table[key]['offsetstart'] <= start and allocate_table[key]['offsetend'] >= start:
new_start = allocate_table[key]['offsetstart']
if allocate_table[key]['offsetend'] >= end:
new_end = max(new_end, allocate_table[key]['offsetend'])
else:
new_end = max(end, new_end)
elif allocate_table[key]['offsetstart'] >= start and allocate_table[key]['offsetend'] >= start:
if allocate_table[key]['offsetend'] >= end:
new_end = max(new_end, allocate_table[key]['offsetend'])
else:
new_end = max(end, new_end)
return new_start, new_end
def _query_time_fromallocate_table(self, allocate_table, start, end):
time_start = start
time_end = end
for key in allocate_table:
if allocate_table[key]['start'] >= start and allocate_table[key]['end'] <= end:
continue
elif allocate_table[key]['start'] <= start and allocate_table[key]['end'] >= start:
if allocate_table[key]['end'] >= end:
time_end = max(time_end, allocate_table[key]['end'])
else:
time_end = max(end, time_end)
elif allocate_table[key]['start'] >= start and allocate_table[key]['end'] >= start:
if allocate_table[key]['end'] >= end:
time_end = max(time_end, allocate_table[key]['end'])
else:
time_end = max(end, time_end)
return time_start, time_end
def _query_result(self, allocate_table, offset, length, timestart, timeend):
for key in allocate_table:
mem_occupied = (
(allocate_table[key]['offsetstart'] >= offset and allocate_table[key]['offsetstart'] <= offset + length) or
(allocate_table[key]['offsetstart'] <= offset and allocate_table[key]['offsetend'] >= offset)
)
life_span_occupied = (
(allocate_table[key]['start'] >= timestart and allocate_table[key]['start'] <= timeend) or
(allocate_table[key]['start'] <= timestart and allocate_table[key]['end'] >= timestart)
)
if mem_occupied and life_span_occupied:
return True
return False
def allocate_tensor(self, tensors, tensor_index, allocate_table, use_def_table, buffer_size, unit_size):
if tensor_index == len(tensors):
return True
if tensors[tensor_index].name in allocate_table:
return self.allocate_tensor(tensors, tensor_index + 1, allocate_table, use_def_table, buffer_size, unit_size)
tensor = tensors[tensor_index]
candidates = self._get_candidates(allocate_table, use_def_table, buffer_size, unit_size, tensor)
if not candidates:
return False
success = False
for candidate in candidates:
self._update_allocation_table(allocate_table, use_def_table, tensor, candidate, candidate + tensor.size)
success = self.allocate_tensor(tensors, tensor_index + 1, allocate_table, use_def_table, buffer_size, unit_size)
if success:
break
else:
self._remove_allocate_table(allocate_table, tensor)
return success
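  # Note: allocate_tensor is a depth-first, first-fit backtracking search; it
  # tries each feasible offset for the current tensor, recurses on the
  # remaining tensors, and undoes the tentative placement via
  # _remove_allocate_table when a branch fails.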
def allocate_graph(self, ugraph, allocate_table, use_def_table, buffer_size, unit_size):
tensors = []
for node_name in ugraph.topo_order:
in_t_infos = [
tensor
for tensor in ugraph.ops_info[node_name].input_tensors
if tensor.op.op_type != 'Inline'
]
out_t_infos = [
tensor
for tensor in ugraph.ops_info[node_name].output_tensors
if tensor.op.op_type != 'Inline'
]
tensors.extend(in_t_infos)
tensors.extend(out_t_infos)
succ = self.allocate_tensor(tensors, 0, allocate_table, use_def_table, buffer_size, unit_size)
return succ
def _check(self, allocate_table, use_def_table, tensor, tensor_offset_start, tensor_offset_end):
valid = False
timestart = use_def_table[tensor.name]['start']
timeend = use_def_table[tensor.name]['end']
offset, length = self._query_offset_fromallocate_table(allocate_table, tensor_offset_start, tensor_offset_end)
timestart, timeend = self._query_time_fromallocate_table(allocate_table, timestart, timeend)
occupied = self._query_result(allocate_table, offset, length, timestart, timeend)
if not occupied:
valid = True
return valid
def _get_candidates(self, allocate_table, use_def_table, buffer_size, unit_size, in_o):
ret = []
for i in range(0, buffer_size, unit_size):
if self._check(allocate_table, use_def_table, in_o, i, i + in_o.size):
ret.append(i)
return ret
def _update_allocation_table(
self,
allocate_table,
use_def_table,
tensor,
offset_start,
offset_end
):
time_start = use_def_table[tensor.name]['start']
time_end = use_def_table[tensor.name]['end']
attribute = dict()
attribute['start'] = time_start
attribute['end'] = time_end
attribute['offsetstart'] = offset_start
attribute['offsetend'] = offset_end
allocate_table[tensor.name] = attribute
return allocate_table
def _remove_allocate_table(self, allocate_table, tensor):
del allocate_table[tensor.name]
def _create_resource_table(self, ugraph):
resource_table = dict()
len_map = {
op_name: idx
for idx, op_name in enumerate(ugraph.topo_order)
}
for node_name in ugraph.topo_order:
for tensor_info in ugraph.ops_info[node_name].input_tensors:
if tensor_info.name not in resource_table:
lifetime = dict()
lifetime['start'] = len_map[node_name]
lifetime['end'] = len_map[node_name]
resource_table[tensor_info.name] = lifetime
resource_table[tensor_info.name]['end']= len_map[node_name]
for outtensor in ugraph.ops_info[node_name].output_tensors:
if outtensor.name not in resource_table:
lifetime = dict()
lifetime['start'] = len_map[node_name]
lifetime['end'] = len_map[node_name]
resource_table[outtensor.name] = lifetime
return resource_table
@TransformerPipeline.register_transformer
class BiasAddTransformer(Transformer):
METHOD_NAME = 'biasAdd'
KWARGS_NAMESCOPE = '_utensor_biasAdd'
def transform(self, ugraph):
for node_name in ugraph.topo_order:
op_type = ugraph.ops_info[node_name].op_type
if op_type == 'QuantizedBiasAdd':
op_info = ugraph.ops_info[node_name]
op_info.op_type = 'QuantizedAdd'
elif op_type == 'BiasAdd':
op_info = ugraph.ops_info[node_name]
op_info.op_type = 'Add'
return ugraph
@TransformerPipeline.register_transformer
class InlineTransformer(Transformer):
METHOD_NAME = 'inline'
KWARGS_NAMESCOPE = '_utensor_inline'
def transform(self, ugraph):
for node_name in ugraph.topo_order:
op_type = ugraph.ops_info[node_name].op_type
if op_type == 'Const':
op_info = ugraph.ops_info[node_name]
op_info.op_type = 'Inline'
return ugraph
@TransformerPipeline.register_transformer
class DropoutTransformer(Transformer):
"""Dropout removal transformer
Pros
====
- Insensitive to the dropout layer pattern so it works across different
versions of tensorflow
Cons
====
  - naming constraints on the dropout layers: the layer name must match the
  given `name_pattern` (default to r'(dropout[_\w\d]*)/.*') and the keep_prob
  op name must start with 'keep_prob'
"""
METHOD_NAME = 'dropout'
KWARGS_NAMESCOPE = '_utensor_dropout'
TARGET_NODENAME_PATTERN = re.compile(r'(dropout[_\w\d]*)/.*')
def __init__(self, name_pattern=r'(dropout[_\w\d]*)/.*'):
self._op_name_pattern = re.compile(name_pattern)
def transform(self, ugraph):
new_graph = uTensorGraph(output_nodes=ugraph.output_nodes)
dropout_input_map = self._find_input(ugraph)
new_ops_info = {}
for node_name in ugraph.ops_info:
match = self._op_name_pattern.match(node_name)
if match:
# ignore all dropout nodes
continue
# replace inputs with dropout inputs
op_info = ugraph.ops_info[node_name]
in_t_infos = [deepcopy(t_info, {'ugraph': new_graph})
for t_info in op_info.input_tensors]
out_t_infos = [deepcopy(t_info, {'ugraph': new_graph})
for t_info in op_info.output_tensors]
op_attr = deepcopy(op_info.op_attr)
for i, t_info in enumerate(in_t_infos):
op_name = parse_tensor_name(t_info.name)[0]
match = self._op_name_pattern.match(op_name)
if match:
name_scope = match.group(1)
          # assume there should be only one input except keep_prob
dropout_in_tensor = dropout_input_map[name_scope]
in_t_infos.pop(i)
in_t_infos.insert(i, dropout_in_tensor)
new_op_info = OperationInfo(name=op_info.name,
input_tensors=in_t_infos,
n_inputs=len(in_t_infos),
output_tensors=out_t_infos,
n_outputs=len(out_t_infos),
op_type=op_info.op_type,
backend=op_info.backend,
op_attr=op_attr,
ugraph=new_graph)
new_ops_info[node_name] = new_op_info
new_graph.ops_info = new_ops_info
new_graph._backend = ugraph._backend
return new_graph
def _find_dropout_clusters(self, ugraph):
clusters = defaultdict(lambda: [])
for node_name in ugraph.topo_order:
match = self._op_name_pattern.match(node_name)
if match:
name_scope = match.group(1)
clusters[name_scope].append(node_name)
return dict(clusters)
def _find_input(self, ugraph):
"""dropout_name --> input_tensor_info
    input_tensor_info := the tensor info of a tensor which is not generated
                         in the dropout namescope but is consumed by ops in
                         the dropout namescope, excluding tensors whose op
                         name starts with 'keep_prob'
"""
clusters = self._find_dropout_clusters(ugraph)
input_map = {}
for node_name in ugraph.topo_order:
match = self._op_name_pattern.match(node_name)
if match:
name_scope = match.group(1)
cluster = clusters[name_scope]
op_info = ugraph.ops_info[node_name]
for in_tensor_info in op_info.input_tensors:
in_op_name = in_tensor_info.op.name
if in_op_name not in cluster and not in_op_name.startswith('keep_prob'):
input_map[name_scope] = in_tensor_info
# assuming there is only one input for dropout
break
return input_map
@TransformerPipeline.register_transformer
class DropoutTransformerV2(Transformer):
"""Dropout removal transformer version 2
Implemented with subgraph matcher
Pros
====
- no naming requirements on the dropout layer and keep prob op
Cons
====
- sensitive to the dropout layer pattern. The pattern of dropout
  layer may differ across different versions of tensorflow, so this
transformer may fail to match the dropout layer if the given graph
is not using the same version
"""
METHOD_NAME = 'dropout_v2'
KWARGS_NAMESCOPE = '_utensor_dropout_v2'
@property
def pattern_ugraph(self):
graph = tf.Graph()
with graph.as_default():
dummy_x = tf.constant(np.random.rand(10, 10), dtype=tf.float32, name='dummy_x')
dummy_rate = tf.placeholder(dtype=tf.float32, name='dummy_rate')
dropout = tf.nn.dropout(dummy_x, rate=dummy_rate, name='dropout')
patrn_ugraph = GraphDefParser.parse(graph.as_graph_def(), output_nodes=[dropout.op.name])
# replace dummy_x
patrn_ugraph['dropout/truediv'].replace_with_null_input_tensor(0)
    # replace dummy_rate
patrn_ugraph['dropout/sub'].replace_with_null_input_tensor(1)
    # replace Shape Op
patrn_ugraph['dropout/random_uniform/RandomUniform'].replace_with_null_input_tensor(0)
patrn_ugraph = prune_graph(patrn_ugraph)
topologic_order_graph(patrn_ugraph)
return patrn_ugraph
def transform(self, ugraph):
new_ugraph = deepcopy(ugraph)
if new_ugraph.backend == 'tensorflow':
new_ugraph = self._transform_tf(new_ugraph)
else:
raise ValueError(
'only support dropout transformer for tensorflow: get {}'.format(new_ugraph.backend)
)
return new_ugraph
def _transform_tf(self, ugraph):
matcher = uTensorGraphMatcher(pattern_ugraph=self.pattern_ugraph)
matches = matcher.match(ugraph, n=1)
while matches:
match = matches[0]
ugraph = self._handle_match_tf(match)
matches = matcher.match(ugraph)
return ugraph
def _handle_match_tf(self, match):
subj_ugraph = match.subject_ugraph
subj_in_tensor = (
match.patrn2subj_op_map['dropout/truediv']
.input_tensors[0]
.op
.output_tensors[0]
)
subj_out_op = match.patrn2subj_op_map['dropout/mul']
subj_out_tensor = subj_out_op.output_tensors[0]
for op in subj_out_op.output_nodes:
for idx, tensor in enumerate(op.input_tensors):
if tensor.name == subj_out_tensor.name:
op.input_tensors[idx] = subj_in_tensor
for idx, op_name in enumerate(subj_ugraph.output_nodes):
if op_name == subj_out_op.name:
subj_ugraph.output_nodes[idx] = subj_in_tensor.op_name
match.subject_ugraph = prune_graph(subj_ugraph)
topologic_order_graph(match.subject_ugraph)
return match.subject_ugraph
@TransformerPipeline.register_transformer
class BatchNormTransformer(Transformer):
"""Replace Batch Norm namescope with uTensor Op
"""
METHOD_NAME = 'batch_norm'
KWARGS_NAMESCOPE = '_batch_norm'
def transform(self, ugraph):
# TODO: implement this!
    raise RuntimeError('batch norm transformer is not yet implemented')
@TransformerPipeline.register_transformer
class FakeGatherV2Transformer(Transformer):
"""Force converting GatherV2 op to Gather op
"""
METHOD_NAME = 'fake_gather_v2'
KWARGS_NAMESCOPE = '_fake_gatherv2'
def transform(self, ugraph):
logger.warning(
"enabling {} will force replacing GatherV2 with Gather".format(self.METHOD_NAME)
)
for key, op in ugraph.ops_info.items():
if op.op_type == "GatherV2":
op.op_type = "Gather"
ugraph.ops_info[key] = op
return ugraph
|
{"hexsha": "e6c668691f28ffee42205f518e050eb307a0e5ff", "size": 16634, "ext": "py", "lang": "Python", "max_stars_repo_path": "utensor_cgen/transformer/ns_transformer.py", "max_stars_repo_name": "dboyliao/utensor_cgen", "max_stars_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-29T17:40:49.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-29T17:40:49.000Z", "max_issues_repo_path": "utensor_cgen/transformer/ns_transformer.py", "max_issues_repo_name": "dboyliao/utensor_cgen", "max_issues_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-12-28T02:25:45.000Z", "max_issues_repo_issues_event_max_datetime": "2017-12-28T02:25:45.000Z", "max_forks_repo_path": "utensor_cgen/transformer/ns_transformer.py", "max_forks_repo_name": "dboyliao/utensor_cgen", "max_forks_repo_head_hexsha": "aacd3adf4ee2a521a8eb2e75807fe3c1c0d1e1e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-12-27T17:15:38.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-29T06:43:00.000Z", "avg_line_length": 36.9644444444, "max_line_length": 118, "alphanum_fraction": 0.6884092822, "include": true, "reason": "import numpy", "num_tokens": 3937}
|
# encoding: utf-8
"""
IndStateAnalyzer.py
"""
import sys
from lmfit import report_fit
import Meta as mt
import numba
from IndTypes import IndType
from ModellingMode import ModellingMode as mm
cumsum = lambda series : series.cumsum().iloc[:]
movavg_cumulative = lambda n, series : cumsum(series).iloc[:].rolling(window=n).mean()
movavg = lambda n, series : series.iloc[:].rolling(window=n).mean()
gaussMovAvg_cumulative = lambda n, std, series : cumsum(series) \
.iloc[:].rolling(window=n, win_type='gaussian').mean(std=std)
gaussMovAvg = lambda n, std, series : series.iloc[:].rolling(window=n, win_type='gaussian').mean(std=std)
pctChg = lambda series : series.pct_change()
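# Illustrative usage (column name assumed): movavg(7, df['MH']) gives a 7-day
# simple moving average of the raw series, while movavg_cumulative(7, df['MH'])
# first converts the series to cumulative totals before smoothing.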
class IndStateAnalyzer:
def __init__(self, df, mode):
self.df = df
self.mode = mode
def singleStateMetric(self, stateCode, indTypeName, routine):
pdf = self.df.filter(items=['Date', 'Status', stateCode])
for name in pdf.columns:
            if name in mt.IndStateAbbrMap:
pdf[name] = routine(pdf[name])
return pdf[pdf['Status'].str.contains(indTypeName)]
def guessAndFit(self, model, stateCode, fill,
status = IndType.CONFIRMED.value):
df = self.singleStateMetric(stateCode, status, lambda x:x)
mdf = self.paramsDatatable(df)
mdf = fill(mdf)
params = model.guess(mdf[stateCode], x=mdf['Date'].index)
"""
import numpy as np
import matplotlib.pyplot as plt
y_eval = model.eval(params, x=mdf['Date'].index)
plt.plot(mdf['Date'].index, y_eval)
plt.show() """
fdf = self.fittingDatatable(df)
fdf = fill(fdf)
result = model.fit(fdf[stateCode], params, x=fdf['Date'].index)
return [params, model, result]
def paramsDatatable(self, df):
if(self.mode == mm.FIRST_SECOND or self.mode == mm.FIRST_ALL):
mdf = df.head(365).tail(325).reset_index(drop=True)
elif (self.mode == mm.ALL or self.mode == mm.ALL_SECOND):
mdf = df.reset_index(drop=True)
return mdf
def fittingDatatable(self, df):
if(self.mode == mm.ALL_SECOND or self.mode == mm.FIRST_SECOND):
fdf = df.tail(150).reset_index(drop=True)
elif (self.mode == mm.ALL or self.mode == mm.FIRST_ALL):
fdf = df.reset_index(drop=True)
return fdf
def lorentzianModel(self, stateCode):
from lmfit.models import LorentzianModel
model = LorentzianModel()
return self.guessAndFit(model, stateCode, lambda x: x.fillna(0))
def expModel(self, stateCode):
from lmfit.models import ExponentialModel
model = ExponentialModel()
return self.guessAndFit(model, stateCode, lambda x: x.dropna())
def polyModel(self, stateCode):
from lmfit.models import PolynomialModel
model = PolynomialModel(4)
return self.guessAndFit(model, stateCode, lambda x: x.dropna())
def gaussianModel(self, stateCode):
from lmfit.models import GaussianModel
model = GaussianModel()
return self.guessAndFit(model, stateCode, lambda x: x.fillna(0))
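# --- Illustrative sketch (added example, not part of the original module) ---
# Quick demonstration of the rolling helpers defined at the top of this file
# on a toy pandas Series; the values are made up for demonstration only.
if __name__ == '__main__':
    import pandas as pd
    s = pd.Series([1.0, 2.0, 4.0, 8.0, 16.0])
    print(cumsum(s).tolist())     # running total: [1.0, 3.0, 7.0, 15.0, 31.0]
    print(movavg(2, s).tolist())  # 2-point moving average (first entry is NaN)
    print(pctChg(s).tolist())     # percent change between consecutive samples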
|
{"hexsha": "7c381b1088e6c95c0c8cf538e6c8e735629953cc", "size": 3241, "ext": "py", "lang": "Python", "max_stars_repo_path": "india-covid19india-py/Ind/IndStateAnalyzer.py", "max_stars_repo_name": "vpt101/covid-19_data_analysis", "max_stars_repo_head_hexsha": "1d02385ad75b650e584e119a8891433aa70e90d8", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "india-covid19india-py/Ind/IndStateAnalyzer.py", "max_issues_repo_name": "vpt101/covid-19_data_analysis", "max_issues_repo_head_hexsha": "1d02385ad75b650e584e119a8891433aa70e90d8", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "india-covid19india-py/Ind/IndStateAnalyzer.py", "max_forks_repo_name": "vpt101/covid-19_data_analysis", "max_forks_repo_head_hexsha": "1d02385ad75b650e584e119a8891433aa70e90d8", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4157303371, "max_line_length": 113, "alphanum_fraction": 0.6300524529, "include": true, "reason": "import numpy,import numba", "num_tokens": 809}
|
#include <boost/test/unit_test.hpp>
#include <scorum/chain/database/database.hpp>
#include <scorum/chain/database_exceptions.hpp>
#include <scorum/chain/schema/scorum_objects.hpp>
#include <scorum/chain/schema/account_objects.hpp>
#include <scorum/chain/services/account.hpp>
#include <scorum/chain/services/comment.hpp>
#include <scorum/chain/services/comment_statistic.hpp>
#include "database_default_integration.hpp"
#include <string>
#include <map>
using namespace database_fixture;
BOOST_FIXTURE_TEST_SUITE(comment_beneficiaries_tests, database_default_integration_fixture)
struct comment_benefactor_reward_visitor
{
database& _db;
std::map<account_name_type, asset> reward_map;
comment_benefactor_reward_visitor(database& db)
: _db(db)
{
}
void operator()(const comment_benefficiary_reward_operation& op)
{
reward_map.insert(std::make_pair(op.benefactor, op.reward));
}
template <typename Op> void operator()(Op&&) const
{
} /// ignore all other ops
};
BOOST_AUTO_TEST_CASE(old_tests)
{
try
{
BOOST_TEST_MESSAGE("Test Comment Beneficiaries");
ACTORS((alice)(bob)(sam)(dave))
vest("alice", ASSET_SCR(100e+3));
vest("bob", ASSET_SCR(100e+3));
vest("sam", ASSET_SCR(100e+3));
vest("dave", ASSET_SCR(100e+3));
generate_block();
comment_operation comment;
vote_operation vote;
comment_options_operation op;
comment_payout_beneficiaries b;
signed_transaction tx;
comment.author = "alice";
comment.permlink = "test";
comment.parent_permlink = "test";
comment.title = "test";
comment.body = "foobar";
tx.operations.push_back(comment);
tx.set_expiration(db.head_block_time() + SCORUM_MIN_TRANSACTION_EXPIRATION_LIMIT);
tx.sign(alice_private_key, db.get_chain_id());
db.push_transaction(tx);
BOOST_TEST_MESSAGE("--- Test failure on more than 8 benefactors");
b.beneficiaries.push_back(beneficiary_route_type(account_name_type("bob"), SCORUM_1_PERCENT));
for (size_t i = 0; i < 8; i++)
{
b.beneficiaries.push_back(beneficiary_route_type(
account_name_type(TEST_INIT_DELEGATE_NAME + fc::to_string(i)), SCORUM_1_PERCENT));
}
op.author = "alice";
op.permlink = "test";
op.allow_curation_rewards = false;
op.extensions.insert(b);
tx.clear();
tx.operations.push_back(op);
tx.sign(alice_private_key, db.get_chain_id());
SCORUM_REQUIRE_THROW(db.push_transaction(tx), plugin_exception);
BOOST_TEST_MESSAGE("--- Test specifying a non-existent benefactor");
b.beneficiaries.clear();
b.beneficiaries.push_back(beneficiary_route_type(account_name_type("doug"), SCORUM_1_PERCENT));
op.extensions.clear();
op.extensions.insert(b);
tx.clear();
tx.operations.push_back(op);
tx.sign(alice_private_key, db.get_chain_id());
SCORUM_REQUIRE_THROW(db.push_transaction(tx), fc::assert_exception);
BOOST_TEST_MESSAGE("--- Test setting when comment has been voted on");
vote.author = "alice";
vote.permlink = "test";
vote.voter = "bob";
vote.weight = SCORUM_PERCENT(100);
b.beneficiaries.clear();
b.beneficiaries.push_back(beneficiary_route_type(account_name_type("bob"), 25 * SCORUM_1_PERCENT));
b.beneficiaries.push_back(beneficiary_route_type(account_name_type("sam"), 50 * SCORUM_1_PERCENT));
op.extensions.clear();
op.extensions.insert(b);
tx.clear();
tx.operations.push_back(vote);
tx.operations.push_back(op);
tx.sign(alice_private_key, db.get_chain_id());
tx.sign(bob_private_key, db.get_chain_id());
SCORUM_REQUIRE_THROW(db.push_transaction(tx), fc::assert_exception);
BOOST_TEST_MESSAGE("--- Test success");
tx.clear();
tx.operations.push_back(op);
tx.sign(alice_private_key, db.get_chain_id());
db.push_transaction(tx);
BOOST_TEST_MESSAGE("--- Test setting when there are already beneficiaries");
b.beneficiaries.clear();
b.beneficiaries.push_back(beneficiary_route_type(account_name_type("dave"), 25 * SCORUM_1_PERCENT));
op.extensions.clear();
op.extensions.insert(b);
tx.sign(alice_private_key, db.get_chain_id());
SCORUM_REQUIRE_THROW(db.push_transaction(tx), fc::assert_exception);
BOOST_TEST_MESSAGE("--- Payout and verify rewards were split properly");
tx.clear();
tx.operations.push_back(vote);
tx.sign(bob_private_key, db.get_chain_id());
db.push_transaction(tx, 0);
generate_blocks(db.obtain_service<dbs_comment>().get("alice", std::string("test")).cashout_time
- SCORUM_BLOCK_INTERVAL);
BOOST_REQUIRE_EQUAL(db.account_service().get_account("bob").balance, ASSET_SCR(0));
BOOST_REQUIRE_EQUAL(db.account_service().get_account("sam").balance, ASSET_SCR(0));
asset bob_sp_before = db.account_service().get_account("bob").scorumpower;
asset sam_sp_before = db.account_service().get_account("sam").scorumpower;
comment_benefactor_reward_visitor visitor(db);
db.post_apply_operation.connect([&](const operation_notification& note) { note.op.visit(visitor); });
generate_block();
validate_database();
BOOST_REQUIRE_EQUAL(visitor.reward_map.size(), size_t(2));
BOOST_REQUIRE(visitor.reward_map.find("bob") != visitor.reward_map.end());
BOOST_REQUIRE(visitor.reward_map.find("sam") != visitor.reward_map.end());
BOOST_REQUIRE_EQUAL(visitor.reward_map["bob"],
(db.account_service().get_account("bob").scorumpower - bob_sp_before));
BOOST_REQUIRE_EQUAL(visitor.reward_map["sam"],
(db.account_service().get_account("sam").scorumpower - sam_sp_before));
// clang-format off
const auto &alice_post_id = db.obtain_service<dbs_comment>().get("alice", std::string("test")).id;
BOOST_REQUIRE_EQUAL(db.obtain_service<dbs_comment_statistic_sp>().get(alice_post_id).beneficiary_payout_value,
(visitor.reward_map["sam"] + visitor.reward_map["bob"]));
// clang-format on
}
FC_LOG_AND_RETHROW()
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "a24d59595e962a4b74ab3fb49dfcac5da4370d91", "size": 6496, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/chain_tests/rewards/comment_beneficiaries_apply_tests.cpp", "max_stars_repo_name": "scorum/scorum", "max_stars_repo_head_hexsha": "1da00651f2fa14bcf8292da34e1cbee06250ae78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53.0, "max_stars_repo_stars_event_min_datetime": "2017-10-28T22:10:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:20:48.000Z", "max_issues_repo_path": "tests/chain_tests/rewards/comment_beneficiaries_apply_tests.cpp", "max_issues_repo_name": "Scorum/Scorum", "max_issues_repo_head_hexsha": "fb4aa0b0960119b97828865d7a5b4d0409af7876", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 38.0, "max_issues_repo_issues_event_min_datetime": "2017-11-25T09:06:51.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-31T09:17:22.000Z", "max_forks_repo_path": "tests/chain_tests/rewards/comment_beneficiaries_apply_tests.cpp", "max_forks_repo_name": "Scorum/Scorum", "max_forks_repo_head_hexsha": "fb4aa0b0960119b97828865d7a5b4d0409af7876", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27.0, "max_forks_repo_forks_event_min_datetime": "2018-01-08T19:43:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T10:50:42.000Z", "avg_line_length": 36.4943820225, "max_line_length": 118, "alphanum_fraction": 0.6673337438, "num_tokens": 1429}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module for the Numerov Schrödinger equation solver
Description: This module defines all the necessary functions that are used in the main script Numerov.py
author: Félix Desrochers
email: felix.desrochers@polymtl.ca
MIT License
Copyright (c) 2017 Félix Desrochers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
####################################
#Importing the necessary modules
####################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
######################################################################################
# 1) Potential functions
# These functions are used to modify and be sure that the entered potential is correct
######################################################################################
def ModifyPotential(potential):
"""This fonction replaces any mathematical expression that is usually used but that is incorect in python.
Parameters:
----------
potential (str) : a string that indicates the mathematical form of the potential
Returns:
--------
potential (str) : a new potential that now has changed any mathematical expression that is usually used but that is incorrect in python
For instance:
x^2 -> x**2
|x| -> math.fabs(x)
"""
#Relacing exponential
potential = potential.replace('^','**')
#Replacing absolute value
pot_list = potential.rsplit('|')
for i in [ i for i in range(1,(len(pot_list)-1)*2) if i%2==1 ]:
insertion = 'np.absolute(' if i%4 ==1 else ')'
pot_list.insert(i,insertion)
potential=''.join(pot_list)
#Replacing trigonometric functions
potential = potential.replace('cos','np.cos')
potential = potential.replace('sin','np.sin')
potential = potential.replace('tan','np.tan')
return potential
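# Illustrative check of the rewriting above (added example): the caret,
# absolute-value bars and bare trig names are all mapped to valid NumPy/Python syntax.
assert ModifyPotential('x^2 + |x| + cos(x)') == 'x**2 + np.absolute(x) + np.cos(x)'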
def VerifySyntaxPotential(potential):
""" Verify if the potential entered has an invalid syntax and demands another potential untils there is no more syntax error
Parameters:
-----------
potential (str) : a string that indicates the mathematical form of the potential
Returns:
--------
potential (str) : a new string with a valid python mathematical syntax
"""
i=0
while i == 0:
#Tries to evaluate the potential at x=0 and asks for a new one until there is no more syntax error
try:
x=0
eval(potential)
except SyntaxError:
            potential = input('The potential\'s syntax is incorrect, enter a new one: ')
potential = ModifyPotential(potential)
else:
i=1
return potential
def VerifyLimitsPotential(potential):
"""Verify if the potential seems to verify the borders conditions (to allow bound states). If it doesn't it ask to the user if he is sure that the potential respects these conditions
Parameters:
-----------
potential (str) : a string that indicates the mathematical form of the potential
Returns:
--------
potential (str) : a new string with a valid python mathematical syntax and with value bigger than V(x=0) for x=-100 and x=100
"""
#Verify if the potential is bigger than V(x=0) for x=100 and x=-100
i=1
while i == 1:
eval_pot = list()
x=-100
eval_pot.append(eval(potential))
x=100
eval_pot.append(eval(potential))
eval_pot = np.array(eval_pot)
x = 0
#if it doesn't respect the condition ask for a new potential
        if (eval_pot < eval(potential)).any():
            QuestionPotential = input('The potential doesn\'t seem to be correct. Are you sure it corresponds to a bound state (y/n)? ')
if QuestionPotential == 'n':
potential = input('Enter a new potential: ')
#Check the syntax for the new potential
potential = ModifyPotential(potential)
potential = VerifySyntaxPotential(potential)
elif QuestionPotential == 'y':
i = 0
#If it respects the condition, exit the while loop
else :
i = 0
return potential
def GetFirstEnergyGuess(PotentialArray):
"""Defines the first energy level as a value between the the average potential and the minimum value. More explicitly: (1/50000)*((V_average + V_min)/2)
Parameters:
-----------
PotentialArray (numpy.ndarray) : a numpy array that contains the potential value between 'x_V_min' and 'x_V_max' at every interval of length 'Division'
Returns:
--------
    First_E_guess (float) : the first energy guess that will be used in the Numerov algorithm. It corresponds to the minimum of the potential plus (1/500000) times the sum of the
                            average and the minimum of the potential
"""
First_E_guess = PotentialArray.min() + (1/500000) * (PotentialArray.mean() + PotentialArray.min())
return First_E_guess
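# Worked example (added for illustration): for PotentialArray = [0., 1., 4.],
# min = 0 and mean ~ 1.667, so the first guess is 0 + (1/500000)*(1.667 + 0) ~ 3.3e-6,
# i.e. barely above the bottom of the well.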
def VerifyConcavity(PotentialArray, First_E_guess):
"""Evaluates the concavity of the potential and returns its value: positive if the concavity is correct or negative if it is incorrect. To be positive, the smallest meeting
point of an energy guess with the potential needs to have a negative derivative and the greatest meeting point needs to have a positive one. If the function finds no meeting point
then tries a smaller energy guess and restarts the process
Parameters:
-----------
PotentialArray (numpy.ndarray) : a numpy array that contains the potential value between 'x_V_min' and 'x_V_max' at every interval of length 'Division'
First_E_guess (float) : the first energy guess that will be used in the Numerov algorithm
Returns:
--------
    concavity (str) : a string that indicates the global concavity of the potential. It can either be positive if it respects the condition or negative if it doesn't
    First_E_guess (float) : the (possibly halved) energy guess that produced at least two meeting points
"""
i = 1
#Continue while it doesn't find meeting points
while i == 1:
print('First Energy guess:', First_E_guess)
index_min=list()
index_max=list()
#Tries to find meeting points and to compare them
try:
for i in range(0,len(PotentialArray)-2):
#Gets all the points where the potential meets the E_verify value and filters them depending on their derivatives
if PotentialArray[i] > First_E_guess and PotentialArray[i+1] < First_E_guess:
index_min.append(i)
elif PotentialArray[i] < First_E_guess and PotentialArray[i+1] > First_E_guess:
index_max.append(i)
elif PotentialArray[i] == First_E_guess:
if PotentialArray[i-1] > First_E_guess and PotentialArray[i+1] < First_E_guess:
index_min.append(i)
elif PotentialArray[i-1] < First_E_guess and PotentialArray[i+1] > First_E_guess:
index_max.append(i)
            #Defines the concavity value depending on the ordering of the meeting points
print('index max: ',index_max)
print('index_min: ',index_min)
if (max(index_max) > max(index_min)) and (min(index_max) > min(index_min)):
concavity = 'positive'
else:
concavity = 'negative'
#If we are not able to compare the potential, we define a new energy guess
except ValueError:
First_E_guess = First_E_guess/2
#If it is able to compare them, exit the loop
else:
i = 0
return concavity,First_E_guess
def EvaluateOnePotential(position,potential):
"""Defines a function that evaluate the potential at a certain point x. This function will be vectorized with np.vectorize to evaluate the potential on a list of position [x1,x2,...]
Parameters:
-----------
position (float) : a float that defines the x position where we want to evaluate the potential
potential (str) : a string that defines the mathematical expression of the potential
Returns:
--------
EvalPotential (float) : the potential value at the x position
"""
x = position
EvalPotential = eval(potential)
return EvalPotential
def TranslationPotential(PositionPotential, PotentialArray):
"""Checks approximately where the minimum of the potential is and outputs the necessary translation in x and y to recenter the minimum at x=0 and y=0
Parameters:
-----------
potential (str) : a string that defines the mathematical expression of the potential
Returns:
--------
trans_x (float) : the necessary x translation to replace the minimum of the potential at x=0
trans_y (float) : the necessary y translation to be sure that all the potential values are positive
"""
# i) Gets the minimum value for the potential and the translation in y
trans_y = PotentialArray.min()
#index = float(np.where(PotentialArray==trans_y)[0])
# ii) Defines the necessary translation in x
#trans_x = x_min + (Div * index)
#trans_x = PositionPotential[index]
# iii) Translates the potential
PotentialArray = PotentialArray - trans_y
#PositionPotential = PositionPotential - trans_x
#print('trans_x; ',trans_x)
    print('trans_y: ', trans_y)
return PositionPotential, PotentialArray
def TranslatePotential(potential,trans_x,trans_y):
    '''Modifies the potential expression so that its minimum value is 0 (the x translation is currently disabled)'''
#x translation
#potential = potential.replace('x','(x+' + str(trans_x) + ')')
#y translation
potential = potential + '-' + str(trans_y)
print(potential)
return potential
##################################################
# 2) Numerov algorithm functions
# Defines the functions used in the Numerov method
##################################################
#########################
# i) Initial Energy guess
def E_Guess(EnergyLevelFound, E_guess_try, iteration, First_E_guess):
"""Defines the energy guess depending on the energy levels that have been found and on the energy that have already been guessed.
Parameters:
-----------
    EnergyLevelFound (Dict) : a dictionary that contains the energy levels that have been found. Has the form : {nbr_nodes1:E1, nbr_nodes2:E2,...}
    E_guess_try (Dict) : a dictionary that contains the previous energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
    iteration (int) : the number of iterations in the Numerov algorithm
    First_E_guess (float) : the first energy guess. Has been defined previously.
Returns:
--------
    E_guess (float) : the energy guess that will be used in the current Numerov algorithm iteration. We always want to find the smallest energy level that hasn't been discovered yet, so
                      we define it from the previous energy guesses that have been made (with E_guess_try).
"""
print('Iteration: ',iteration)
#If it is the first time, return the first energy level of the quantum harmonic oscillator
if iteration == 1:
E_guess = First_E_guess #Takes as intial guess the First_E_guess that has previously been defined
return E_guess
# I) Define the energy level that we want to find E_level_guess (the lowest energy level that hasn't been found yet)
#List for the energy that have been found
Lvl_found = list(EnergyLevelFound.keys())
Lvl_found.sort()
#Gets the energy level that we want to find
E_level_missing = [index for index,Energy in enumerate(Lvl_found) if not Energy <= index]
if not E_level_missing:
if not Lvl_found:
E_level_guess = 0
else:
E_level_guess = max(Lvl_found) +1
else:
E_level_guess = min(E_level_missing)
# II) Defining the energy guess depending on the guess that have already been done (E_guess_try)
    #Finds the closest energy level (number of nodes) that has been guessed and that corresponds to a smaller or equal number of nodes than E_level_guess
try:
E_level_smaller = max([ E for E in E_guess_try.keys() if E <= E_level_guess ])
except ValueError:
E_level_smaller = None
    #Finds the closest energy level (number of nodes) that has been guessed and that corresponds to a bigger number of nodes than E_level_guess
try:
E_level_bigger = min([ E for E in E_guess_try.keys() if E > E_level_guess ])
except ValueError:
E_level_bigger = None
#Define the energy guess
#If the smaller and higher exist take the average
if (not E_level_smaller == None) and (not E_level_bigger ==None):
E_guess = ( E_guess_try[E_level_smaller][1] + E_guess_try[E_level_bigger][0] ) / 2
#If only the higher exists take the half
elif not E_level_bigger == None:
E_guess = E_guess_try[E_level_bigger][0]/2
#If only the smaller exists take the double
elif not E_level_smaller == None:
E_guess = E_guess_try[E_level_smaller][1] * 2
print('E_level_guess:', E_level_guess )
print('E_level_bigger: ', E_level_bigger)
print('E_level_smaller: ', E_level_smaller)
return E_guess
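# Worked example (added for illustration, assumed values): suppose only level 0 has
# been found, guesses with 1 node span [0.9, 1.0] and guesses with 2 nodes span
# [2.0, 2.2]. The target is then level 1, E_level_smaller = 1 and E_level_bigger = 2,
# so the next guess is the midpoint (1.0 + 2.0) / 2 = 1.5.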
##################################################################################
# ii) Setting the minimal and maximal points (where the wave function equals zero)
def MeetingPointsPotential(E_guess, PotentialArray, PositionPotential, E_guess_try):
"""Finds the minimal and maximal points where the energy that has been guessed is equal to the potential.
Parameters:
-----------
E_guess (float) : the guessed energy
PotentialArray (numpy.darray) : a Numpy array that contains the potential for certain points
    PositionPotential (numpy.darray) : a Numpy array that contains the positions that correspond to the potential array
    E_guess_try (Dict) : a dictionary that contains the previous energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
Returns:
--------
MeetingPoints (tuple) : a tuple of the smallest and biggest meeting point that has th form (Position_min, Position_max)
    end_program (bool) : a boolean that defines if we have to exit the Numerov while loop. end_program is true if we have made ten energy guesses and still haven't found
                         two meeting points and is false otherwise.
"""
#Initializing constant for the while loop
p = 1
iteration = 0
end_program = False
while p == 1:
#Finds all the meeting points
MeetingPoints = [None,None]
for i in range(0,len(PotentialArray)-2):
#Gets all the meeting points
if (PotentialArray[i] < E_guess and PotentialArray[i+1] > E_guess) or (PotentialArray[i] > E_guess and PotentialArray[i+1] < E_guess) or (PotentialArray[i] == E_guess):
#And filter them
if (MeetingPoints[0] == None) or (PositionPotential[i] < MeetingPoints[0]):
                    print('meeting point index min: ', i)
MeetingPoints[0] = PositionPotential[i]
elif (MeetingPoints[1] == None) or (PositionPotential[i] > MeetingPoints[1]):
MeetingPoints[1] = PositionPotential[i]
                    print('meeting point index max: ', i)
#If we have not found at least two meeting points, then make a new smaller energy guess and repeat for at most ten times
if (MeetingPoints[0] == None) or (MeetingPoints[1] == None):
            print('Resetting the energy guess!\n')
E_guess = (E_guess + max([k for j,k in E_guess_try.values() if k < E_guess]))/2
iteration += 1
print('E_guess: ',E_guess)
if iteration > 10:
end_program = True
break
else:
p = 0
MeetingPoints = tuple(MeetingPoints)
return MeetingPoints,end_program,E_guess
def DetermineMinAndMax(MeetingPoints):
"""This function determines the minimal and maximal position where the wave function will be set to 0 depending on the points where the potential meets the guess energy and on
the minimum and maximum that are initially set for the potential.
Parameter:
----------
MeetingPoints (tuple) : the minimum and maximum point where the potentil meets the guessed energy
E_guess (float) : The minimum value of the position for the potential
E_guess_try (Dict) : a dictionnary that contains the previous energy guess. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
PotentialArray (numpy.darray) : a Numpy array that contains the potential for certain points
PositionPotential (numpy.darray) : a Numpy array that contains the positions that correspond to the potential array
Returns:
--------
Position_min (float) : the minimum value where psi=0
Position_max (float) : the maximum value where psi=0
"""
    #Extends the domain on each side by the full distance between the min and max meeting points
Position_min = MeetingPoints[0] - (MeetingPoints[1] - MeetingPoints[0])/1
Position_max = MeetingPoints[1] + (MeetingPoints[1] - MeetingPoints[0])/1
return Position_min,Position_max
#######################################
# iii) Calculate the wave function
def WaveFunctionNumerov(potential, E_guess, nbr_division, Initial_augmentation, Position_min, Position_max):
"""This function calculates the wave function values depending on the x coordinate by using the Numerov method. The function returns a list that contains tuple with the x coordinate and
the wave function value.
Parameter:
----------
potential (str) : a string that defines the mathematical form of the potential
    nbr_division (int) : defines the number of divisions in the wave function, which is equivalent to the number of iterations to be made
Initial_augmentation (float) : Defines the initial augmentation after the minimal x point where the wave function is set to zero
Position_min : the minimum value where psi=0
Position_max : the maximum value where psi=0
Returns:
--------
WaveFunction (list) : Defines the wave function. Has the general form: [(x0, psi(x0)), (x1, psi(x1)), ...]
"""
#Initializing the wave function
WaveFunction = []
#Setting the divisions
Division = (Position_max - Position_min) / nbr_division
#Setting the first values of the wave function
WaveFunction.append((float(Position_min),0))
WaveFunction.append((float(Position_min+Division), Initial_augmentation))
    #Defining an array and an index to use in the for loop
index = 0
PositionArray = np.arange(Position_min, Position_max, Division)
#Calculating the wave function for other values
for i in np.arange(Position_min + (2 * Division), Position_max, Division):
#Evaluating the potential
#For V_i+1
x = i
V_plus1 = eval(potential)
#For V_i
x = PositionArray[index+1]
V = eval(potential)
#For V_i-1
x = PositionArray[index]
V_minus1 = eval(potential)
#Setting the k**2 values ( where k**2 = (2m/HBar)*(E-V(x)) )
k_2_plus1 = 2 * (E_guess - V_plus1)
k_2 = 2 * (E_guess - V)
k_2_minus1 = 2 * (E_guess - V_minus1)
#Calculating the wave function
psi = ((2 * (1 - (5/12) * (Division**2) * (k_2)) * (WaveFunction[-1][1])) - (1 + (1/12) * (Division**2) * k_2_minus1 ) * (WaveFunction[-2][1])) / (1 + (1/12) * (Division**2) * k_2_plus1)
#Saving the wave function and the x coordinate
WaveFunction.append((i,psi))
#Incrementing the index
index += 1
return WaveFunction
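# For reference (matches the update above), the Numerov three-term recurrence is
#   psi_{i+1} = [ 2*(1 - 5*h**2*k_i**2/12)*psi_i - (1 + h**2*k_{i-1}**2/12)*psi_{i-1} ]
#               / (1 + h**2*k_{i+1}**2/12)
# with h the step size and k**2(x) = 2*(E - V(x)) in atomic units (hbar = m = 1).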
########################################################
# iv) Determine the number of nodes in the wave function
def NumberNodes(WaveFunction):
"""This function evaluates the number of nodes in the wavefunction. The number of nodes will allow us the determine the energy level to which a certain wave function corresponds.
Parameter:
----------
WaveFunction (list) : Defines the wave function. Has the general form: [(x0, psi(x0)), (x1, psi(x1)), ...]
Returns:
--------
    NumberOfNodes (int) : Defines the number of nodes in the wave function (the number of times the function crosses the x axis). The number of nodes in a wave function
                          corresponds to the energy level of that wave function
PositionNodes (list) : Defines the x position of all the nodes. Has the form : [position_nodes_1, position_nodes_2, ...]
    x_max (float) : the greatest x coordinate of the wave function. Corresponds to the maximum position in WaveFunction
"""
#Initialize the number of nodes and their position
NumberOfNodes = 0
PositionNodes = list()
#Calculate the number of nodes
for i in range(1,len(WaveFunction)-1):
if (WaveFunction[i][1] > 0 and WaveFunction[i+1][1] < 0) or (WaveFunction[i][1] < 0 and WaveFunction[i+1][1] > 0) or (WaveFunction[i][1] == 0):
NumberOfNodes += 1
PositionNodes.append(WaveFunction[i][0])
#Gets the biggest position
x = list()
for position,wave in WaveFunction:
x.append(position)
x_max = max(x)
return NumberOfNodes,PositionNodes,x_max
#####################################################
# v) Verify if wave function respects the restriction
def VerifyTolerance(WaveFunction, Tolerance, E_guess, E_guess_try, NumberOfNodes):
"""See if the wave function for the given energy level respects the tolerance. The tolerance is defined in the parameters of the Numerov.py script. The tolerance is respected
if the last value of the wave function is smaller than this tolerance or if two energy guess are very very close (ratio of 0.9999999999). The function return yes in this case
and no otherwise.
Parameter:
----------
WaveFunction (list) : Defines the wave function. Has the general form: [(x0, psi(x0)), (x1, psi(x1)), ...]
    Tolerance (float) : Defines the tolerance which the wave function must respect
    E_guess (float) : the guessed energy
    E_guess_try (Dict) : a dictionary that contains the previous energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
    NumberOfNodes (int) : Defines the number of nodes in the wave function (the number of times the function crosses the x axis). The number of nodes in a wave function
                          corresponds to the energy level of that wave function
Returns:
--------
    VerificationTolerance (str) : defines if the wave function respects the condition. Has the value 'yes' if it respects them and 'no' otherwise
"""
# i) Checks if the last value of the wave function respects the tolerance
VerificationTolerance = 'yes' if np.absolute(WaveFunction[-1][1]) < Tolerance else 'no'
print('Last value Wave Function: ', WaveFunction[-1][1])
# ii) Checks if the energy guess doesn't change a lot
try:
E_minus = E_guess_try[NumberOfNodes][1]
E_plus = E_guess_try[NumberOfNodes + 1][0]
except KeyError:
pass
else:
if (E_guess < E_plus and E_guess > E_minus) and ((E_minus/E_plus) > 0.9999999999) :
VerificationTolerance = 'yes'
return VerificationTolerance
def CorrectNodeNumber(NumberOfNodes, PositionNodes, x_max, E_guess, E_guess_try):
"""This function corrects the number of nodes. So it removes a node if it is too close to the maximum value where \psi(x) is set to zero or if the E_guess doesn't correspond the the
energy levels defined by the number of nodes.
Parameter:
----------
    NumberOfNodes (int) : Defines the number of nodes in the wave function (the number of times the function crosses the x axis). The number of nodes in a wave function
                          corresponds to the energy level of that wave function
    PositionNodes (list) : Defines the x position of all the nodes. Has the form : [position_nodes_1, position_nodes_2, ...]
    x_max (float) : the greatest x coordinate of the wave function
    E_guess (float) : the guessed energy
    E_guess_try (Dict) : a dictionary that contains the previous energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
Returns:
--------
NumberOfNodesCorrected(int) : the corrected number of nodes
"""
NumberOfNodesCorrected = NumberOfNodes
#Correct the number of nodes if E_guess is between the lowest energy for this number of nodes and the maximum for the number of nodes - 1
try:
if (E_guess_try[NumberOfNodes][1] > E_guess) and (E_guess_try[NumberOfNodes - 1][1] < E_guess):
NumberOfNodesCorrected -= 1
    #If the dictionary E_guess_try doesn't contain these keys, check if the last node is close to the maximum x value x_max
except KeyError:
        if PositionNodes and (max(PositionNodes) / x_max) > 0.94:  # assumes the intended check is a 94% ratio; the original list/float division would raise a TypeError
NumberOfNodesCorrected -= 1
return NumberOfNodesCorrected
#######################################################
# vi) Saves energy and the correponding number of nodes
def SaveEnergy(NumberOfNodes, E_guess, E_guess_try):
"""This function saves the guessed energy and the number of nodes corresponding to it.
Parameter:
----------
    NumberOfNodes (int) : Defines the number of nodes in the wave function (the number of times the function crosses the x axis). The number of nodes in a wave function
                          corresponds to the energy level of that wave function
    E_guess (float) : the guessed energy
    E_guess_try (Dict) : a dictionary that contains the previous energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
Returns:
--------
    E_guess_try (Dict) : the updated dictionary of energy guesses. Has the form : {nbr_nodes1:[E_min,E_max], nbr_nodes2:[E_min,E_max],...}
"""
#Checks if the key Number of Nodes exists. If it doesn't, define the two values in the list corresponding to the key NumberOfNodes as E_guess.
try:
E_guess_try[NumberOfNodes]
except KeyError:
E_guess_try[NumberOfNodes] = [E_guess, E_guess]
return E_guess_try
#Checks if the energy guess is smaller than the smallest value in the list
if E_guess < E_guess_try[NumberOfNodes][0]:
E_guess_try[NumberOfNodes][0] = E_guess
#Checks if the energy guess is greater than the biggest value in the list
elif E_guess > E_guess_try[NumberOfNodes][1]:
E_guess_try[NumberOfNodes][1] = E_guess
return E_guess_try
#####################################
# 3) Output (Energy levels and figure)
####################################
#############################
# i) output the energy levels
def OuputEnergy(EnergyLevelFound):
for i,Energy in EnergyLevelFound.items():
print('Energy level', i, ':', Energy)
############################
# ii) Draw the figure
#Define the wave funcions to plot, the lines corresponding to these wave function and the energy lines
def DefineWhatToPlot(WaveFunctionFound, EnergyLevelFound):
"""This functions defines what to plot in the figure with the wave function and the corresponding energy levels.
Parameter:
----------
E_guess_tr (Dict) : a dictionnary that contains the wave function that respecetd the tolerance. Has the form {nbr_nodes1:WaveFunction1, nbr_nodes2:WaveFunction2, ...}
EnergyLevelFound (Dict) : a dictionnary that contains the energy guess that respecetd the tolerance. Has the form {nbr_nodes1:E1, nbr_nodes2:E2, ...}
Returns:
--------
y_max (float) : defines the maximum limit in the y axis that will be set. Correspond to 1.1 times the greatest energy level found
min_x (float) : defines the minium x value of the x axis. Correspond to the smallest x coordinate of all the wave function.
max_x (float) : defines the maximum x value of the x axis. Correspond to the greatest x coordinate of all the wave function.
WavPlot (list) : contains tuples that contain the numpy arrays with the x and y coordinates of the wave functions
WavLines (list) : contains tuples that contain numpy arrays with the x and y array of a line that passes in the middle of the wave function
EnergyLines (list) : contains tuples that contain numpy arrays with the x and y array of a line that defines an energy level
"""
# i) Determine the maximum energy to set the maximum value for the y axis
y_max = 1.1*EnergyLevelFound[max(EnergyLevelFound)]
Y_by_E_level = (y_max/(max(EnergyLevelFound)+2))
# ii) For the wave function
WavPlot = []
for i in WaveFunctionFound.keys():
x=[]
y=[]
for j in range(400,len(WaveFunctionFound[i])-240):
if not (j > 3750 and np.absolute(WaveFunctionFound[i][j][1]) > (max(y)*0.07)):
x.append(WaveFunctionFound[i][j][0])
y.append(WaveFunctionFound[i][j][1])
x = np.array(x)
y = np.array(y)
mult = (0.9 * Y_by_E_level)/(2 * y.max())
y = (mult * y) + (Y_by_E_level * (i+1))
WavPlot.append((x,y))
# iii) Determines the min and max in x
min_x = x.min()
max_x = x.max()
# iv) Get lines to where the wave function is centered
WavLines = []
for i in WaveFunctionFound.keys():
Wav_line_y=[]
for j in range(len(x)):
Wav_line_y.append(Y_by_E_level * (i+1))
WavLines.append((x,Wav_line_y))
# v) get lines for all the Energy levels
EnergyLines = []
for i in WaveFunctionFound.keys():
En_y = []
for j in range(len(x)):
En_y.append(EnergyLevelFound[i])
EnergyLines.append((x,En_y))
return y_max, min_x, max_x, WavPlot, WavLines, EnergyLines
#Draw the wave Functions, the energy levels and sets the axis limits
def DrawWaveFunction(y_max, min_x, max_x, WavPlot, WavLines, EnergyLines, PositionPotential, PotentialArray):
"""This functions plots a figure with two subplots: the first contains the potential and a line for each energy level and the second contains the potential and
animation of the real and imaginary part of the wave function (note that the wave function aren't centered at their corresponding energy level but are equally spaced
for visibility)
Parameter:
----------
    y_max (float) : defines the maximum limit in the y axis that will be set. Corresponds to 1.1 times the greatest energy level found
    min_x (float) : defines the minimum x value of the x axis. Corresponds to the smallest x coordinate of all the wave functions.
    max_x (float) : defines the maximum x value of the x axis. Corresponds to the greatest x coordinate of all the wave functions.
WavPlot (list) : contains tuples that contain the numpy arrays with the x and y coordinates of the wave functions
WavLines (list) : contains tuples that contain numpy arrays with the x and y array of a line that passes in the middle of the wave function
EnergyLines (list) : contains tuples that contain numpy arrays with the x and y array of a line that defines an energy level
PotentialArray (numpy.darray) : a Numpy array that contains the potential for certain points
PositionPotential (numpy.darray) : a Numpy array that contains the positions that correspond to the potential array
Returns:
--------
"""
###################################################################################################
# i) Define a new figure with two subplot: the energy levels and the corresponding wave function
f,(En,Wav) = plt.subplots(1,2,sharey=True)
#Set figure title
f.suptitle("Schrödinger equation solutions",fontsize=20,fontweight='bold')
################################
# ii) Draw the wave functions
lines = [Wav.plot(x,y,'b',label=r"$Re(\psi(x))$",zorder=3)[0] for x,y in WavPlot]
lines2 = [Wav.plot(x,y,'m',label=r"$Im(\psi(x))$",zorder=3)[0] for x,y in WavPlot]
for x,y in WavLines:
Wav.plot(x,y,'k--',zorder=1)
#Sets the axis limits
Wav.axis([min_x, max_x, 0, y_max])
#Draw the potential
Wav.plot(PositionPotential, PotentialArray, 'r',label='Potential',zorder=2)
################################
# iii) Draw the Energy levels
i = 0
for x,y in EnergyLines:
PlotColor = cm.viridis(i/len(EnergyLines))
En.plot(x,y,'--',color=PlotColor,label='E'+str(i),zorder=2)
i+=1
#Set the axis limit
En.axis([min_x, max_x, 0, y_max])
#Draw the potential
En.plot(PositionPotential, PotentialArray, 'r',label='Potential',zorder=1)
####################################################
# iv) Sets differents aesthetic components
#For the wave function set the title and the axis title
Wav.set_xlabel(r'x ($a_0$)')
Wav.set_title('Wave Function',fontsize=14)
#Verify if the labels reappear multiple times and set legend for the wave function
handles, labels = plt.gca().get_legend_handles_labels()
newLabels, newHandles = [], []
for handle, label in zip(handles, labels):
if label not in newLabels:
newLabels.append(label)
newHandles.append(handle)
leg1 = Wav.legend(newHandles, newLabels, loc='upper left', fontsize='x-small')
leg1.get_frame().set_alpha(1)
#Identify each wave function
for i in range(len(EnergyLines)):
Wav.text(((max_x - min_x) * 0.04) + min_x, WavLines[i][1][0] - (0.25 * (y_max/(len(EnergyLines)+2))), r'$\Psi_{%s}(x)$'%(i))
#For the energy levels set the title, the axis title and the legend
En.set_xlabel(r'x ($a_0$)')
En.set_ylabel('Energy (Hartree)')
En.set_title('Energy levels',fontsize=14)
leg2 = En.legend(loc='upper left', fontsize='x-small')
leg2.get_frame().set_alpha(1)
#################################
# v) Animate the wave function
def init():
for line in lines:
line.set_data([], [])
return lines
def UpdateData(t):
for j,line in enumerate(lines):
x = WavPlot[j][0]
y = ((WavPlot[j][1] - (WavLines[j][1][0])) * np.cos(EnergyLines[j][1][0]*t/20)) + (WavLines[j][1][0])
line.set_data(x,y)
for j,line in enumerate(lines2):
x = WavPlot[j][0]
y = ((WavPlot[j][1] - (WavLines[j][1][0])) * np.sin(EnergyLines[j][1][0]*t/20)) + (WavLines[j][1][0])
line.set_data(x,y)
return lines,lines2
anim = animation.FuncAnimation(f, UpdateData, init_func=init, interval=10, blit=False, repeat=True, save_count=300, )
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5, forward=True)
plt.show()
#Saving the animation
#anim.save('Schrod.gif', writer='imagemagick', dpi=100, fps=25)
#anim.save('Schrod.gif', writer='ffmpeg', dpi=100, fps=25)
|
{"hexsha": "fcc8a8ba2419620562ad332e63e2510048c01f28", "size": 36253, "ext": "py", "lang": "Python", "max_stars_repo_path": "Fct_Numerov.py", "max_stars_repo_name": "Guo-Wen-Xiang/Numerov", "max_stars_repo_head_hexsha": "035a301e50697063e0c89d84192729c198803d1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2017-07-28T18:24:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:10:20.000Z", "max_issues_repo_path": "Fct_Numerov.py", "max_issues_repo_name": "Guo-Wen-Xiang/Numerov", "max_issues_repo_head_hexsha": "035a301e50697063e0c89d84192729c198803d1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-05-04T16:50:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-11T21:57:49.000Z", "max_forks_repo_path": "Fct_Numerov.py", "max_forks_repo_name": "Guo-Wen-Xiang/Numerov", "max_forks_repo_head_hexsha": "035a301e50697063e0c89d84192729c198803d1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-02-12T17:10:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-16T08:31:09.000Z", "avg_line_length": 41.4794050343, "max_line_length": 194, "alphanum_fraction": 0.6503461782, "include": true, "reason": "import numpy", "num_tokens": 8492}
|
"""VectorSpaceData tests.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
Matthias Rupp 2019-2020, Citrine Informatics.
"""
import pytest
import numpy as np
import smlb
from test_data import validate_data_interface
def test_VectorSpaceData_instantiation():
"""Test VectorSpaceData initialization"""
# unlabeled, no domain
ds = smlb.VectorSpaceData(dimensions=3)
validate_data_interface(ds)
assert ds.dimensions == 3 and ds.domain is None
assert not ds.is_labeled and not ds.is_finite
assert (ds.samples([[1, 2, 3], [4, 5, 6]]) == [[1, 2, 3], [4, 5, 6]]).all()
with pytest.raises(smlb.BenchmarkError):
ds.labels()
# unlabeled, domain
ds = smlb.VectorSpaceData(dimensions=3, domain=(1, 5))
validate_data_interface(ds)
assert ds.dimensions == 3 and len(ds.domain) == 3
assert not ds.is_labeled and not ds.is_finite
assert (ds.samples([[1, 2, 3], [4, 5, 5]]) == [[1, 2, 3], [4, 5, 5]]).all()
with pytest.raises(smlb.InvalidParameterError):
ds.samples([[1, 2, 3], [4, 5, 6]])
with pytest.raises(smlb.BenchmarkError):
ds.labels()
# labeled, no domain
f = lambda arg: np.sum(arg, axis=1) # noqa: E731
ds = smlb.VectorSpaceData(dimensions=2, function=f)
validate_data_interface(ds)
assert ds.dimensions == 2 and ds.domain is None
assert ds.is_labeled and not ds.is_finite
assert (ds.labels([[1, 2], [3, 4]]) == [3, 7]).all()
@pytest.fixture
def fixture_VectorSpaceData_parabola_1d():
f = lambda v: v[:, 0] ** 2 # noqa: E731
ds = smlb.VectorSpaceData(dimensions=1, function=f, domain=[-2, 2])
return ds
def test_VectorSpaceData_parabola_1d(fixture_VectorSpaceData_parabola_1d):
"""Simple parabola example"""
ds = fixture_VectorSpaceData_parabola_1d
assert ds.labels(((0.0,),)) == 0.0
assert ds.labels([[2]]) == 4.0
# outside of domain
with pytest.raises(smlb.BenchmarkError):
ds.labels([[np.nextafter(-2, -3)]]) # outside of domain to the left
with pytest.raises(smlb.BenchmarkError):
ds.labels([[np.nextafter(2, 3)]]) # outside of domain to the right
# def test_ComputedLabelsVectorSpaceData_intersection_1(
# fixture_ComputedLabelsVectorSpaceData_parabola_1d,
# ):
# """Test correctness of intersection for some examples.
# Include test for different labels y of the same inputs x
# being recognized as different examples (x,y).
# """
# ds = fixture_ComputedLabelsVectorSpaceData_parabola_1d
# a = ds.subset([[-1], [0], [2], [1]])
# b = ds.subset([[-1.1], [2], [0.5], [1]])
# assert (np.sort(a.intersection(b).samples()) == [[1.0], [2.0]]).all()
# assert (np.sort(a.intersection(b).labels()) == [1.0, 4.0]).all()
# f = lambda v: v[:, 0] ** 2 + np.abs(v[:, 0])
# ds2 = smlb.ComputedLabelsVectorSpaceData(dimensions=1, function=f, domain=[-2, 2])
# c = ds2.subset([[1], [1.9], [0], [-0.5]])
# assert (np.sort(a.intersection(c).samples()) == [[0.0]]).all()
# assert (np.sort(a.intersection(c).labels()) == [0]).all()
# @pytest.mark.timeout(2)
# def test_ComputedLabelsVectorSpaceData_intersection_2(
# fixture_ComputedLabelsVectorSpaceData_parabola_1d,
# ):
# """Computational efficiency.
# Full-match intersection for larger dataset with computed labels.
# Because this test uses a parabola on [-2,2], it also tests
# correctness of intersection for different inputs x with same
# labels y in (x,y), e.g., (-1,1) and (1,1).
# """
# n = 5000
# ds = fixture_ComputedLabelsVectorSpaceData_parabola_1d
# inds = np.transpose(np.asfarray([np.linspace(-2, 2, n)]))
# a = ds.subset(inds)
# b = ds.subset(inds)
# assert a.intersection(b).num_samples == n
# @pytest.mark.timeout(2)
# def test_intersection_2():
# """Computational efficiency of intersection for an actual synthetic dataset as test case."""
# from smlb.datasets.synthetic.friedman_1979.friedman_1979 import Friedman1979Data
# data = Friedman1979Data(dimensions=10)
# n = 1000
# lhs = smlb.RandomVectorSampler(size=n, rng=0).fit(data).apply(data)
# rhs = smlb.RandomVectorSampler(size=n, rng=1).fit(data).apply(data)
# assert lhs.intersection(rhs).num_samples == 0
|
{"hexsha": "0c6c7220ae61c04f860f3c363941933b1ccf3990", "size": 4320, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/core/test_vector_space_data.py", "max_stars_repo_name": "CitrineInformatics/smlb", "max_stars_repo_head_hexsha": "28a3689bd36aa8d51031b4faf7e2331bbd8148a9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-07-27T21:08:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-04T07:00:29.000Z", "max_issues_repo_path": "tests/core/test_vector_space_data.py", "max_issues_repo_name": "CitrineInformatics/smlb", "max_issues_repo_head_hexsha": "28a3689bd36aa8d51031b4faf7e2331bbd8148a9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-09-01T00:47:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T22:16:56.000Z", "max_forks_repo_path": "tests/core/test_vector_space_data.py", "max_forks_repo_name": "CitrineInformatics/smlb", "max_forks_repo_head_hexsha": "28a3689bd36aa8d51031b4faf7e2331bbd8148a9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-24T21:50:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-06T05:18:57.000Z", "avg_line_length": 33.75, "max_line_length": 98, "alphanum_fraction": 0.6599537037, "include": true, "reason": "import numpy", "num_tokens": 1300}
|
import numpy
def data_generator(celltypes, assays, data, n_positions, batch_size):
while True:
celltype_idxs = numpy.zeros(batch_size, dtype='int32')
assay_idxs = numpy.zeros(batch_size, dtype='int32')
genomic_25bp_idxs = numpy.random.randint(n_positions, size=batch_size)
        genomic_250bp_idxs = genomic_25bp_idxs // 10   # integer division keeps the indices usable as array indices under Python 3
        genomic_5kbp_idxs = genomic_25bp_idxs // 200
value = numpy.zeros(batch_size)
        keys = list(data.keys())  # materialize so the keys can be indexed (required under Python 3)
idxs = numpy.random.randint(len(data), size=batch_size)
for i, idx in enumerate(idxs):
celltype, assay = keys[idx]
track = data[(celltype, assay)]
celltype_idxs[i] = celltypes.index(celltype)
assay_idxs[i] = assays.index(assay)
value[i] = track[genomic_25bp_idxs[i]]
d = {
'celltype_input': celltype_idxs,
'assay_input': assay_idxs,
'genome_25bp_input': genomic_25bp_idxs,
'genome_250bp_input': genomic_250bp_idxs,
'genome_5kbp_input': genomic_5kbp_idxs
}
yield d, value
def sequential_data_generator(celltypes, assays, data, n_positions, batch_size):
start = 0
while True:
celltype_idxs = numpy.zeros(batch_size, dtype='int32')
assay_idxs = numpy.zeros(batch_size, dtype='int32')
genomic_25bp_idxs = numpy.arange(start, start+batch_size) % n_positions
        genomic_250bp_idxs = genomic_25bp_idxs // 10   # integer division keeps the indices usable as array indices under Python 3
        genomic_5kbp_idxs = genomic_25bp_idxs // 200
value = numpy.zeros(batch_size)
        keys = list(data.keys())  # materialize so the keys can be indexed (required under Python 3)
idxs = numpy.random.randint(len(data), size=batch_size)
for i, idx in enumerate(idxs):
celltype, assay = keys[idx]
track = data[(celltype, assay)]
celltype_idxs[i] = celltypes.index(celltype)
assay_idxs[i] = assays.index(assay)
value[i] = track[genomic_25bp_idxs[i]]
d = {
'celltype_input': celltype_idxs,
'assay_input': assay_idxs,
'genome_25bp_input': genomic_25bp_idxs,
'genome_250bp_input': genomic_250bp_idxs,
'genome_5kbp_input': genomic_5kbp_idxs
}
yield d, value
start += batch_size
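# --- Illustrative sketch (added example, not part of the original module) ---
# Toy usage of data_generator with two synthetic tracks; the cell types, assay
# names and random track values below are made up for demonstration only.
if __name__ == '__main__':
    celltypes = ['E003', 'E004']
    assays = ['H3K4me3', 'H3K27ac']
    n_positions = 1000
    data = {
        ('E003', 'H3K4me3'): numpy.random.rand(n_positions),
        ('E004', 'H3K27ac'): numpy.random.rand(n_positions),
    }
    gen = data_generator(celltypes, assays, data, n_positions, batch_size=4)
    inputs, values = next(gen)
    print(inputs['genome_25bp_input'], values)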
|
{"hexsha": "2f6f4c79d6865a51701994c472d576dfbc74e50d", "size": 2007, "ext": "py", "lang": "Python", "max_stars_repo_path": "avocado/io.py", "max_stars_repo_name": "luizirber/avocado", "max_stars_repo_head_hexsha": "7551492d6e60ba9cc1ddcc791882d3278e69607c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "avocado/io.py", "max_issues_repo_name": "luizirber/avocado", "max_issues_repo_head_hexsha": "7551492d6e60ba9cc1ddcc791882d3278e69607c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "avocado/io.py", "max_forks_repo_name": "luizirber/avocado", "max_forks_repo_head_hexsha": "7551492d6e60ba9cc1ddcc791882d3278e69607c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8769230769, "max_line_length": 80, "alphanum_fraction": 0.7015445939, "include": true, "reason": "import numpy", "num_tokens": 607}
|
#include <boost/test/unit_test.hpp>
#include "algorithms/dp/binary_watch_time.hpp"
BOOST_AUTO_TEST_SUITE(TestBinaryWatchTime)
BOOST_AUTO_TEST_CASE(test_get_possible_time)
{
{
const int num_led_on = 0;
const std::vector<std::string> expected = {"0:00"};
BOOST_CHECK(expected == Algo::DP::BinaryWatch::get_time(num_led_on));
}
{
const int num_led_on = 1;
auto result = Algo::DP::BinaryWatch::get_time(num_led_on);
std::vector<std::string> expected = {
"1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"};
std::sort(result.begin(), result.end());
std::sort(expected.begin(), expected.end());
BOOST_CHECK(expected == result);
}
{
const int num_led_on = -1;
const std::vector<std::string> expected = {};
BOOST_CHECK(expected == Algo::DP::BinaryWatch::get_time(num_led_on));
}
{
const int num_led_on = 9;
const std::vector<std::string> expected = {};
BOOST_CHECK(expected == Algo::DP::BinaryWatch::get_time(num_led_on));
}
{
const int num_led_on = 11;
const std::vector<std::string> expected = {};
BOOST_CHECK(expected == Algo::DP::BinaryWatch::get_time(num_led_on));
}
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "db03510eae77e8af74f16b6aa55562e0fbcb5464", "size": 1457, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/algorithms/dp/test_binary_watch_time.cpp", "max_stars_repo_name": "iamantony/CppNotes", "max_stars_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-07-31T14:13:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T09:51:43.000Z", "max_issues_repo_path": "test/algorithms/dp/test_binary_watch_time.cpp", "max_issues_repo_name": "iamantony/CppNotes", "max_issues_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 28.0, "max_issues_repo_issues_event_min_datetime": "2015-09-22T07:38:21.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-02T11:00:58.000Z", "max_forks_repo_path": "test/algorithms/dp/test_binary_watch_time.cpp", "max_forks_repo_name": "iamantony/CppNotes", "max_forks_repo_head_hexsha": "2707db6560ad80b0e5e286a04b2d46e5c0280b3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-10-11T14:10:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-27T08:53:50.000Z", "avg_line_length": 30.3541666667, "max_line_length": 96, "alphanum_fraction": 0.5470144132, "num_tokens": 354}
|
import skimage
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from imageManipulation import *
from arguments import *
def main():
args = setupArguments()
img = openImage(args.imagePath)
for method in args.limiarizationMethods:
print('Applying ' + method.name)
histogram(img)
out = limiarizate(img, method.method)
saveImage('out/' + args.imageName + '_' + method.name + '_.pgm', out.astype(np.uint8))
histogram(out)
print('\nDone')
def limiarizate(img, method):
return method(img.astype(int))
def histogram(image):
# show histogram
plt.hist(image.ravel(), bins=32)
plt.show()
if __name__ == '__main__':
main()
|
{"hexsha": "3c41a7b1b99f9f5cf6613f47975b9bb956c0e151", "size": 718, "ext": "py", "lang": "Python", "max_stars_repo_path": "limiarization/main.py", "max_stars_repo_name": "giovaninppc/MC920", "max_stars_repo_head_hexsha": "7d46238f4079dabc4769c72cbed44d024fcf5c97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-23T19:23:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-23T19:23:18.000Z", "max_issues_repo_path": "limiarization/main.py", "max_issues_repo_name": "giovaninppc/MC920", "max_issues_repo_head_hexsha": "7d46238f4079dabc4769c72cbed44d024fcf5c97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "limiarization/main.py", "max_forks_repo_name": "giovaninppc/MC920", "max_forks_repo_head_hexsha": "7d46238f4079dabc4769c72cbed44d024fcf5c97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-05T23:56:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-05T23:56:49.000Z", "avg_line_length": 22.4375, "max_line_length": 94, "alphanum_fraction": 0.6587743733, "include": true, "reason": "import numpy", "num_tokens": 169}
|
# Copyright (c) 2020 Greg Pintilie - gregp@slac.stanford.edu
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import _multiscale
from CGLutil.AdaptiveTree import AdaptiveTree
import chimera
import FitMap
import os
import Matrix
import VolumeData
import VolumeViewer
import _contour
import _gaussian
chargedIons = { "MG":2, "NA":1, "CL":-1, "CA":2, "ZN":2, "MN":2, "FE":3, "CO":2, "NI":2 }
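# formal charges of common ions; used below (e.g. in QStats1) to group ion residues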
# returns clipped min and max density values of a map (clamped to roughly avg-1*std .. avg+10*std)
def MinMaxD ( dmap ) :
# dmap - the map
M = dmap.data.full_matrix()
maxM = numpy.max(M)
minM = numpy.min(M)
maxD = min ( numpy.average(M)+numpy.std(M)*10, maxM )
minD = max ( numpy.average(M)-numpy.std(M)*1, minM )
# xray
#maxD = min ( numpy.average(M)+numpy.std(M)*3.5, maxM )
#minD = max ( numpy.average(M)-numpy.std(M)*0.77, minM )
#print "%s - %.2f->%.2f, %.2f->%.2f" % (dmap.name, minD, maxD, minM, maxM )
#minD = numpy.min(M)
#minD, maxD = numpy.min(M), numpy.max(M)
return minD, maxD
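# typical use before scoring: minD, maxD = MinMaxD ( dmap ); then A, B = maxD - minD, minD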
# attempt to do Q-score with volume-volume CC rather than sphere points
# works ok, but is not faster - the main reason to try it
# another difference is that with sphere points, the same number of points
# is used at each distance, so the map values at each radial distance are weighted evenly
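# the reference profile used throughout is g(r) = A * exp( -0.5*(r/sigma)^2 ) + B,
# with A = maxD - minD and B = minD, so g(0) = maxD and g(r) -> minD away from the atom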
def QscoreM ( atoms, dmap, sigma, agrid=None, allAtTree=None, show=0, log=0, toRAD=2.0, step=0.2, minD=None, maxD=None, useMask=False ) :
xyz = _multiscale.get_atom_coordinates(atoms, transformed = False)
#_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
li,lj,lk = numpy.min ( xyz, axis=0 ) - (toRAD, toRAD, toRAD)
hi,hj,hk = numpy.max ( xyz, axis=0 ) + (toRAD, toRAD, toRAD)
nO = ( li, lj, lk )
#print nO
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li,lj,lk, hi,hj,hk, d1,d2,d3 )
d1, d2, d3 = hi - li, hj - lj, hk - lk
nstep = (step, step, step)
#nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )
nn1 = int ( numpy.ceil ( float(d1) / step) )
nn2 = int ( numpy.ceil ( float(d2) / step) )
nn3 = int ( numpy.ceil ( float(d3) / step) )
#print " - step %.2f, n: %d %d %d" % (S, nn1, nn2, nn3)
nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )
ii = 1.0 / step
ni = -ii
xyz_to_ijk = ((ii, 0.0, 0.0, ni*nO[0]), (0.0, ii, 0.0, ni*nO[1]), (0.0, 0.0, ii, ni*nO[2]))
ijk_to_xyz = ((step, 0.0, 0.0, nO[0]), (0.0, step, 0.0, nO[1]), (0.0, 0.0, step, nO[2]))
#print ijk_to_xyz
#ijk[:] = xyz
weights = [ 1.0 for a in atoms]
sdevs = [ [sigma, sigma, sigma] for a in atoms ]
cutoff_range = 5
A, B = maxD - minD, minD
#ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
#print ndata.xyz_to_ijk_transform
#print ndata.ijk_to_xyz_transform
#Matrix.transform_points(xyz, ndata.xyz_to_ijk_transform)
if useMask == False :
Matrix.transform_points(xyz, xyz_to_ijk)
_gaussian.sum_of_gaussians(xyz, weights, sdevs, cutoff_range, nmat)
#print " -gm max %.3f" % numpy.max ( nmat )
nmat *= A
nmat += B
#print " -gm max %.3f" % numpy.max ( nmat )
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
if useMask :
nearAts = []
if agrid != None :
for at in atoms :
nats = agrid.AtsNearPtLocal ( at.coord() )
for nat, v in nats :
if at != nat :
nearAts.append ( nat )
#print " - %s, %d.%s - %.3f" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)
if allAtTree != None :
for at in atoms :
opointsNear = allAtTree.searchTree ( at.coord(), toRAD )
for nat in opointsNear :
if nat == at :
continue
v = at.coord() - nat.coord()
if v.length < toRAD :
nearAts.append (nat)
if len(nearAts) == 0 :
print " - no near ats?"
#print " - %d near ats" % len(nearAts)
for k in range(nn3) :
pz = nO[2] + float(k)*step
for j in range(nn2) :
py = nO[1] + float(j)*step
for i in range(nn1) :
px = nO[0] + float(i)*step
P = chimera.Point(px, py, pz)
minDToAt = 1e9
for at in atoms :
v = at.coord() - P
if v.length < minDToAt :
minDToAt = v.length
if minDToAt > toRAD :
nmat[k,j,i] = B-0.1
continue
closestToAt = True
for nat in nearAts :
v = nat.coord() - P
if v.length < minDToAt :
closestToAt = False
#break
if not closestToAt :
nmat[k,j,i] = minD-0.1
else :
nmat[k,j,i] = A * numpy.exp ( -0.5 * numpy.power(minDToAt/sigma,2) ) + B
if 0 and agrid :
nearAts = []
for at in atoms :
nats = agrid.AtsNearPtLocal ( at.coord() )
for nat, v in nats :
if at != nat :
print " - %s, %d.%s - %.3f" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)
nearAts.append ( at )
#print "%d near ats" % len(nearAts)
mat1 = numpy.ones ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( mat1, nO, nstep, dmap.data.cell_angles )
points = _multiscale.get_atom_coordinates(nearAts, transformed = False)
mdata = VolumeData.zone_masked_grid_data ( ndata, points, toRAD, invert_mask=False )
#nmat = mdata.matrix()
nv = VolumeViewer.volume.volume_from_grid_data ( mdata )
nv.openState.xform = dmap.openState.xform
mdata = mask # note: 'mask' is undefined here; this whole branch is disabled debug code (if 0)
fpoints = VolumeData.grid_indices ( (nn3,nn2,nn1), numpy.single) # i,j,k indices
_contour.affine_transform_vertices ( fpoints, ijk_to_xyz )
fpoint_weights = numpy.ravel(nmat).astype(numpy.single)
#print " - %d points" % len(fpoints)
ge = numpy.greater_equal(fpoint_weights, B)
fpoints = numpy.compress(ge, fpoints, 0)
fpoint_weights = numpy.compress(ge, fpoint_weights)
#print " - %d above thr" % len(fpoint_weights)
#nz = numpy.nonzero( fpoint_weights )[0]
#print " - %d above thr" % len(nz)
#map_values, outside = VolumeData.interpolate_volume_data(pts, xyz_to_ijk_tf, darray)
#olap0, cc0, other = overlap_and_correlation ( wts, map_values )
map_values = dmap.interpolated_values ( fpoints, atoms[0].molecule.openState.xform )
#print map_values
olap, cc, ccm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#print olap, cc, ccm
if show :
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
nv.openState.xform = dmap.openState.xform
nv.name = "bam"
return ccm
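# zone_mask: builds an int8 mask over grid_data marking voxels within zone_radius of
# any zone point; large maps (> ~4M voxels) are processed plane-by-plane to save memory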
def zone_mask ( grid_data, zone_points, zone_radius, invert_mask = False, zone_point_mask_values = None ):
from numpy import single as floatc, array, ndarray, zeros, int8, intc
if not isinstance(zone_points, ndarray):
zone_points = array(zone_points, floatc)
if (not zone_point_mask_values is None and not isinstance(zone_point_mask_values, ndarray)):
zone_point_mask_values = array(zone_point_mask_values, int8)
shape = tuple(reversed(grid_data.size))
mask_3d = zeros(shape, int8)
mask_1d = mask_3d.ravel()
if zone_point_mask_values is None:
if invert_mask:
mask_value = 0
mask_1d[:] = 1
else:
mask_value = 1
from VolumeData import grid_indices
from _contour import affine_transform_vertices
from _closepoints import find_closest_points, BOXES_METHOD
size_limit = 2 ** 22 # 4 Mvoxels
if mask_3d.size > size_limit:
# Calculate plane by plane to save memory with grid point array
xsize, ysize, zsize = grid_data.size
grid_points = grid_indices((xsize,ysize,1), floatc)
affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)
zstep = [grid_data.ijk_to_xyz_transform[a][2] for a in range(3)]
for z in range(zsize):
i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)
offset = xsize*ysize*z
if zone_point_mask_values is None:
mask_1d[i1 + offset] = mask_value
else:
mask_1d[i1 + offset] = zone_point_mask_values[n1]
grid_points[:,:] += zstep
else :
grid_points = grid_indices(grid_data.size, floatc)
affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)
i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)
if zone_point_mask_values is None:
mask_1d[i1] = mask_value
else:
mask_1d[i1] = zone_point_mask_values[n1]
return mask_3d
# this method calculates the CC between map values at radial points placed around
# the atoms and a reference gaussian - two values are computed - CC and CC about
# the mean - the latter is the Q-score
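# sketch of the computation (matching the code below):
#   d_vals - map values sampled on spherical shells of radius RAD around each atom
#   g_vals - reference gaussian g(RAD) repeated for the same points
#   Q = CC about the mean of (d_vals, g_vals), i.e. cross-correlation after
#       subtracting the mean from both vectors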
def Qscore ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#sigma = 1.0
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
#print " __%s__ " % (atoms[0].name), pts[0]
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [len(pts)*numPts,1] ) * refG).astype(numpy.float64, copy=False)
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)
if mol == None :
mol = atoms[0].molecule
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)
#print pts
#print d_vals
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,len(pts)*numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
# check if any atoms are too close; ignore those atoms and give them q=0
if 0 and allAtTree :
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), 2.0 )
for nat in anear :
if nat != at :
v = at.coord() - nat.coord()
if v.length < 1.0 :
print "c"
return 0.0
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
#npts = numPts # 8 # int ( npts )
npts = int (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
#npts = numPts * (RAD*RAD / (dRAD*dRAD))
#print RAD, dRAD, numPts, " -> ", npts
for i in range (0, 50) :
outPts = SpherePts ( at.coord(), RAD, npts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
if 1 :
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
if len(opointsNear) == 0 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= npts : # or show :
#print " - %.2f - after %d" % (RAD, i)
pts.extend ( at_pts[0:at_pts_i] )
break
if show :
pmod = AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f %s" % (RAD,atoms[0].name) )
pmod.openState.xform = atoms[0].molecule.openState.xform
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
# this is the CC between averaged radial values - not as robust
if 0 :
olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
if log :
print "olap -avg-: %.3f cc: %.3f, ccmean: %.3f -- %d" % (olap, CC, CCmean, len(d_vals_avg))
#print "%f\t%f\t%f" % (olap, CC, Qs)
olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals, g_vals )
# this is the CC between _all_ radial values
Qs = CCmean
if log :
print "olap --N--: %.3f cc: %.3f, ccmean (Q-score): %.3f -- %d" % (olap, CC, Qs, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, Qs)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:15] ]
N = float ( len(V) ) # r_avg may hold fewer than 15 shells, so count what is there
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
errp = err / r_avg[0][1] * 100.0
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
mx = 0.0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if y > mx :
mx = y
if gv > mx :
mx = gv
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
print ""
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y/mx, gv/mx)
i += 1
return Qs, yds, err
else :
return Qs
# qscores on a grid
def QscoreG ( atoms, dmap, sigma, agrid=None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#sigma = 1.0
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
#print " __%s__ " % (atoms[0].name), pts[0]
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [len(pts)*numPts,1] ) * refG).astype(numpy.float64, copy=False)
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)
if mol == None :
mol = atoms[0].molecule
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,len(pts)*numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
#npts = numPts # 8 # int ( npts )
npts = int (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
#npts = numPts * (RAD*RAD / (dRAD*dRAD))
#print RAD, dRAD, numPts, " -> ", npts
for i in range (0, 100) :
outPts = SpherePts ( at.coord(), RAD, npts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
#apt = numpy.array ( vPt )
P = chimera.Point ( pt[0], pt[1], pt[2] )
if agrid != None :
#opointsNear = allAtTree.searchTree ( vPt, outRad )
nearAts = agrid.AtsNearPtLocal ( P )
if len(nearAts) <= 1 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= npts or i >= 95 : # or show :
pts.extend ( at_pts[0:at_pts_i] )
break
if show :
pmod = AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f %s" % (RAD,atoms[0].name) )
pmod.openState.xform = atoms[0].molecule.openState.xform
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
# this is the CC between averaged radial values - not as robust
if 0 :
olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
if log :
print "olap -avg-: %.3f cc: %.3f, ccmean: %.3f -- %d" % (olap, CC, CCmean, len(d_vals_avg))
#print "%f\t%f\t%f" % (olap, CC, Qs)
olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals, g_vals )
# this is the CC between _all_ radial values
Qs = CCmean
if log :
print "olap --N--: %.3f cc: %.3f, ccmean (Q-score): %.3f -- %d" % (olap, CC, Qs, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, Qs)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:15] ]
N = float ( len(V) ) # r_avg may hold fewer than 15 shells, so count what is there
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
errp = err / r_avg[0][1] * 100.0
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
mx = 0.0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if y > mx :
mx = y
if gv > mx :
mx = gv
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
print ""
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y/mx, gv/mx)
i += 1
return Qs, yds, err
else :
return Qs
# this is an older Q-score function which does not ensure numPts points are sampled around each atom
def Qscore_ ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#sigma = 1.0
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
#print " __%s__ " % (atoms[0].name), pts[0]
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)
if mol == None :
mol = atoms[0].molecule
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,len(pts)*numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
for at in atoms :
anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for at in atoms :
outPts = SpherePts ( at.coord(), RAD, numPts )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
if 1 :
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
if len(opointsNear) == 0 :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
pts.extend ( at_pts[0:at_pts_i] )
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if 0 and log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )
#d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
RAD += dRAD
i+=1
if 0 and log :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
Qscore = CCm
if log :
print "olap -avg-: %.3f cc: %.3f, ccm (Q-score): %.3f -- %d" % (olap, CC, CCm, len(d_vals_avg))
#print "%f\t%f\t%f" % (olap, CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:15] ]
N = float ( len(V) ) # r_avg may hold fewer than 15 shells, so count what is there
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return Qscore, yds, err
else :
return Qscore
def QscorePt ( atPt, xfI, dmap, sigma, allAtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#xfI = chimera.Xform()
atPtC = chimera.Point ( *atPt )
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [numPts,1] ) * refG).astype(numpy.float64, copy=False )
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False )
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( [atPt], xfI ).astype(numpy.float64, copy=False)
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree...
if 1 and allAtTree != None :
ats_near = []
anear = allAtTree.searchTree ( atPt, toRAD*2.0 )
ats_near.extend ( anear )
points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
for i in range (0, 100) :
outPts = SpherePts ( atPtC, RAD, numPts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allAtTree != None :
opointsNear = allAtTree.searchTree ( vPt, outRad )
foundNearPt = False
for npt in opointsNear :
v = apt - npt.coord().data()
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= numPts or i >= 15 : # or show :
pts.extend ( at_pts[0:at_pts_i] )
break
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, xfI )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
#if log :
# olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
# print "olap -avg-: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals_avg))
# #print "%f\t%f\t%f" % (olap, CC, CCm)
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )
qscore = CCm
if log :
print "olap --N--: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:25] ]
N = float ( len(V) ) # r_avg may hold fewer than 25 shells, so count what is there
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return qscore, yds, err
else :
return qscore
# calculate Q-score given a point (rather than atom), and using a 'points tree' rather than 'atoms tree'
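# used by the multiprocessing worker (CalcQForOpenModelsRess below), where atom
# objects are not available and only coordinates are passed between processes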
def QscorePt2 ( atPt, xfI, dmap, sigma, allPtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :
if minD == None or maxD == None :
minD, maxD = MinMaxD (dmap)
#xfI = chimera.Xform()
atPtC = chimera.Point ( *atPt )
#print atPtC
A, B = maxD - minD, minD
refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B
#print " - refg: ", refG
# g_vals should have the reference gaussian...
g_vals = (numpy.ones ( [numPts,1] ) * refG).astype(numpy.float64, copy=False )
g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False )
# r_avg holds the average values and number of points at each radial distance
d_vals = dmap.interpolated_values ( [atPt], xfI ).astype(numpy.float64, copy=False)
#print atPt
#print d_vals
d_vals = numpy.repeat ( d_vals, numPts )
avgV = numpy.average ( d_vals )
r_avg = [ [0,avgV,numPts] ]
d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)
# make smaller atom tree, shaves a few ms off running time for each point
if 1 and allPtTree != None :
pts_near = []
anear = allPtTree.searchTree ( atPt, toRAD*2.0 )
pts_near.extend ( anear )
#points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )
if log :
print " - new search tree: %d pts" % ( len(ats_near) )
allPtTree = AdaptiveTree ( pts_near, pts_near, 1.0)
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
#outRad2 = outRad * outRad
pts = []
# try to get at least [numPts] points at [RAD] distance
# from the atom, that are not closer to other atoms
for i in range (0, 50) :
# points on a sphere at radius RAD...
outPts = SpherePts ( atPtC, RAD, numPts+i*2 )
at_pts, at_pts_i = [None]*len(outPts), 0
for pt in outPts :
vPt = [pt[0], pt[1], pt[2]]
apt = numpy.array ( vPt )
if allPtTree != None :
opointsNear = allPtTree.searchTree ( vPt, outRad )
foundNearPt = False
for npt in opointsNear :
v = apt - npt
r2 = numpy.sum ( v * v )
if r2 < outRad2 :
foundNearPt = True
break
if not foundNearPt :
at_pts[at_pts_i] = vPt
at_pts_i += 1
else :
at_pts[at_pts_i] = vPt
at_pts_i += 1
#if log :
# print " - %d, %d pts" % (i, len(at_pts))
if at_pts_i >= numPts : # or show :
#print " - %.2f - after %d" % (RAD, i)
pts.extend ( at_pts[0:at_pts_i] )
break
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
r_avg.append ( [RAD,0,0] )
else :
d_vals_n = dmap.interpolated_values ( pts, xfI )
d_vals = numpy.append ( d_vals, d_vals_n )
avg = numpy.average ( d_vals_n )
#gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#A, B = GV, 0
#A, B = GV - minD, minD
A,B = maxD - minD, minD
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )
g_vals_avg = numpy.append ( g_vals_avg, gv )
d_vals_avg = numpy.append ( d_vals_avg, avg )
r_avg.append ( [RAD,avg,len(pts)] )
#if log :
# print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, len(pts))
RAD += dRAD
i+=1
if log and not fitg :
min, max = r_avg[0][1], r_avg[0][1]
for RAD, avg, numPts in r_avg :
if avg < min : min = avg
if avg > max : max = avg
A,B = max-min, min
#A,B = maxD - minD, minD
#A,B = GV - minD, minD
for RAD, avg, numPts in r_avg :
gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B
#print "%.1f\t%f\t%f\t%d" % (RAD, avg+0.02, gv+0.02, numPts)
print "%.1f\t%f\t%f\t%d" % (RAD, avg, gv, numPts)
#d_vals = d_vals + 0.02
#g_vals = g_vals + 0.02
#if log :
# olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )
# print "olap -avg-: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals_avg))
# #print "%f\t%f\t%f" % (olap, CC, CCm)
olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )
qscore = CCm
if log :
print "olap --N--: %.3f cc: %.3f, ccm: %.3f -- %d" % (olap, CC, CCm, len(d_vals))
#print "%f\t%f\t%f" % (olap, CC, CCm)
if fitg :
if log : print "fitting gaussian : "
#V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))
V = [ [x[0],x[1]] for x in r_avg[0:25] ]
N = float ( len(V) ) # r_avg may hold fewer than 25 shells, so count what is there
sdev, A, B = optSGD ( V, 5000, 1.0 )
sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )
err = numpy.sqrt(err3(V,sdev,A,B)/N)
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f, err: %f" % (sdev, A, B, err)
sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
err = numpy.sqrt(err3(V,sdev,A,B)/N)
#print "max:", r_avg[0][1]
errp = err / r_avg[0][1] * 100.0
if log : print " gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)" % (sdev, A, B, err, errp)
yds, i = numpy.zeros ( len(r_avg) ), 0
for x, y, n in r_avg:
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
#yds[i] = y - gv
yds[i] = y
if log : print "%.1f\t%f\t%f" % (x, y, gv)
i += 1
return qscore, yds, err
else :
return qscore
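# RadAts: estimates the width (sdev) of the radial density falloff around the given
# atoms by fitting the same gaussian model; returns abs(sdev), or 10.0 when the
# density does not decay with radius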
def RadAts ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=20, toRAD=2.0, dRAD=0.1 ) :
if len(atoms) == 0 :
#print " - no RAD atoms?"
return None
#pts = []
#for at in atoms :
# p = at.coord()
# pts.append ( [p[0], p[1], p[2]] )
from _multiscale import get_atom_coordinates
pts = get_atom_coordinates(atoms, transformed = False)
RD_, X, Y = [], [], []
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [0,avg] ); X.append (0); Y.append (avg)
#dRAD, toRAD, RAD = 0.2, 1.8, 0.1
RAD = dRAD
i = 1.0
while RAD < toRAD + 0.01 :
outRad = RAD*0.9
outRad2 = outRad * outRad
pts = []
for at in atoms :
npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts
npts = int ( npts )
#print RAD, dRAD, numPts, " -> ", npts
outPts = SpherePts ( at.coord(), RAD, npts )
for pt in outPts :
ppt = [pt[0], pt[1], pt[2]]
if allAtTree != None :
vPt = numpy.array ( ppt )
opointsNear = allAtTree.searchTree ( ppt, outRad )
if 1 :
clash = False
for p in opointsNear :
v = vPt - p.coord().data()
sqSum = numpy.sum ( v * v )
if sqSum < outRad2 :
clash = True
break
if clash == False :
pts.append ( ppt )
else :
if len(opointsNear) == 0 :
pts.append ( ppt )
else :
pts.append ( ppt )
if show :
AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, "RAD points %.1f" % RAD )
if len (pts) < 1 :
if log :
print " - no points for RAD %.1f - %d.%s - " % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),
print "SC" if atoms[0].isSC else "BB"
else :
d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )
avg = numpy.average ( d_vals )
RD_.append ( [RAD,avg] );
if log :
print RAD, avg, len(pts)
X.append (RAD); Y.append (avg)
RAD += dRAD
#minSd = opt0 ( RD_, 0.1 )
#if minSd != None :
# if show :
# print " SD0: %.1f" % minSd
sdev = toRAD
slope = 0
if RD_[0][1] <= RD_[-1][1] :
sdev = 10.0
else :
#for i in range ( len(RD_) ) :
# RD_[i][1] = RD_[i][1] - RD_[-1][1]
# if log :
# Y[i] = Y[i] - Y[-1]
#import time
#start = time.time()
sdev, A, B = optSGD ( RD_, 9000, 0.2 )
sdev, A, B = optSGD ( RD_, 9000, 0.02, sdev, A, B )
sdev, A, B = optSGD ( RD_, 9000, 0.002, sdev, A, B )
#end = time.time()
#if log : print " sgd - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
if log : print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#start = time.time()
#sdev, A, B = optGN ( RD_, 0.0001 )
#print " gn - sdev: %.4f, A %.4f, B %.4f -- %f" % (sdev, A, B, (end - start))
#end = time.time()
if 1 :
if 0 and sdev != None :
if log :
print " gn1 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
else :
sdev, A, B = optSGD ( RD_, 10000, 0.01 )
if log :
print " sgd - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
sdev2, A2, B2 = optGN ( RD_, 0.0001, sdev, A, B )
if sdev2 != None :
sdev, A, B = sdev2, A2, B2
if log :
print " gn2 - sdev: %.4f, A %.4f, B %.4f" % (sdev, A, B)
#else :
# return 10.0
if log :
r = numpy.polyfit ( X, Y, 1, rcond=None, full=False, w=None, cov=False)
print " sdev: %.4f, A %.4f, B %.4f // slope: %.4f y %.4f" % (sdev, A, B, r[0], r[1])
#A, B = 0.26+0.08, -0.08
lastX = 0
for i in range ( len(RD_) ) :
x, y = RD_[i]
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t%f\t%f\t%f" % (x, y, gv, gvRef)
lastX = x
if 1 :
x = lastX + dRAD
#while x < min(4 * sdev,50.0) :
while x < min(10.0,50.0) :
gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B
gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B
lv = x * r[0] + r[1]
print "%.1f\t\t%f\t%f" % (x, gv, gvRef)
x += dRAD
#return abs(sdev), abs(slope)
return abs(sdev)
def TimeLeftStr ( atI, totI, totSec ) :
leftTime = ""
leftSec = 0.0
iPerSec = float(atI) / totSec
if iPerSec > 0 :
leftSec = float ( totI - atI ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
return leftTime
return ""
def optGN ( V, err, S=None, A=None, B=None ) :
y0 = V[0][1]
yN = V[-1][1]
if S == None :
S = 0.5
A = y0+yN
B = yN
an = numpy.array ( [A,B,S] )
#print " _ -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
badMatCount = 0
for i in range ( 1000 ) :
J = numpy.zeros ( [len(V),3] )
e = numpy.zeros ( [len(V),1] )
err0 = 0
j = 0
for x,y in V :
expv = numpy.exp ( -0.5 * numpy.power(x/S,2) )
v = A * expv + B
yd = v - y
err0 += yd * yd
#print "%.2f,%.2f/%.2f(%.2f)" % (x, y, v, yd),
dA = expv
dB = 1
dS = A*x*x*numpy.power(S,-3) * expv
J[j,:] = [dA, dB, dS]
e[j,0] = yd
j += 1
Jt = numpy.transpose(J)
try :
J_ = numpy.dot ( numpy.linalg.inv ( numpy.dot(Jt,J) ), Jt )
except :
#print " - bad matrix?"
#print numpy.dot(Jt,J)
badMatCount += 1
if badMatCount > 3 :
return None, None, None
from numpy import random as R
an = numpy.array ( [R.random()*(y0+yN),R.random()*yN,R.random()*10.0] )
A,B,S = an[0], an[1], an[2]
#print " ? -- A %.3f B %.3f s %.3f" % (A, B, S)
reg = 1.0
continue
ad = numpy.dot ( J_, e )
ann = an - ( ad[:,0] * reg )
A,B,S = ann[0], ann[1], ann[2]
err1 = err3 ( V, S, A, B )
#if err1 > err0 :
# reg = reg * 0.1
# if reg < err :
# break
#else :
an = ann
#print " %d -- A %.3f B %.3f s %.3f - err %.3f, reg %.5f" % (i, A, B, S, err1, reg)
if abs(err0 - err1) < err :
#print " - done"
break
i += 1
return S,A,B
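# optSGD: random-search refinement of the same gaussian parameters - perturbs
# (S, A, B) with gaussian noise of width 'err' for N iterations, keeping improvements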
def optSGD ( V, N, err, S=None, A=None, B=None ) :
if S == None :
y0 = V[0][1]
yN = V[-1][1]
S = 0.5
A = y0+yN
B = yN
from numpy import random
lastE = err3 ( V, S, A, B )
#while True :
for i in range(N) :
S_ = S + random.normal ( 0, err ) # mean, sigma
A_ = A + random.normal ( 0, err ) # mean, sigma
B_ = B + random.normal ( 0, err ) # mean, sigma
e = err3 ( V, S_, A_, B_ )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e < lastE :
S, A, B = S_, A_, B_
lastE = e
return S,A,B
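# minimal usage sketch for the fitting helpers (hypothetical data, not part of mapQ):
#   V = [ [0.0, 1.0], [0.5, 0.75], [1.0, 0.35], [1.5, 0.12], [2.0, 0.05] ]
#   S, A, B = optSGD ( V, 5000, 1.0 )
#   S2, A2, B2 = optGN ( V, 0.0001, S, A, B )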
def err3 ( XYz, sd, A, B ) :
y0 = XYz[0][1]
err = 0
#for x,y in XYz[1:] :
for x,y in XYz :
yd = y - A * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) - B
err += yd * yd
#err /= float(len(XYz))
return err
def err ( XYz, sd ) :
y0 = XYz[0][1]
err = 0
for x,y in XYz[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
#err /= float(len(XYz))
return err
def opt0 ( RD_, dStep ) :
sd = 0.1
y0 = RD_[0][1]
minSd, minErr, N = None, 1e99, float ( len(RD_)-1 )
while sd < 10.0 :
err = 0
for x,y in RD_[1:] :
yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )
err += yd * yd
err /= N
#print err
if err < minErr :
minErr = err
minSd = sd
sd += dStep
return minSd # was missing - without it the best sdev found was discarded
def opt ( V, maxErr ) :
dd = 1.0
sdAt = 0.1
lastE = err ( V, sdAt )
#while True :
for i in range(10000) :
sdAt += dd
e = err ( V, sdAt )
#print "%d %.2f %f %f %.4f" % (i, sdAt, e, numpy.log(e), dd)
if e >= lastE :
dd *= -0.75
if abs(dd) < maxErr :
return sdAt
lastE = e
return sdAt
def CalcQForAts ( dmap, mol, ats, sigma=0.6 ) :
minD, maxD = MinMaxD (dmap)
from _multiscale import get_atom_coordinates
from CGLutil.AdaptiveTree import AdaptiveTree
allAts = [at for at in mol.atoms if not at.element.name == "H"]
points = get_atom_coordinates ( allAts, transformed = False )
allAtTree = AdaptiveTree ( points.tolist(), allAts, 1.0)
for at in ats :
at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, minD=minD, maxD=maxD )
def Calc ( chimeraPath, numProc, res=3.0, bfactorF=-1, sigma=0.6 ) :
print "Calc Q scores"
print " - chimera path: ", chimeraPath
print " - num processors: ", numProc
print " - resolution: ", res
print " - sigma: ", sigma
if bfactorF > 0 :
print " - b-factor: ", bfactorF
from VolumeViewer import Volume
vols = chimera.openModels.list(modelTypes = [Volume])
if len(vols) == 0 :
print " - no volumes loaded"
return
dmap = vols[0]
print " - volume: %s" % dmap.name
from chimera import Molecule
mols = chimera.openModels.list(modelTypes = [Molecule])
if len(mols) == 0 :
print " - no molecules loaded"
return
for mi, mol in enumerate (mols) :
print ""
print "Model %d/%d: %s" % (mi+1, len(mols), mol.name)
SetBBAts ( mol )
if numProc == 1 :
CalcQ ( mol, None, dmap, sigma, log=True )
else :
CalcQp ( mol, None, dmap, sigma, numProc=numProc, chimeraPath=chimeraPath )
SaveQStats ( mol, "All", dmap, sigma, res )
if bfactorF > 0 :
minb, maxb = 1.0e9, 0.0
for at in mol.atoms :
at.bfactor = bfactorF * (1.0 - at.Q)
#at.occupancy = 1.0 # max(0,at.Q)
#dval = self.cur_dmap.interpolated_values ( [ at.coord() ], self.cur_mol.openState.xform ).astype(numpy.float64, copy=False)[0]
#at.occupancy = (dval - minD) / (maxD - minD)
minb = min ( minb, at.bfactor )
maxb = max ( maxb, at.bfactor )
molPath = os.path.splitext(mol.openedAs[0])[0]
nname = molPath + "_B%.0f.pdb" % bfactorF
print "Saving pdb with B'-factors, f=%.0f:" % bfactorF
print " -> ", nname
print " - bfactor = %.0f*(1-Qscore), range %.2f to %.2f" % (bfactorF, minb, maxb)
#print " - occupancies set to 1"
print ""
chimera.PDBio().writePDBfile ( [mol], nname )
# this is the function that the MP version executes once Chimera is opened
# with the partial (masked) map; atom positions are read from files in the temp folder
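# temp-folder file protocol (written by CalcQp below):
#   all_atoms.txt - first line "sigma minD maxD numAtoms", then one "atomId x y z" per atom
#   <i>_atoms.txt - ids of the atoms this process should score (one per line)
#   <i>_map.mrc   - map masked/cropped around this process's atoms
#   <i>_out.txt   - results, "atomId Q" per line; <i>_stat.txt - progress/ETA status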
def CalcQForOpenModelsRess () :
from VolumeViewer import Volume
D = chimera.openModels.list(modelTypes = [Volume])
dmap = D[0]
#dmapA = D[1]
print " - dmap: %s" % dmap.data.path
#print " - dmapA: %s" % dmapA.name
tempPath, mapNameExt = os.path.split ( dmap.data.path )
mapName, mapExt = os.path.splitext ( mapNameExt )
procI = mapName.split("_")[0]
print " - proc: %s" % procI
print " - in path: %s" % tempPath
# read ids and position of all atoms
aPosMap = {}
fina = open ( os.path.join(tempPath, "all_atoms.txt") )
l1 = fina.readline()
sigma, minD, maxD, numAts = l1.split()
sigma, minD, maxD, numAts = float(sigma), float(minD), float(maxD), int(numAts)
#fout.write ( "Sigma: %.1f, minD: %.3f, maxD: %.3f, numAllAts: %d\n" % (sigma, minD, maxD, numAts) )
print "Sigma: %.1f, minD: %.3f, maxD: %.3f, numAllAts: %d\n" % (sigma, minD, maxD, numAts)
allPts = [None] * numAts # numpy.array.zeros ( [numAts,3] )
ati = 0
for l in fina :
atIdStr, sx, sy, sz = l.split()
pt = [ float(sx), float(sy), float(sz) ]
allPts[ati] = [ float(sx), float(sy), float(sz) ]
aPosMap[atIdStr] = pt
ati += 1
fina.close()
if ati != numAts :
print " ---!!--- got only %d of %d atoms" (ati, numAts)
return
##ats = [at for at in mol.atoms if not at.element.name == "H"]
##points = _multiscale.get_atom_coordinates ( allPts, transformed = False )
##print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allPtTree = AdaptiveTree ( allPts, allPts, 1.0)
print " - points tree with %d points" % len(allPts)
fin = open ( os.path.join ( tempPath, "%s_atoms.txt" % procI ) )
fout = open ( os.path.join ( tempPath, "%s_out.txt" % procI ), "w" )
fout_status = os.path.join ( tempPath, "%s_stat.txt" % procI )
# get positions of atoms to do in this process
doAts = []
for l in fin :
#atIdStr, sx, sy, sz = l.split()
atIdStr = l.strip()
if not atIdStr in aPosMap :
fout.write ( "- atid not found: %s\n" % atIdStr )
continue
#at = atids[atIdStr]
#pt = [ float(sx), float(sy), float(sz) ]
doAts.append ( [atIdStr, aPosMap[atIdStr]] )
fin.close()
# write status to a temp file
fs = open ( fout_status, "w" );
fs.write ( "at atom %d/%d" % (0,len(doAts) ) );
fs.close()
import time
start = time.time()
xfI = dmap.openState.xform
i = 1
for atId, atPt in doAts :
#print "%d.%s.%s" % (r.id.position,r.id.chainId,at.name),
##qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
qs = QscorePt2 ( atPt, xfI, dmap, sigma, allPtTree=allPtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )
fout.write ( "%s %f\n" % (atId, qs) )
if i%10 == 0 :
end = time.time()
totSec = end - start
leftTime = ""
leftSec = 0.0
iPerSec = float(i) / totSec
if iPerSec > 0 :
leftSec = float ( len(doAts) - i ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
# update status
fs = open ( fout_status, "w" );
fs.write ( "at atom %d/%d - ETA %s" % (i+1,len(doAts),leftTime) );
fs.close()
i += 1
fout.close()
fs = open ( fout_status, "w" ); fs.write ( "done" ); fs.close()
def CalcQp ( mol, cid, dmap, sigma, useOld=False, log=False, numProc=None, chimeraPath=None ) :
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
mapPath = os.path.split ( dmap.data.path )[0]
mapBase = os.path.splitext ( dmap.data.path )[0]
if useOld :
SetBBAts ( mol )
nname = molPath + "__Q__" + mapName + ".pdb"
if QsFromPdbFile ( mol, nname ) :
Qavg = QStats1 ( mol, cid )
return Qavg
#numProc = 2
if numProc == None :
import multiprocessing
numProc = max ( 1, multiprocessing.cpu_count() / 2 )
print "Q Scores - p - %d" % numProc
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
print " - sigma: %.2f" % sigma
minD, maxD = MinMaxD ( dmap )
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
import time
start = time.time()
tempPath = mapBase + "__Q-scores__temp__"
print " - making temp path: %s" % tempPath
try :
os.mkdir(tempPath)
except :
print " - could not make temp path (an old calc may have failed):"
print " : check/remove temp path manually and try again"
print " : or, check write permissions"
allAtsFilePath = os.path.join ( tempPath, "all_atoms.txt" )
# write all (non-H) atoms to one file
allAtoms = [at for at in mol.atoms if not at.element.name == "H"]
fout = open ( allAtsFilePath, "w" )
print " - all atoms -> %s" % allAtsFilePath
fout.write ( "%.3f %f %f %d\n" % (sigma, minD, maxD, len(allAtoms)) )
for at in allAtoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atId = "%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)
p = at.coord()
fout.write ( "%s %f %f %f\n" % (atId,p.x,p.y,p.z) )
fout.close()
# atoms for which to calculate Q-scores
SetBBAts ( mol )
ress = []
atoms = []
for r in mol.residues :
if cid == None or cid == "All" or r.id.chainId == cid :
for at in r.atoms :
if not at.element.name == "H" :
atoms.append ( at )
print " - atoms to do: %d" % len(atoms)
import subprocess
import sys
print "cmd:",
#print sys.argv
for arg in sys.argv :
print arg,
print ""
if chimeraPath == None :
# '/Users/greg/_mol/Chimera.app/Contents/Resources/share/__main__.py'
chimeraPath = os.path.split ( sys.argv[0] )[0]
print ""
print " ------------ ", chimeraPath
print ""
chimeraPath, share = os.path.split ( chimeraPath )
chimeraPath = os.path.join ( chimeraPath, 'bin' )
chimeraPath = os.path.join ( chimeraPath, 'chimera' )
if os.path.isfile ( chimeraPath ) :
print " -- on unix/mac"
else :
chimeraPath += ".exe"
if os.path.isfile ( chimeraPath ) :
print " -- on windows"
else :
print " - chimera path not found..."
print chimeraPath
print ""
return
print " -- path to Chimera:", chimeraPath
dir_path = os.path.dirname(os.path.realpath(__file__))
inDir = os.path.split(dir_path)[0]
print " -- working dir:", inDir
#mapQPPath = os.path.join ( inDir, 'Segger' )
mapQPPath = os.path.join ( dir_path, 'mapqp.py' )
print " -- path to mapQ script:", mapQPPath
n = len(atoms)
g = [atoms[(n*c)/numProc:(n*(c+1))/numProc] for c in range(numProc)]
procs = []
for mi, atoms1 in enumerate(g) :
ress1 = atoms1[0].residue
ressN = atoms1[-1].residue
print " - %d/%d, %d-%d" % (mi+1, numProc, ress1.id.position, ressN.id.position)
procAtomsPath = os.path.join ( tempPath, "%d_atoms.txt" % mi )
fout = open ( procAtomsPath, "w" )
for at in atoms1 :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
p = at.coord()
#fout.write ( "%d.%s.%s.%s %.3f %.3f %.3f\n" % (r.id.position,r.id.chainId,at.name,altLoc,p.x,p.y,p.z) )
fout.write ( "%d.%s.%s.%s\n" % (r.id.position,r.id.chainId,at.name,altLoc) )
fout.close()
nmap_path = os.path.join ( tempPath, "%d_map.mrc" % mi )
if 1 :
nmap = MaskMapResize ( atoms1, 6, dmap, nmap_path )
else :
import shutil
shutil.copyfile ( dmap.data.path, nmap_path )
#args = [chimeraPath, '--nogui', '--silent', '--nostatus', mol.openedAs[0], nmap_path, mapQPPath]
#args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, dmap.data.path, mapQPPath]
args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, mapQPPath]
if mi == 0 :
print "running proc:",
for arg in args :
print arg,
print ""
fout = open ( os.path.join(tempPath, "%d.log" % mi), "w" )
foute = open ( os.path.join(tempPath, "%d_err.log" % mi), "w" )
p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=inDir)
procs.append ( [mi, p, fout, foute] )
print ""
print "Waiting...",
for mi, p, fout, foute in procs :
p.wait()
fout.close()
foute.close()
print "%d" % mi,
print ""
atids = {}
for r in mol.residues :
for at in r.atoms :
r = at.residue
altLoc = '_' if at.altLoc == '' else at.altLoc
atids["%d.%s.%s.%s" % (r.id.position,r.id.chainId,at.name,altLoc)] = at
print ""
print "Getting...",
for mi, p, fout, foute in procs :
fin = os.path.join(tempPath, "%d_out.txt" % mi)
#print " - getting from: ", fin
fp = open ( fin )
for l in fp :
#print " - ", l
try :
atId, Q = l.split()
except :
print " - err line: ", l
continue
at = atids[atId.strip()]
#at = r.atomsMap[atName][0]
at.Q = float(Q)
#at.CC = float(cc)
at.bfactor = at.Q
fp.close()
if mi == 0 :
print ""
print ""
print "__StdOut for process %d__" % mi
foute = open ( os.path.join(tempPath, "%d.log" % mi), "r" )
for l in foute :
print l,
print ""
foute.close()
print "__StdErr file for process %d__" % mi
foute = open ( os.path.join(tempPath, "%d_err.log" % mi), "r" )
for l in foute :
print l,
print ""
foute.close()
if 1 :
print "Removing temp files",
for mi, p, fout, foute in procs :
os.remove ( os.path.join(tempPath, "%d_out.txt" % mi) )
try :
os.remove ( os.path.join(tempPath, "%d_stat.txt" % mi) )
except :
print " - did not find _stat file"
pass
os.remove ( os.path.join(tempPath, "%d_atoms.txt" % mi) )
os.remove ( os.path.join(tempPath, "%d_map.mrc" % mi) )
os.remove ( os.path.join(tempPath, "%d.log" % mi) )
os.remove ( os.path.join(tempPath, "%d_err.log" % mi) )
print "%d" % mi,
print ""
os.remove ( os.path.join(tempPath, "all_atoms.txt") )
os.rmdir ( tempPath )
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
SaveQFile ( mol, cid, dmap, sigma )
Qavg = QStats1 ( mol, cid )
return Qavg
def QStats1 ( mol, chainId='All', doCalcResQ=True ) :
totQ, totN = 0.0, 0.0
#QT, QN = { "Protein":0.0, "Nucleic":0.0, "Other":0.0 }, { "Protein":0.0, "Nucleic":0.0, "Other":0.0}
QT, QN = {}, {}
QT_, QN_ = {}, {}
QH, QL = {}, {}
if chainId == None :
chainId = "All"
print "Q for %d res, chain %s" % ( len(mol.residues), chainId )
for r in mol.residues :
if r.id.chainId == chainId or chainId == "All" :
if doCalcResQ :
CalcResQ (r, None, None, useOld=True )
for at in r.atoms :
if at.element.name == "H" :
continue
if hasattr ( at, "Q") :
totQ += at.Q
totN += 1.0
tp = "Other"
if at.residue.isProt : tp = "Protein"
elif at.residue.isNA : tp = "Nucleic"
else : tp = at.residue.type
if tp in QT :
QT[tp] += at.Q; QN[tp] += 1.0;
QH[tp] = max(QH[tp], at.Q); QL[tp] = min(QL[tp], at.Q)
else :
QT[tp] = at.Q; QN[tp] = 1.0
QH[tp] = at.Q; QL[tp] = at.Q
tps = r.id.chainId + ":" + tp
if tps in QT_ :
QT_[tps] += at.Q; QN_[tps] += 1.0
else :
QT_[tps] = at.Q; QN_[tps] = 1.0
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Chain\tAvg.Q-score\tEst.Res.(A)"
tpk = QT_.keys()
tpk.sort()
for tp in tpk :
if QN_[tp] > 0 :
avgQ = QT_[tp]/QN_[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
else :
avgR = (avgQ-1.1244)/-0.1794
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
Q__ = { " protein":0, " nucleic":0, " water":0, " ion":0 }
#for tp in ["Other", "Protein", "Nucleic"] :
print ""
print "Type\tAvg.Q-score\tEst.Res.(A)"
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
avgR = 0
if "nucleic" in tp.lower() :
avgR = (avgQ-1.0673)/-0.1574
Q__[" nucleic"] = avgQ
elif "protein" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" protein"] = avgQ
elif "hoh" in tp.lower() :
avgR = (avgQ-1.1244)/-0.1794
Q__[" water"] = avgQ
elif tp.upper() in chargedIons :
avgR = (avgQ-1.1244)/-0.1794
Q__[" ion"] = avgQ
else :
avgR = (avgQ-1.1244)/-0.1794
Q__[tp] = avgQ
print " %s\t%.3f\t%.2f" % (tp, avgQ, avgR )
else :
print " %s\tn/a" % (tp)
print ""
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%s" % tp,
print ""
print "Avg.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
avgQ = QT[tp]/QN[tp]
print "\t%.3f" % avgQ,
print ""
print "Max.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QH[tp],
print ""
print "Min.Q.",
for tp in QT.keys() :
if QN[tp] > 0 :
print "\t%.3f" % QL[tp],
print ""
print ""
#return Q__
return totQ/totN if totN > 0 else 0.0
def QStatsProt ( mol, dmap, chainId ) :
SetBBAts ( mol )
ress = []
for r in mol.residues :
if r.id.chainId == chainId and r.isProt :
ress.append ( r )
if len(ress) == 0 :
print "QstatsProt - no protein residues in chain %s" % chainId
return
sByType = {}
rByType = {}
def addType (tp, r, score) :
if not tp in sByType :
rByType[tp] = []
sByType[tp] = []
rByType[tp].append ( [score, r] )
sByType[tp].append ( [score] )
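# note: the position-specific cases below (e.g. LEU 114, VAL 33, ARG 76) look like
# probes for one particular test structure rather than general-purpose statistics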
for r in ress :
if r.isProt and r.type == "LEU" :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU(CD)", r, avg )
if r.isProt and r.type == "LEU" and r.id.position==114 :
avg = (r.atomsMap["CD1"][0].Q + r.atomsMap["CD2"][0].Q)/2.0
addType ( "LEU_114(CD)", r, avg )
if r.isProt and r.type == "VAL" :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL(CG)", r, avg )
if r.isProt and r.type == "VAL" and r.id.position==33 :
avg = (r.atomsMap["CG1"][0].Q + r.atomsMap["CG2"][0].Q)/2.0
addType ( "VAL_33(CG)", r, avg )
if r.isProt and r.type == "ARG" :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==76 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_76(NH)", r, avg )
if r.isProt and r.type == "ARG" and r.id.position==9 :
avg = (r.atomsMap["NH1"][0].Q + r.atomsMap["NH2"][0].Q)/2.0
addType ( "ARG_9(NH)", r, avg )
if r.isProt and r.type == "LYS" :
avg = r.atomsMap["NZ"][0].Q
addType ( "LYS(NZ)", r, avg )
if r.isProt and r.type == "ASP" :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==42 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_42(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==131 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_131(OD)", r, avg )
if r.isProt and r.type == "ASP" and r.id.position==171 :
avg = (r.atomsMap["OD1"][0].Q + r.atomsMap["OD2"][0].Q)/2.0
addType ( "ASP_171(OD)", r, avg )
if r.isProt and r.type == "GLU" :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==17 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_17(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==27 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_27(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==67 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_67(OE)", r, avg )
if r.isProt and r.type == "GLU" and r.id.position==134 :
avg = (r.atomsMap["OE1"][0].Q + r.atomsMap["OE2"][0].Q)/2.0
addType ( "GLU_134(OE)", r, avg )
if r.isProt or r.isNA :
if r.scQ :
addType ( r.type, r, r.scQ )
else :
addType ( r.type, r, r.Q )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype, numpy.std (ra)] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
# sort by avg score
#avgs.sort ( reverse=True, key=lambda x: x[0] )
# sort by residue type
avgs.sort ( reverse=False, key=lambda x: x[1] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype, sdev in avgs :
rscores = rByType[rtype]
if len(rscores) > 0 :
rscores.sort ( reverse=True, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[R.type]
elif R.isNA : rts = nucleic3to1[R.type]
print "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore) )
fp.close()
def QStatsRNA ( mol, dmap, chainId ) :
SetBBAts ( mol )
ress = []
for r in mol.residues :
if r.id.chainId == chainId and r.isNA :
ress.append ( r )
if len(ress) == 0 :
print "Qstats RNA - no RNA residues found in chain %s" % chainId
return
print ""
print "RNA stats for chain %s" % chainId
print ""
sByType = {}
rByType = {}
def addType (tp, r, score) :
if not tp in sByType :
rByType[tp] = []
sByType[tp] = []
rByType[tp].append ( [score, r] )
sByType[tp].append ( [score] )
scAts = []
bbAts = []
allAts = []
for r in ress :
if r.isNA :
avg = numpy.average ( [at.Q for at in r.scAtoms] )
#addType ( nucleic3to1[r.type] + "_SC", r, avg )
addType ( r.type + "_SC", r, avg )
avg = numpy.average ( [at.Q for at in r.bbAtoms] )
#addType ( nucleic3to1[r.type] + "_BB", r, avg )
addType ( r.type + "_BB", r, avg )
scAts.extend ( r.scAtoms )
bbAts.extend ( r.bbAtoms )
allAts.extend ( [at for at in r.atoms if at.element.name != "H"] )
avgQ = numpy.average ( [at.Q for at in allAts] )
avgQbb = numpy.average ( [at.Q for at in bbAts] )
avgQsc = numpy.average ( [at.Q for at in scAts] )
sQ = numpy.std ( [at.Q for at in allAts] )
sQbb = numpy.std ( [at.Q for at in bbAts] )
sQsc = numpy.std ( [at.Q for at in scAts] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype, numpy.std (ra)] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
# sort by avg score
#avgs.sort ( reverse=True, key=lambda x: x[0] )
# sort by residue type
avgs.sort ( reverse=False, key=lambda x: x[1] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + "_rscores.txt"
print " - scores to: " + foname
fp = open (foname,"w")
print ""
print "Map\tModel\tQ_All\tQ_Backbone\tQ_SideChain\tStdQ_All\tStdQ_Backbone\tStdQ_SideChain"
print "%s\t%s\t%f\t%f\t%f\t%f\t%f\t%f" % (mapName, molName, avgQ, avgQbb, avgQsc, sQ, sQbb, sQsc)
print ""
fp.write ( "\n" )
fp.write ( "Map\tModel\tQ_All\tQ_Backbone\tQ_SideChain\tStdQ_All\tStdQ_Backbone\tStdQ_SideChain\n" )
fp.write ( "%s\t%s\t%f\t%f\t%f\t%f\t%f\t%f" % (mapName, molName, avgQ, avgQbb, avgQsc, sQ, sQbb, sQsc) )
fp.write ( "\n\n" )
fp.write ( "Type\Mol\t#\tAvg.Q.\tSDev\tPos\tChain\tMaxQ\n" )
print "RType\tResidue\t#\tAvg.Q.\tSDev\tPos\tChain\tMaxQ"
print ""
for avgScore, rtype, sdev in avgs :
rscores = rByType[rtype]
if len(rscores) > 0 :
rscores.sort ( reverse=True, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[R.type]
elif R.isNA : rts = nucleic3to1[R.type]
print "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore) )
fp.close()
# expected Q-scores given the resolution of a map
# - sigma=0.6, for resolutions 1.5 and lower
# - sigma=0.4, for resolution higher than 1.5
def eQ_protein (RES, sigma=0.6) :
if abs(sigma-0.6) < 1e-5 :
return -0.1775 * RES + 1.1192, "-0.1775 * RES + 1.1192"
elif abs(sigma-0.4) < 1e-5 :
return -0.1866 * RES + 1.1242, "-0.1866 * RES + 1.1242"
else :
return None, "no eqn for protein with sigma=%.2f" % sigma
def eQ_nucleic (RES, sigma=0.6) :
if abs(sigma-0.6) < 1e-5 :
return -0.1377 * RES + 0.9973, "-0.1377 * RES + 0.9973"
elif abs(sigma-0.4) < 1e-5 :
return -0.1465 * RES + 0.9436, "-0.1465 * RES + 0.9436"
else :
return None, "no eqn for nucleic with sigma=%.2f" % sigma
def eQ_ion (RES, sigma=0.6) :
if abs(sigma-0.6) < 1e-5 :
return -0.1103 * RES + 1.0795, "-0.1103 * RES + 1.0795"
elif abs(sigma-0.4) < 1e-5 :
return -0.1103 * RES + 1.0795, "-0.1103 * RES + 1.0795"
else :
return None, "no eqn for ion with sigma=%.2f" % sigma
def eQ_water ( RES, sigma=0.6) :
if abs(sigma-0.6) < 1e-5 :
return -0.0895 * RES + 1.0001, "-0.0895 * RES + 1.0001"
elif abs(sigma-0.4) < 1e-5 :
return -0.0895 * RES + 1.0001, "-0.0895 * RES + 1.0001"
else :
return None, "no eqn for water with sigma=%.2f" % sigma
def SaveQStats ( mol, chainId, dmap, sigma, RES=3.0 ) :
if chainId == None :
chainId = "All"
cres = {}
for r in mol.residues :
if r.id.chainId == chainId or chainId == "All" :
if r.id.chainId in cres :
cres[r.id.chainId].append ( [r.id.position, r] )
else :
cres[r.id.chainId] = [ [r.id.position, r] ]
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
nname = molPath + "__Q__" + mapName + "_" + chainId + ".txt"
#nname = molPath + "__Q__" + mapName + "_" + cid + ".txt"
print ""
print "Saving per-chain & per-residue Q-scores:"
print " -> res=", RES
print " -> file:", nname
print " -> chain:", chainId
fp = open (nname, "w")
fp.write ( "\n" )
fp.write ( "Map: %s\n" % dmap.name )
fp.write ( "Resolution entered (RES): %g\n" % RES )
fp.write ( "Model: %s\n" % mol.name )
fp.write ( "Sigma: %g\n" % sigma )
fp.write ( "\n" )
avgQrna, eq_nucleic = eQ_nucleic(RES, sigma)
avgQprot, eq_protein = eQ_protein(RES, sigma)
avgQIon, eq_ion = eQ_ion(RES, sigma)
avgQWater, eq_water = eQ_water(RES, sigma)
fp.write ( "Protein: expectedQ = %s\n" % eq_protein )
fp.write ( "Nucleic: expectedQ = %s\n" % eq_nucleic )
fp.write ( "Ion: expectedQ = %s\n" % eq_ion )
fp.write ( "Water: expectedQ = %s\n" % eq_water )
fp.write ( "\n" )
fp.write ( "Chain\tType\t# residues\tAvg. Q\tExpectedQ@%.2f\tEst.Res.\n" % RES )
chains = cres.keys()
chains.sort()
for cid in chains :
ress = cres[cid]
type_ats = {}
type_ress = {}
resAtoms = []
for ri, r in ress :
tp = ""
if r.isProt : tp = "Protein"
elif r.isNA : tp = "Nucleic"
elif r.type.upper() in chargedIons : tp = "Ion"
elif r.type.upper() == "HOH" : tp = "Water"
else : tp = r.type
if tp in type_ats : type_ats[tp].extend (r.atoms)
else : type_ats[tp] = r.atoms[:]
if tp in type_ress : type_ress[tp].append ( r )
else : type_ress[tp] = [r]
for rtype, atoms in type_ats.iteritems() :
qs = [at.Q for at in atoms if (at.element.name != "H" and hasattr(at,'Q'))]
if len(qs) == 0 :
continue
avgQ = numpy.average ( qs )
numR = len ( type_ress[rtype] )
formula, estRes = None, None
if "Protein" in rtype :
formula = "=" + eQ_protein(RES,sigma)[1].replace ("RES",'%.2f') % RES
estRes = (avgQ - 1.1192) / -0.1775
elif "Nucleic" in rtype :
formula ="=" + eQ_nucleic(RES,sigma)[1].replace ("RES",'%.2f') % RES
estRes = (avgQ - 0.9973) / -0.1377
elif "Ion" in rtype :
formula = "=" + eQ_ion(RES,sigma)[1].replace ("RES",'%.2f') % RES
estRes = (avgQ - 1.0795) / -0.1103
elif "Water" in rtype :
formula ="=" + eQ_water(RES,sigma)[1].replace ("RES",'%.2f') % RES
estRes = (avgQ - 1.0001) / -0.0895
else :
formula = "?"
estRes = 0.0
fp.write ( "%s\t%s\t%d\t%.2f\t%s\t%.2f\n" % (cid, rtype, numR, avgQ, formula, estRes) )
#print " - cid: %s - %s - %.2f" % (cid, ctypes, cQ)
fp.write ( "\n" )
for cid in cres.keys () :
rs = cres[cid]
rs.sort()
r = rs[0][1]
if r.isProt :
fp.write ( "Protein - Chain %s\t\t\t\t\t\t\t\tAverage over 3 residues\t\t\t\t\tAverage over 5 residues\t\t\t\t\tAverage over 7 residues\t\t\t\t\tAverage over 11 residues\n\n" % cid )
fp.write ( "Chain\tRes\tRes #\tQ_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sideChain\tQ_residue\tExpectedQ@%.2f\t\n" % RES )
elif r.isNA :
fp.write ( "Nucleic Acid - Chain %s\t\t\t\t\t\t\t\t\tAverage over 3 nucleotides\t\t\t\t\t\tAverage over 5 nucleotides\t\t\t\t\t\tAverage over 7 nucleotides\t\t\t\t\t\tAverage over 11 nucleotides\n\n" % cid )
fp.write ( "Chain\tRes\tRes #\tQ_backBone\tQ_sugar\tQ_base\tQ_nucleotide\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sugar\tQ_base\tQ_nucleotide\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sugar\tQ_base\tQ_nucleotide\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sugar\tQ_base\tQ_nucleotide\tExpectedQ@%.2f\t\t" % RES )
fp.write ( "Q_backBone\tQ_sugar\tQ_base\tQ_nucleotide\tExpectedQ@%.2f\t\n" % RES )
else :
fp.write ( "Molecule - Chain %s\n\n" % cid )
fp.write ( "Chain\tMolecule\tMol #\t\t\tQ_molecule\tExpectedQ@%.2f\n" % RES )
ress = []
Qs, AV, CC = [], [], []
for ri, r in rs :
#if not r.isProt and not r.isNA :
# print " - cid: %s - r %d - not prot or RNA" % (cid, r.id.position)
# continue
ress.append (r)
qs = [at.Q for at in r.atoms if (at.element.name != "H" and hasattr(at,'Q'))]
if len(qs) == 0 :
continue
r.Q = numpy.average ( qs )
r.qBB, r.qSC, r.qSugar = 0, 0, 0
if len(r.bbAtoms) > 0 :
r.qBB = numpy.average ( [at.Q for at in r.bbAtoms if at.element.name != "H"] )
if len(r.scAtoms) > 0 :
r.qSC = numpy.average ( [at.Q for at in r.scAtoms if at.element.name != "H"] )
if len(r.sugarAtoms) > 0 :
r.qSugar = numpy.average ( [at.Q for at in r.sugarAtoms if at.element.name != "H"] )
Qs.append ( [r.qBB, r.qSC, r.Q, r.qSugar] )
if 0 :
ad = avgdAts ( r.atoms, dmap )
aSC, aBB = 0, 0
if len(r.scAtoms) > 0 :
aSC = avgdAts ( r.scAtoms, dmap )
if len(r.bbAtoms) > 0 :
aBB = avgdAts ( r.bbAtoms, dmap )
AV.append ( [ad, aBB, aSC] )
if 0 :
cc, ccm = ccAts ( r.atoms, dmap, RES )
ccSC, ccmSC = ccAts ( r.scAtoms, dmap, RES )
ccBB, ccmBB = ccAts ( r.bbAtoms, dmap, RES )
CC.append ( [cc, ccBB, ccSC] )
#CC.append ( [ccm, ccmBB, ccmSugar, ccmBase] )
# averages items in a list over N items before and after
def N ( A, i, ind, N ) :
#for i, a in enumerate ( A ) :
sum, n = 0, 0
for j in range ( i-N, i+N+1 ) :
if j >= 0 and j < len(A) :
sum += A[j][ind]
n += 1.0
return sum/n
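# e.g. N(Qs, i, 2, 1) averages column 2 (the whole-residue Q) over indices
# i-1..i+1, clamped at the ends of the list; window half-widths 1/2/3/5
# produce the "Average over 3/5/7/11 residues" columns written below.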
last_i = None
for i, r in enumerate ( ress ) :
if not hasattr ( r, 'Q' ) or not hasattr (r, 'qBB') :
continue
# fills in missing residues in proteins and rna
if (r.isNA or r.isProt) and last_i != None :
ii = last_i+1
while ii < r.id.position :
# fill gaps
if r.isNA :
fp.write ( "%s\t%s\t%d\t" % (r.id.chainId, "", ii ) )
fp.write ( "\t\t\t\t%f\t\t" % (avgQrna ) )
fp.write ( "\t\t\t\t%f\t\t" % (avgQrna) )
fp.write ( "\t\t\t\t%f\t\t" % (avgQrna) )
fp.write ( "\t\t\t\t%f\t\t" % (avgQrna) )
fp.write ( "\t\t\t\t%f\n" % (avgQrna) )
else :
fp.write ( "%s\t%s\t%d\t\t\t\t%f\t\t" % (r.id.chainId, "", ii, avgQprot ) )
fp.write ( "\t\t\t%f\t\t" % (avgQprot) )
fp.write ( "\t\t\t%f\t\t" % (avgQprot) )
fp.write ( "\t\t\t%f\t\t" % (avgQprot) )
fp.write ( "\t\t\t%f\n" % (avgQprot) )
ii += 1
if r.isNA :
fp.write ( "%s\t%s\t%d\t" % (r.id.chainId, r.type, r.id.position) )
fp.write ( "%f\t%f\t%f\t%f\t%f\t\t" % (r.qBB, r.qSugar, r.qSC, r.Q, avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,3,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,3,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,3,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQrna ) )
fp.write ( "%f\t%f\t%f\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,3,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQrna ) )
elif r.isProt :
if len(r.scAtoms) > 0 :
fp.write ( "%s\t%s\t%d\t%f\t%f\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.qBB, r.qSC, r.Q, avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQprot ) )
fp.write ( "%f\t%f\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQprot ) )
else :
fp.write ( "%s\t%s\t%d\t%f\t\t%f\t%f\t\t" % (r.id.chainId, r.type, r.id.position, r.qBB, r.Q, avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,1), N(Qs,i,2,1), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,2), N(Qs,i,2,2), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\t\t" % (N(Qs,i,0,3), N(Qs,i,2,3), avgQprot ) )
fp.write ( "%f\t\t%f\t%f\n" % (N(Qs,i,0,5), N(Qs,i,2,5), avgQprot ) )
elif r.type.upper() in chargedIons :
fp.write ( "%s\t%s\t%d\t\t\t%f\t%f\n" % (r.id.chainId, r.type, r.id.position, r.Q, avgQIon ) )
elif r.type.upper() == "HOH" :
fp.write ( "%s\t%s\t%d\t\t\t%f\t%f\n" % (r.id.chainId, r.type, r.id.position, r.Q, avgQWater ) )
else :
fp.write ( "%s\t%s\t%d\t\t\t%f\t?\n" % (r.id.chainId, r.type, r.id.position, r.Q ) )
last_i = r.id.position
fp.write ( "\n\n" )
fp.close()
print ""
def CalcRadZ ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Rad-Z Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scS' ) :
ress.append ( r )
print " - residues to do: %d" % len(ress)
for ri, r in enumerate ( ress ) :
r.scZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
r.bbZ = RadZ ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if r.bbZ != None :
scoresBB.append ( r.bbZ )
if r.scZ != None :
scoresSC.append ( r.scZ )
print " - avg radz - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
return numpy.average(scoresBB), numpy.average(scoresSC)
def qwork (num, ress, dmap, allAtTree, log):
print 'qwork %d - %d res, %d - %d' % (num, len(ress), ress[0].id.position, ress[-1].id.position)
for ri, r in enumerate ( ress ) :
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if num == 0 and log :
status ( "Calculating Q scores - %d/%d" % (ri, len(ress)) )
print ".",
def CalcSigma ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :
print "Sigma Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
if not useOld :
ress.append ( r )
elif not hasattr (r, 'scS' ) :
ress.append ( r )
print " - residues to do: %d" % len(ress)
if 0 :
import multiprocessing, threading
N = 4 # multiprocessing.cpu_count()
print " - cores: %d" % N
dn = len(ress) / N
threads = []
for i in range(N):
l = i * dn
h = (i+1)*dn if i != N-1 else len(ress)
#print "t %d, %d-%d" % (i, l, h)
#t = threading.Thread(target=qwork, args=(i,ress[l:h], dmap, allAtTree))
#threads.append(t)
#t.start()
#t = threading.Thread(name='d%d'%i, target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))
#t.setDaemon(True)
#t.start()
#threads.append(t)
#print __name__
if 1 or __name__ == '__main__':
p = multiprocessing.Process(target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))
p.start()
threads.append(p)
for i, t in enumerate(threads) :
print "j %d" % (i)
t.join()
else :
for ri, r in enumerate ( ress ) :
r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )
if log and ri % 10 == 0 :
status ( "Calculating - res %d/%d" % (ri, len(ress)) )
print ".",
scoresBB, scoresSC = [], []
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
if r.bbZ != None : scoresBB.append ( r.bbZ )
if r.scZ != None : scoresSC.append ( r.scZ )
#sc = [x for x in scores if x is not None]
#scSC = [1.0/x for x in scoresSC if x is not None]
#scBB = [1.0/x for x in scoresBB if x is not None]
#print " - %d res, SC min %.2f max %.2f, avg %.2f" % (len(ress), min(scSC), max(scSC), numpy.average(scSC))
print " - avg sigma - side chain %.1f, backbone %.1f" % (numpy.average(scoresSC), numpy.average(scoresBB) )
if 0 :
sByType = {}
rByType = {}
for r in ress :
if r.scZ != None :
if not r.type in sByType :
rByType[r.type] = []
sByType[r.type] = []
rByType[r.type].append ( [r.scZ, r] )
sByType[r.type].append ( [r.scZ] )
avgs = []
for rtype, ra in sByType.iteritems () :
avgs.append ( [numpy.average (ra), rtype] )
from chimera.resCode import protein3to1
from chimera.resCode import nucleic3to1
avgs.sort ( reverse=True, key=lambda x: x[0] )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
mdir, mpfile = os.path.split(dmap.data.path)
foname = mdir + "/" + mapName + "__" + molName + ".txt"
print " - scores to: " + foname
fp = open (foname,"w")
for avgScore, rtype in avgs :
rscores = rByType[rtype]
rscores.sort ( reverse=False, key=lambda x: x[0] )
hr = rscores[0]
R = hr[1]
highestScore = hr[0]
numRes = len(rscores)
rts = ""
if R.isProt : rts = protein3to1[rtype]
else : rts = nucleic3to1[rtype]
print "%s\t%s\t%d\t%f\t%d\t.%s\t%f" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore)
fp.write ( "%s\t%s\t%d\t%f\t%d\t.%s\t%f\n" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore) )
fp.close()
return numpy.average(scoresBB), numpy.average(scoresSC)
def CalcResQ (r, dmap=None, sigma=0.6, allAtTree=None, numPts=8, toRAD=2.0, dRAD=0.1, minD=0.0, maxD=1.0, useOld=False ) :
scQ, bbQ, Q, numSC, numBB, numAts = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
for at in r.atoms :
if at.element.name == "H" :
continue
if not hasattr ( at, 'isBB' ) :
SetBBAts ( at.molecule )
if hasattr (at, 'Q') :
Q += at.Q; numAts += 1.0
if r.isProt or r.isNA :
if at.isBB :
bbQ += at.Q
numBB += 1.0
else :
scQ += at.Q
numSC += 1.0
if r.isProt or r.isNA :
if numSC > 0 :
r.scQ = scQ / numSC
else :
r.scQ = None
if numBB > 0 :
r.bbQ = bbQ / numBB
else :
r.bbQ = None
# average over non-H atoms that actually have a Q (H atoms are skipped above)
r.Q = Q / numAts if numAts > 0 else 0.0
def CalcQ_ ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=False, log=False ) :
print "Q Scores - in parallel"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
ress = []
for r in mol.residues :
if cid == None or r.id.chainId == cid :
ress.append ( r )
print " - residues to do: %d" % len(ress)
import multiprocessing
threads = multiprocessing.cpu_count() / 2
print 'calc q using %d threads' % threads
# Avoid periodic Python context switching.
import sys
original_check_interval = sys.getcheckinterval()
sys.setcheckinterval(1000000000)
# Define thread class for fitting.
from threading import Thread
class Q_Thread(Thread):
def __init__(self, ress, ti):
Thread.__init__(self)
self.ress = ress
self.ti = ti
def run(self):
print "run - %d - %d" % (self.ti, len(ress))
for ri, r in enumerate ( self.ress ) :
#CalcResQ (r, dmap, sigma, allAtTree=allAtTree, numPts=2, toRAD=2.0, dRAD=0.2 )
#print "%d-%d/%d" % (ti,ri/len(self.ress)),
for at in r.atoms :
if at.element.name != "H" :
qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5 )
# Starts threads with each calculating an equal number of fits.
n = len(ress)
g = [ress[(n*c)/threads:(n*(c+1))/threads] for c in range(threads)]
threads = []
for mi, ml in enumerate(g) :
#print "%d - %d, %d-%d" % (mi,len(ml),ml[0].id.position,ml[-1].id.position)
t = Q_Thread(ml,mi)
threads.append(t)
for t in threads:
t.start()
print ""
# Wait for all threads to finish
for t in threads:
t.join()
# Restore periodic context switching.
sys.setcheckinterval(original_check_interval)
# Collect fit results from all threads.
#for t in threads:
# print "",
def CalcQ ( mol, cid, dmap, sigma, useOld=False, log=False ) :
print ""
print "Q Scores"
print " - map: %s" % dmap.name
print " - mol: %s, chain: %s" % (mol.name, cid if cid != None else "_all_")
print " - sigma: %.2f" % sigma
minD, maxD = MinMaxD ( dmap )
print " - mind: %.3f, maxd: %.3f" % (minD, maxD)
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - atoms tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
#allAtTree = None
atoms = []
import time
start = time.time()
#ress = []
for r in mol.residues :
if cid == None or cid == "All" or r.id.chainId == cid :
for at in r.atoms :
if not at.element.name == "H" :
atoms.append ( at )
print " - atoms to do: %d" % len(atoms)
#for ai, at in enumerate ( atoms[0:2] ) :
# qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
from chimera import tasks, CancelOperation
task = tasks.Task('Calculating Q-scores', modal = True)
SetBBAts ( mol )
modi = 100
if len(atoms) > 100000 :
modi = 1000
try :
for ai, at in enumerate ( atoms ) :
at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )
at.bfactor = at.Q
end = time.time()
totSec = end - start
leftTime = ""
leftSec = 0.0
iPerSec = float(ai) / totSec
if iPerSec > 0 :
leftSec = float ( len(atoms) - ai ) / iPerSec
leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )
leftSec = leftSec - leftHour * 60.0 * 60.0
leftMin = numpy.floor ( leftSec / 60.0 )
leftSec = leftSec - leftMin * 60.0
leftTime = "%.0f:%.0f:%.0f" % (leftHour, leftMin, leftSec)
if ai+1 == 100 :
if log :
print " - atom %d/%d - eta: %s" % (ai+1, len(atoms), leftTime)
elif (ai+1) % modi == 0 :
if log :
print " - atom %d/%d - eta: %s" % (ai+1, len(atoms), leftTime)
task.updateStatus( " - Q scores - atom %d/%d - eta: %s" % (ai+1, len(atoms), leftTime) )
except :
print " - something went wrong..."
return None
finally :
task.finished()
end = time.time()
print ""
print " - done, time: %f" % ( end-start )
totSec = end - start
totMin = numpy.floor ( totSec / 60.0 )
totSec = totSec - totMin * 60.0
print " - done, time: %.0f min, %.1f sec" % ( totMin, totSec )
SaveQFile ( mol, cid, dmap, sigma )
Qavg = QStats1 ( mol, cid )
return Qavg
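# Typical call (handles are illustrative): with a Molecule mol and a Volume
# dmap already open in Chimera,
#   Qavg = CalcQ ( mol, None, dmap, 0.6, log=True )
# scores every non-H atom (stored in at.Q and mirrored into the B-factor
# column), writes the *__Q__* file via SaveQFile, and returns the averages
# from QStats1.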
def SaveQFile ( mol, cid, dmap, sigma ) :
if not hasattr ( mol, 'openedAs' ) :
print ""
print " >>> Could not save file with Q-scores - molecule was not opened from file?"
print ""
return
molPath, molExt = os.path.splitext(mol.openedAs[0])
mapName = os.path.splitext(dmap.name)[0]
if hasattr ( mol, 'cif' ) and molExt == '.cif' :
import mmcif
reload ( mmcif )
fout = molPath + "__Q__" + mapName + ".cif"
mmcif.WriteMol ( mol, fout )
else :
nname_ = molPath + "__Q__" + mapName + "_.pdb"
try :
chimera.PDBio().writePDBfile ( [mol], nname_ )
except :
print " - could not save Q-scores file"
return
nname = molPath + "__Q__" + mapName + ".pdb"
fpo = open ( nname, "w" )
fpi = open ( nname_ )
ver = ""
try :
from Segger.mapq import mapqVersion
ver = mapqVersion
print " ----1- version: %s" % mapqVersion
except :
pass
try :
from mapq.mapq import mapqVersion
ver = mapqVersion
print " ----2- version: %s" % mapqVersion
except :
pass
try :
from mapq import mapqVersion
ver = mapqVersion
print " ----3- version: %s" % mapqVersion
except :
pass
fpo.write ( "REMARK 0 \n" )
fpo.write ( "REMARK 0 Q-scores calculated with MapQ\n" )
fpo.write ( "REMARK 0 - sigma %.1f\n" % sigma )
fpo.write ( "REMARK 0 - more info: github.com/gregdp/mapq\n" )
fpo.write ( "REMARK 0 - Q-scores for each atom are stored in B-factor column\n" )
fpo.write ( "REMARK 0 - Model: %s\n" % mol.name )
if cid == None :
fpo.write ( "REMARK 0 - for all atoms\n" )
else :
fpo.write ( "REMARK 0 - for atoms in chain: %s\n" % cid )
fpo.write ( "REMARK 0 - (other atoms have original B-factor values)\n" )
fpo.write ( "REMARK 0 - Map: %s\n" % dmap.name )
fpo.write ( "REMARK 0 \n" )
for l in fpi :
fpo.write (l)
fpo.close()
fpi.close()
print " - saved %s with Q-scores" % nname
from os import remove
try :
remove(nname_)
except :
print " - could not remove %s" % nname_
def QsFromPdbFile ( mol, qfpath ) :
rids = {}
for r in mol.residues :
rids["%d.%s" % (r.id.position,r.id.chainId)] = r
# http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM
try :
fin = open ( qfpath, "r" )
except :
#print " - file not found"
return False
print " - Qs from file: %s" % qfpath
for line in fin :
if line[0:4] == "ATOM" or line[0:6] == "HETATM" :
aname, aloc, cid, resi, occ, bfac = line[12:16].strip(), line[16:17].strip(), line[21], int(line[22:26]), float ( line[54:60] ), float ( line[60:66] )
#if occ < 1.0 :
rid = "%s.%s" % (resi,cid)
if rid in rids :
r = rids[rid]
if aname in r.atomsMap :
ats = r.atomsMap[aname]
found = False
for at in ats :
if at.altLoc == aloc :
at.Q = bfac
at.bfactor = bfac
#at.bfactor = 100.0 * (1.0 - at.Q)
#dval = self.cur_dmap.interpolated_values ( [ at.coord() ], self.cur_mol.openState.xform ).astype(numpy.float64, copy=False)[0]
found = True
if not found :
#print " -xx- %s.%s - atom %s - loc %s" % (resi, cid, aname, aloc)
continue
else :
#print " -xx- %s.%s - atom %s" % (resi,cid, aname)
continue
fin.close ()
return True
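# Usage sketch: once CalcQ has written mol__Q__map.pdb, scores can be
# reloaded without recomputing, e.g.
#   qf = QScoreFileName ( mol, dmap )
#   if QsFromPdbFile ( mol, qf ) : print " - loaded Q-scores"
# Q values are read back from the B-factor field (columns 61-66) of
# ATOM/HETATM records.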
def QsFromCifFile ( mol, qfpath ) :
print " - Qs from file: %s" % qfpath
from mmcif import ReadMol
qmol = ReadMol ( qfpath, log=False )
rids = {}
for r in qmol.residues :
rids["%d.%s" % (r.id.position,r.id.chainId)] = r
numNotFound, numQ, numNoQ = 0, 0, 0
for at in mol.atoms :
rid = "%d.%s" % (at.residue.id.position,at.residue.id.chainId)
if rid in rids :
qres = rids[rid]
if at.name in qres.atomsMap :
found = False
for qat in qres.atomsMap[at.name] :
#print "[%s] [%s]" % (at.altLoc, qat.altLoc)
if at.altLoc == qat.altLoc :
found = True
#print qat.Q
if hasattr ( qat, 'Q' ) :
at.Q = qat.Q
numQ += 1
else :
numNoQ += 1
if not found :
#print " -xx- %s.%s - atom %s - loc %s" % (resi, cid, aname, aloc)
continue
else :
#print " -xx- %s.%s - atom %s" % (resi,cid, aname)
numNotFound += 1
if numNotFound != 0 :
print " - %d/%d atoms not found in q-score file" % (numNotFound, len(mol.atoms))
print " - got Q-scores for %d/%d atoms - %d no Q" % (numQ, len(mol.atoms), numNoQ)
return True
def QScoreFileName ( mol, dmap ) :
molPath = os.path.splitext(mol.openedAs[0])[0]
mapName = os.path.splitext(dmap.name)[0]
qfpath = molPath + "__Q__" + mapName + ".pdb"
return qfpath
def AddSpherePts ( pts, clr, rad, mname = "RAD points" ) :
from chimera import elements, Coord, Atom, MolResId
ptsMol = GetMod ( mname )
res = None
if ptsMol == None:
from chimera import Molecule, openModels
ptsMol = Molecule()
ptsMol.name = mname
ptsMol.isRealMolecule = False
openModels.add ( [ptsMol], noprefs = True )
res = ptsMol.newResidue('marker', chimera.MolResId('1', 1) )
else :
res = ptsMol.residues[0]
for pt in pts :
a = ptsMol.newAtom('', elements.H)
res.addAtom(a)
a.setCoord ( chimera.Point(*pt) ) # ( chimera.Point(*xyz) )
a.radius = rad
a.drawMode = Atom.Sphere
a.color = chimera.MaterialColor ( *clr )
a.surfaceCategory = 'markers'
return ptsMol
def SpherePts ( ctr, rad, N ) :
thetas, phis = [], []
from math import acos, sin, cos, sqrt, pi
for k in range ( 1, N+1 ) :
h = -1.0 + ( 2.0*float(k-1)/float(N-1) )
phis.append ( acos(h) )
thetas.append ( 0 if k == 1 or k == N else
(thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )
pts = [None] * N
for i, theta, phi in zip ( range(N), thetas, phis ):
v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))
#if numpy.abs ( v.length - 1.0 ) > 1e-3 :
# print "x"
pt = ctr + v * rad
pts[i] = pt
return pts
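# The loop above is a spiral-point construction (cf. Saff & Kuijlaars) giving
# approximately uniform samples on a sphere; e.g. SpherePts ( at.coord(), 1.0,
# 8 ) returns 8 points at radius 1 around an atom, as used when sampling the
# reference Gaussian for Q-scores.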
import threading
def Calc_ ( label="", res=0.0 ) :
print "Calc Q scores:", label
from VolumeViewer import Volume
vols = chimera.openModels.list(modelTypes = [Volume])
if len(vols) == 0 :
print " - no volumes loaded"
return
dmap = vols[0]
print " - dmap: %s" % dmap.name
print " - res: %s" % res
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mols = chimera.openModels.list(modelTypes = [Molecule])
if len(mols) == 0 :
print " - no molecules loaded"
return
mol = mols[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
ats = [at for at in mol.atoms if not at.element.name == "H"]
points = _multiscale.get_atom_coordinates ( ats, transformed = False )
print " - search tree: %d/%d ats" % ( len(ats), len(mol.atoms) )
#allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)
allAtTree = None
qs, dr, q, qcc, emr = 0,0,0,0,0
#bbRadZ, scRadZ, scRotaZ = 0,0,0
sigma = 0.4
cid = None
#cid = mol.residues[0].id.chainId
qs = CalcQp ( mol, cid, dmap, sigma=sigma, allAtTree=allAtTree, useOld=False )
print ""
print "Avg. Q scores:"
print ""
tps = qs.keys()
tps.sort()
for tp in tps :
print " - %s : %.2f" % (tp, qs[tp])
print ""
if 1 :
at = 30
fp = None
if os.path.isdir("/Users/greg/Dropbox/_mapsq") :
fp = open ( "/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
elif os.path.isdir("/home/greg/Dropbox/_mapsq") :
fp = open ( "/home/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
elif os.path.isdir("C:/Users/greg/Dropbox/_mapsq") :
fp = open ( "C:/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
else :
fp = open ( "scores%d_Q_allc_%s_sig%.0f.txt" % (at, label, sigma*100.0), "a" )
fp.write ( "%s\t%s\t%s" % (dmap.name, mol.name, res) )
for tp in tps :
fp.write ( "\t%s\t%.2f" % (tp, qs[tp]) )
fp.write ( "\n" )
#nProt = len ( [at for at in mol.atoms if at.residue.isProt == True] )
#nNA = len ( [at for at in mol.atoms if at.residue.isNA == True] )
#fp.write ( "%s\t%s\t%s\t%d\t%d\n" % (dmap.name, mol.name, res, nProt, nNA) )
fp.close ()
def emringer ( dmap, mol ) :
print "----- %s ____________ EMRINGER ____________ %s -----" % (dmap.name, mol.name)
cdir = os.getcwd()
print " - now in: ", cdir
#print " - splitting " + mol.openedAs[0]
mpath, mname = os.path.split ( mol.openedAs[0] )
dpath, dname = os.path.split ( dmap.data.path )
bs = os.path.splitext ( mol.openedAs[0] )[0]
print " - copying mol file... removes symmetry/connect stuff"
fin = open ( mol.openedAs[0], "r" )
fout = open ( bs + "_.pdb", "w" )
for line in fin :
if "ATOM" in line or "HETATM" in line :
fout.write ( line )
fin.close ()
fout.close ()
phPath = "/Users/greg/_mol/phenix-1.14-3260/build/bin/"
#phPath = "/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/"
args = [phPath+'phenix.emringer', dmap.data.path, bs+"_.pdb" ]
print "running: ",
for arg in args : print arg,
print ""
outf = mpath + '/' + '_out.txt'
errf = mpath + '/' + '_err.txt'
fout = open ( outf, "w" )
ferr = open ( errf, "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)
p.wait()
fout.close()
ferr.close()
print " - getting score from " + outf
score = -100
fin = open ( outf )
for l in fin :
if "EMRinger Score:" in l :
s = l [ len("EMRinger Score:")+1 : ]
print "Score: ", s
score = float( s )
print " - found score: %.3f" % score
print " - removing ", bs + "_.pdb"
import shutil
try :
os.remove ( bs + "_.pdb" )
os.remove ( bs + "__emringer.pkl" )
os.remove ( bs + "__emringer.csv" )
shutil.rmtree ( bs + "__emringer_plots" )
print " - done"
except :
print " -- did not find"
return score
def refine ( dmap, mol, res ) :
print "----- %s ____________ REFINE ____________ %s -----" % (dmap.name, mol.name)
cdir = os.getcwd()
print " - now in: ", cdir
#print " - splitting " + mol.openedAs[0]
mpath, mname = os.path.split ( mol.openedAs[0] )
dpath, dname = os.path.split ( dmap.data.path )
bs = os.path.splitext ( mol.openedAs[0] )[0]
print " - copying mol file... removes symmetry/connect stuff"
fin = open ( mol.openedAs[0], "r" )
fout = open ( bs + "_.pdb", "w" )
for line in fin :
if "ATOM" in line or "HETATM" in line :
fout.write ( line )
fin.close ()
fout.close ()
phPath = "/Users/greg/_mol/phenix-1.14-3260/build/bin/"
phPath = "/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/"
args = [phPath+'phenix.real_space_refine', dmap.data.path, bs+"_.pdb", "resolution=%.1f"%res ]
print "running: ",
for arg in args : print arg,
print ""
outf = mpath + '/' + '_out.txt'
errf = mpath + '/' + '_err.txt'
fout = open ( outf, "w" )
ferr = open ( errf, "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)
p.wait()
fout.close()
ferr.close()
print " - getting score from " + outf
score = -100
fin = open ( outf )
for l in fin :
if "EMRinger Score:" in l :
s = l [ len("EMRinger Score:")+1 : ]
print "Score: ", s
score = float( s )
print " - found score: %.3f" % score
print " - removing ", bs + "_.pdb"
import shutil
try :
os.remove ( bs + "_.pdb" )
os.remove ( bs + "__emringer.pkl" )
os.remove ( bs + "__emringer.csv" )
shutil.rmtree ( bs + "__emringer_plots" )
print " - done"
except :
print " -- did not find"
return score
def refdir ( rdir ) :
print "Refining in", rdir
def CalcR_ ( label = "" ) :
print "Calc all scores -", label
from VolumeViewer import Volume
dmap = chimera.openModels.list(modelTypes = [Volume])[0]
print " - dmap: %s" % dmap.name
#fp = open ( "/Users/greg/_data/_mapsq/scores.txt", "a" )
#fp.write ( "%s...\n" % dmap.name.split("_")[0] )
#fp.close ()
from chimera import Molecule
mol = chimera.openModels.list(modelTypes = [Molecule])[0]
print " - mol: %s" % mol.name
SetBBAts ( mol )
mapName = os.path.splitext(dmap.name)[0]
molName = os.path.splitext(mol.name)[0]
ddir, dfile = os.path.split(dmap.data.path)
molFile = mol.openedAs[0]
mdir, mfile = os.path.split(molFile)
print "PhFmap -- " + molFile
RES = 3.0
print " -- res %.1f -- " % RES
outFile = molFile + "_r%.0f" % RES + "_fmodel.ccp4"
if not os.path.isfile ( outFile ) :
phPath = "/usr/local/phenix-1.14-3260/build/bin/"
args = [phPath+'phenix.fmodel', "high_resolution=%.1f"%RES, "scattering_table=electron", "generate_fake_p1_symmetry=True", molFile ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_0_fmodel.log', "w" )
import subprocess
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print ""
args = [phPath+'phenix.mtz2map', "high_resolution=%.1f"%RES, "include_fmodel=true", "scattering_table=electron", molFile, molFile + ".mtz" ]
print "running: ",
for arg in args : print arg,
print ""
fout = open ( mdir + '/' + '_1_mtz2map.log', "w" )
p = subprocess.Popen(args, stdout=fout, cwd=mdir)
p.wait()
fout.close()
print " - renaming to:", outFile
os.rename( molFile + "_fmodel.ccp4", outFile )
os.remove( molFile + ".mtz" )
print " - loading map:", outFile
dm = chimera.openModels.open ( outFile )[0]
molg = MyMolMapX ( mol, mol.atoms, RES, dmap.data.step[0], chimera.Xform.identity() )
fpoints, fpoint_weights = fit_points_g ( molg, 0.1 )
map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )
mmolap, mmcorr1, mmcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Molmap - olap: %f, CC: %f, CCm: %f" % (mmolap, mmcorr1, mmcorr2)
fpoints, fpoint_weights = fit_points_g ( dm.data, 5.0 )
map_values = dmap.interpolated_values ( fpoints, dm.openState.xform )
olap, phcorr1, phcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, phcorr1, phcorr2)
#fpoints, fpoint_weights = fit_points_g ( dmap.data, -1e6 )
#map_values = dm.interpolated_values ( fpoints, dmap.openState.xform )
#olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )
#print "Phenix - olap: %f, CC: %f, CCm: %f" % (olap, corr1, corr2)
print "%f\t%f\t%f\t%f" % (mmcorr1, mmcorr2, phcorr1, phcorr2)
fp = open ( "/Users/greg/Dropbox/_mapsq/scores3_R_%s.txt" % label, "a" )
fp.write ( "%s\t%f\t%f\t%f\t%f\n" % (dmap.name.split("_")[0], mmcorr1, mmcorr2, phcorr1, phcorr2) )
fp.close ()
#[ 0.04964269]
#[ 0.08007674]
#[ 0.08772154]
#[ 0.06052513]
#[ 0.05444193]
#[ 0.05091212]
#[ 0.04454869]
#[ 0.03272544]
#[ 0.036254]
#[ 0.02918004]
def MaskMapResize ( atoms, bound, dmap, fout=None ) :
import _multiscale
import _contour
import _volume
from _contour import affine_transform_vertices as transform_vertices
from VolumeData import grid_indices, zone_masked_grid_data, interpolate_volume_data
points = _multiscale.get_atom_coordinates ( atoms, transformed = True )
#print " %d points" % len(points)
fpoints = points
if 0 :
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, R )
#mdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = "atom masked" )
mat = mdata.full_matrix()
threshold = 1e-3
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
#print " %d points" % len(points)
nz = numpy.nonzero( fpoint_weights )[0]
#print " %d pts nonzero" % len(nz)
if len(nz) > 0 and len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
else :
_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#transform_vertices( fpoints, dmap.data.ijk_to_xyz_transform )
transform_vertices( fpoints, dmap.data.xyz_to_ijk_transform )
#print " - %s mask %d atoms, %d nonzero points" % ( dmap.name, len(atoms), len(nz) )
#transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )
#transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )
#transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )
#bound = 10
li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)
hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)
n1 = hi - li + 1
n2 = hj - lj + 1
n3 = hk - lk + 1
#print " - bounds - %d %d %d --> %d %d %d --> %d %d %d" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )
#nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )
#dmat = dmap.full_matrix()
nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )
nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )
nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )
nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )
O = dmap.data.origin
#print " - %s origin:" % dmap.name, O
nO = ( O[0] + float(li) * dmap.data.step[0],
O[1] + float(lj) * dmap.data.step[1],
O[2] + float(lk) * dmap.data.step[2] )
#print " - new map origin:", nO
ox = round ( nO[0]/dmap.data.step[0] ) * dmap.data.step[0]
oy = round ( nO[1]/dmap.data.step[1] ) * dmap.data.step[1]
oz = round ( nO[2]/dmap.data.step[2] ) * dmap.data.step[2]
nO = ( ox, oy, oz )
#print " - new map origin:", nO
nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
npoints = grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices
transform_vertices ( npoints, ndata.ijk_to_xyz_transform )
# todo - don't interpolate
dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )
#dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )
#nze = numpy.nonzero ( dvals )
nmat = dvals.reshape( (nn3,nn2,nn1) )
ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )
if fout == None :
try : nv = VolumeViewer.volume.add_data_set ( ndata, None )
except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )
dmap_base = os.path.splitext(dmap.name)[0]
dmap_path = os.path.splitext (dmap.data.path)[0]
nv.name = dmap_base + "_masked"
nv.openState.xform = dmap.openState.xform
return nv
else :
from VolumeData import save_grid_data
#d = self.grid_data()
format = save_grid_data(ndata, fout, None, {}, False)
#print " - saved data"
def SetBBAts ( mol ) :
#if hasattr ( mol, "bbats" ) :
# return
#mol.bbats = True
#print " - setting bbAts in %s" % mol.name
for r in mol.residues :
#r.isProt = "C" in r.atomsMap and "CA" in r.atomsMap and "N" in r.atomsMap
#r.isProt = "CA" in r.atomsMap
#r.isNA = "O3'" in r.atomsMap and "O5'" in r.atomsMap
from chimera.resCode import nucleic3to1
from chimera.resCode import protein3to1
protein3to1['HSD'] = protein3to1['HIS']
protein3to1['HSE'] = protein3to1['HIS']
r.isProt = r.type in protein3to1
r.isNA = r.type in nucleic3to1
r.score1 = None
r.score2 = None
if r.isProt :
r.rtype = "prot"
elif r.isNA :
r.rtype = "na"
else :
r.rtype = "?"
if r.isNA :
try :
if nucleic3to1[r.type] == "G" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "C" :
r.baseAt = r.atomsMap["N1"][0]
elif nucleic3to1[r.type] == "A" :
r.baseAt = r.atomsMap["N9"][0]
elif nucleic3to1[r.type] == "U" :
r.baseAt = r.atomsMap["N1"][0]
except :
#print " - baseAt not found - "
pass
r.bbAtoms = []
r.scAtoms = []
r.sugarAtoms = []
if r.isProt :
for a in r.atoms :
if a.element.name == "H" :
a.isBB, a.isSC = False, False
continue
n = a.name
a.isBB = n=="C" or n=="CA" or n=="O" or n=="N" or n=="OT1" or n=="OT2"
a.isSC = not a.isBB
if a.isBB :
r.bbAtoms.append ( a )
else :
r.scAtoms.append ( a )
a.isSugar, a.isBase = False, False
elif r.isNA :
for a in r.atoms :
if a.element.name == "H" :
a.isBB, a.isSC = False, False
continue
n = a.name
a.isBB = n=="P" or n=="O1P" or n=="O2P" or n=="OP1" or n=="OP2" or n=="O5'" or n=="C5'" or n=="O3'"
a.isSugar = n=="C1'" or n=="C2'" or n=="O4'" or n=="O2'" or n=="C3'" or n=="C4'"
#a.isBB = a.isBB or a.isSugar
a.isBase = not a.isBB and not a.isSugar
a.isSC = a.isBase
#if nucleic3to1[r.type] == "G" : a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="C6" or n=="O6" or n=="N1" or n=="C2" or n=="N2" or n=="N3"
#elif nucleic3to1[r.type] == "C" : a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="N4" or n=="C5" or n=="C6"
#elif nucleic3to1[r.type] == "A" : a.isBase = n=="N9" or n=="C8" or n=="N7" or n=="C5" or n=="C4" or n=="N3" or n=="C2" or n=="N1" or n=="C6" or n=="N6"
#elif nucleic3to1[r.type] == "U" : a.isBase = n=="N1" or n=="C2" or n=="O2" or n=="N3" or n=="C4" or n=="O4" or n=="C5" or n=="C6"
#else : #print " -x- NA res %d.%s is ?" % (r.id.position, r.type) break
if a.isBB :
r.bbAtoms.append ( a )
elif a.isSugar :
r.sugarAtoms.append ( a )
else :
r.scAtoms.append ( a )
else :
for a in r.atoms :
a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False
def fit_points_g (fdata, threshold = 1e-5) :
mat = fdata.full_matrix()
import _volume
points = _volume.high_indices(mat, threshold)
fpoints = points.astype(numpy.single)
fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]
nz = numpy.nonzero( fpoint_weights )[0]
if len(nz) < len (fpoint_weights) :
fpoints = numpy.take( fpoints, nz, axis=0 )
fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)
from _contour import affine_transform_vertices
affine_transform_vertices ( fpoints, fdata.ijk_to_xyz_transform )
if 0 : print "FitPoints from %s with threshold %.4f, %d nonzero" % (
fmap.name, threshold, len(nz) )
return fpoints, fpoint_weights
def MyMolMapX2 ( atoms, resolution, step=1.0, xf=None ) :
from math import sqrt, pi
pad = 3*resolution
cutoff_range = 5 # in standard deviations
sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution
from _multiscale import get_atom_coordinates
xyz = get_atom_coordinates(atoms, transformed = False)
# Transform coordinates to local coordinates of the molecule containing
# the first atom. This handles multiple unaligned molecules.
# Or if on_grid is specified transform to grid coordinates.
#m0 = atoms[0].molecule
#xf = m0.openState.xform
if xf :
import Matrix
#Matrix.transform_points(xyz, M.xform_matrix(xf.inverse()))
Matrix.transform_points ( xyz, Matrix.xform_matrix(xf) )
anum = [a.element.number for a in atoms]
grid = bounding_grid(xyz, step, pad, [])
grid.name = ""
sdev = resolution * sigma_factor
add_gaussians(grid, xyz, anum, sdev, cutoff_range, [])
#return grid, molecules
return grid
# -----------------------------------------------------------------------------
#
def bounding_grid(xyz, step, pad, transforms):
xyz_min, xyz_max = point_bounds(xyz, transforms)
origin = [x-pad for x in xyz_min]
from math import ceil
shape = [int(ceil((xyz_max[a] - xyz_min[a] + 2*pad) / step)) for a in (2,1,0)]
from numpy import zeros, float32
matrix = zeros(shape, float32)
from VolumeData import Array_Grid_Data
grid = Array_Grid_Data(matrix, origin, (step,step,step))
return grid
# -----------------------------------------------------------------------------
#
def add_gaussians(grid, xyz, weights, sdev, cutoff_range, transforms = []):
from numpy import zeros, float32, empty
sdevs = zeros((len(xyz),3), float32)
for a in (0,1,2):
sdevs[:,a] = sdev / grid.step[a]
import Matrix as M
if len(transforms) == 0:
transforms = [M.identity_matrix()]
from _gaussian import sum_of_gaussians
ijk = empty(xyz.shape, float32)
matrix = grid.matrix()
for tf in transforms:
ijk[:] = xyz
M.transform_points(ijk, M.multiply_matrices(grid.xyz_to_ijk_transform, tf))
sum_of_gaussians(ijk, weights, sdevs, cutoff_range, matrix)
from math import pow, pi
normalization = pow(2*pi,-1.5)*pow(sdev,-3)
matrix *= normalization
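# The scaling above applies the 3-D Gaussian normalization
# (2*pi)^(-3/2) * sdev^(-3), so each atom contributes a unit-integral
# Gaussian of standard deviation sdev = resolution/(pi*sqrt(2)), matching
# the sigma_factor set in MyMolMapX2 above.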
# -----------------------------------------------------------------------------
#
def point_bounds(xyz, transforms = []):
from _multiscale import bounding_box
if transforms :
from numpy import empty, float32
xyz0 = empty((len(transforms),3), float32)
xyz1 = empty((len(transforms),3), float32)
txyz = empty(xyz.shape, float32)
import Matrix as M
for i, tf in enumerate(transforms) :
txyz[:] = xyz
M.transform_points(txyz, tf)
xyz0[i,:], xyz1[i,:] = bounding_box(txyz)
xyz_min, xyz_max = xyz0.min(axis = 0), xyz1.max(axis = 0)
else:
xyz_min, xyz_max = bounding_box(xyz)
return xyz_min, xyz_max
def GetMod ( name ) :
for m in chimera.openModels.list() :
if m.name == name :
return m
return None
|
{"hexsha": "6a444bd72062dc62167d303475b5937449faeab5", "size": 132894, "ext": "py", "lang": "Python", "max_stars_repo_path": "Segger/qscores.py", "max_stars_repo_name": "gregdp/segger", "max_stars_repo_head_hexsha": "d4c112fd43f0b088145e225f976335800874ebe5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-03-27T22:53:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T09:02:05.000Z", "max_issues_repo_path": "mapq/qscores.py", "max_issues_repo_name": "gregdp/mapq", "max_issues_repo_head_hexsha": "f97709f2ce6a307cae482d879f5f5bbce66c80be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-03-26T22:13:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-21T12:45:48.000Z", "max_forks_repo_path": "Segger/qscores.py", "max_forks_repo_name": "gregdp/segger", "max_forks_repo_head_hexsha": "d4c112fd43f0b088145e225f976335800874ebe5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-05-30T19:10:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T07:04:59.000Z", "avg_line_length": 33.4577039275, "max_line_length": 219, "alphanum_fraction": 0.5041988352, "include": true, "reason": "import numpy,from numpy", "num_tokens": 42160}
|
"""
marchonintime(W0,Z,B,I)
Solve by marching-on-in-time the causal convolution problem defined by `(W0,Z,B)`
up to timestep `I`. Here, `Z` is an array of order 3 that contains a discretisation
of a time translation invariant retarded potential operator. `W0` is the inverse of
the slice `Z[:,:,1]`.
"""
function marchonintime(W0,Z,B,I)
T = eltype(W0)
M,N = size(Z)
@assert M == size(B,1)
x = zeros(T,N,I)
for i in 1:I
b = B[:,i] - convolve(Z,x,i,2)
x[:,i] += W0 * b
(i % 10 == 0) && print(i, "[", I, "] - ")
end
return x
end
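# Usage sketch (sizes are illustrative): with Z[:,:,k] the k-th slice of the
# discretised retarded-potential operator, W0 = inv(Z[:,:,1]) and an
# excitation B of size (M, I),
#   x = marchonintime(W0, Z, B, 200)
# returns an N x 200 matrix of expansion coefficients, one column per step;
# convolve(Z, x, i, 2) supplies the history term at step i.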
using BlockArrays
function convolve(Z::BlockArray, x, i, j_start)
cs = BlockArrays.cumulsizes(Z)
bs = [blocksize(Z, (i,1))[1] for i in 1:nblocks(Z,1)]
# @show bs
T = eltype(eltype(Z))
y = PseudoBlockVector{T}(undef,bs)
fill!(y,0)
for I in 1:nblocks(Z,1)
# xI = view(x, cs[1][I] : cs[1][I+1]-1, :)
for J in 1:nblocks(Z,2)
xJ = view(x, cs[2][J] : cs[2][J+1]-1, :)
isassigned(Z.blocks, I, J) || continue
ZIJ = Z[Block(I,J)].banded
# @show size(xJ) size(ZIJ)
# @show size(y[Block(I)])
y[Block(I)] .+= convolve(ZIJ, xJ, i, j_start)
end
end
return y
end
function marchonintime(W0,Z::BlockArray,B,I)
T = eltype(W0)
M,N = size(W0)
@assert M == size(B,1)
x = zeros(T,N,I)
for i in 1:I
R = [ B[j][i] for j in 1:N ]
S = convolve(Z,x,i,2)
# @show size(R)
# @show size(S)
# b = R - convolve(Z,x,i,2)
b = R - S
x[:,i] += W0 * b
(i % 10 == 0) && print(i, "[", I, "] - ")
end
return x
end
|
{"hexsha": "93101da382d0a4605f46d04b44408e19322b4252", "size": 1689, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/timedomain/motlu.jl", "max_stars_repo_name": "RodVoskamp/BEAST.jl", "max_stars_repo_head_hexsha": "a56309d894dcbab85e514089ddee22e1ae9e53ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/timedomain/motlu.jl", "max_issues_repo_name": "RodVoskamp/BEAST.jl", "max_issues_repo_head_hexsha": "a56309d894dcbab85e514089ddee22e1ae9e53ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/timedomain/motlu.jl", "max_forks_repo_name": "RodVoskamp/BEAST.jl", "max_forks_repo_head_hexsha": "a56309d894dcbab85e514089ddee22e1ae9e53ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8382352941, "max_line_length": 83, "alphanum_fraction": 0.50562463, "num_tokens": 597}
|
#include <cmath>
#include <iostream>
#include <fstream>
#include <assert.h>
#include <string>
#include <vector>
#include <map>
#include <boost/tokenizer.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include "parse_speedup.h"
// Cores for which speedup data is available.
static const size_t NUM_SPEEDUP_POINTS = 4;
static int interp_cores[NUM_SPEEDUP_POINTS] = { 2, 4, 8, 16 };
static std::map<std::string, loop_data*> loop_data_map;
/* This function converts absolute speedups on x cores into marginal speedup.
* HELIX speedup data is given in absolute terms, but it is easier for the
* allocators to work with marginal speedups.
*/
/* ========================================================================== */
static void ConvertToMarginalSpeedup(double* speedup, int num_points) {
double temp1 = speedup[0], temp2 = 0;
for (int i = 1; i < num_points; i++) {
temp2 = speedup[i];
speedup[i] -= temp1;
temp1 = temp2;
}
}
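// Worked example (numbers are illustrative): absolute speedups
// {1.8, 3.2, 5.5, 8.0} on {2, 4, 8, 16} cores become marginal speedups
// {1.8, 1.4, 2.3, 2.5} -- every entry after the first is the gain over the
// previous measured core count.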
/* Linearly interpolates input speedup points. speedup_in is an array that
* contains speedup values for 2, 4, 8, and 16 cores. speedup_out is an array
* that has linearly interpolated values for 2-16 cores. The zeroth element of
* speedup_out = 0, because there is no speedup with just one core.
*/
/* ========================================================================== */
static void InterpolateSpeedup(double* speedup_in, double* speedup_out) {
// Copy the existing data points.
for (size_t i = 0; i < NUM_SPEEDUP_POINTS; i++)
speedup_out[interp_cores[i] - 1] = speedup_in[i];
for (size_t i = 0; i < NUM_SPEEDUP_POINTS - 1; i++) {
double slope =
(speedup_in[i + 1] - speedup_in[i]) / (interp_cores[i + 1] - interp_cores[i]);
// Interpolate.
for (int j = interp_cores[i] + 1; j < interp_cores[i + 1]; j++) {
speedup_out[j - 1] = slope * (j - interp_cores[i + 1]) + speedup_in[i + 1];
}
}
}
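// Example (illustrative values): with speedup_in = {1.8, 3.2, 5.5, 8.0},
// speedup_out[1] = 1.8 (2 cores) and speedup_out[3] = 3.2 (4 cores), while
// the 3-core entry speedup_out[2] is interpolated as 0.7*(3-4) + 3.2 = 2.5.
// Note speedup_out[0] (1 core) is not written by this routine -- per the
// comment above it is expected to be 0.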
/* Parses a comma separated value file that contains predicted speedups for
* each loop when run on 2,4,8,and 16 cores and stores the data in a map.
*/
/* ========================================================================== */
void LoadHelixSpeedupModelData(const std::string filepath) {
using std::string;
using boost::tokenizer;
using boost::escaped_list_separator;
string line;
std::ifstream speedup_loop_file(filepath);
if (speedup_loop_file.is_open()) {
#ifdef PARSE_DEBUG
std::cout << "Cores:\t\t";
for (size_t j = 1; j <= NUM_SPEEDUP_POINTS; j++)
std::cout << j << "\t";
std::cout << std::endl;
#endif
while (getline(speedup_loop_file, line)) {
// Ignore comments (lines starting with //).
if (!boost::starts_with(line.c_str(), "//")) {
tokenizer<escaped_list_separator<char>> tok(line);
string loop_name;
double* speedup_data = new double[NUM_SPEEDUP_POINTS];
double serial_runtime = 0;
double serial_runtime_variance = 0;
size_t i = 0;
bool first_iteration = true;
for (auto it = tok.begin(); it != tok.end(); ++it) {
if (first_iteration) {
loop_name = *it;
first_iteration = false;
} else if (i < NUM_SPEEDUP_POINTS) {
// Speedup data points.
speedup_data[i] = atof(it->c_str());
i++;
} else {
// Serial runtime and variance.
serial_runtime = atof(it->c_str());
it++;
serial_runtime_variance = atof(it->c_str());
}
}
loop_data* data = new loop_data();
data->speedup = speedup_data;
data->serial_runtime = serial_runtime;
data->serial_runtime_variance = serial_runtime_variance;
loop_data_map[loop_name] = data;
#ifdef PARSE_DEBUG
std::cout << loop_name << " speedup:\t";
for (size_t j = 0; j < NUM_SPEEDUP_POINTS; j++)
std::cout << speedup_data[j] << "\t";
std::cout << std::endl;
#endif
}
}
#ifdef PARSE_DEBUG
std::cout << std::endl;
#endif
} else {
std::cerr << "Speedup file could not be opened.";
exit(1);
}
}
// Returns the core scaling attributes of a loop.
std::vector<double> GetHelixLoopScaling(const std::string& loop_name) {
double* res_raw = loop_data_map[loop_name]->speedup;
assert(res_raw != NULL);
std::vector<double> res(NUM_SPEEDUP_POINTS + 1);
res[0] = 1; // for 1 core, speedup is 1.
for (size_t i = 0; i < NUM_SPEEDUP_POINTS; i++)
res[i + 1] = res_raw[i];
return res;
}
// Returns the full loop scaling attributes of a loop
loop_data* GetHelixFullLoopData(const std::string& loop_name) { return loop_data_map[loop_name]; }
|
{"hexsha": "19880de9a1d9f7f0a2cc1d1cee1de760efa4dcf9", "size": 5089, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "xiosim/pintool/parse_speedup.cpp", "max_stars_repo_name": "s-kanev/XIOSim", "max_stars_repo_head_hexsha": "9673bbd15ba72c9cce15243a462bffb5d9ded9ae", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 55.0, "max_stars_repo_stars_event_min_datetime": "2015-05-29T19:59:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T03:08:15.000Z", "max_issues_repo_path": "xiosim/pintool/parse_speedup.cpp", "max_issues_repo_name": "s-kanev/XIOSim", "max_issues_repo_head_hexsha": "9673bbd15ba72c9cce15243a462bffb5d9ded9ae", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-04-03T04:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2015-04-03T04:40:26.000Z", "max_forks_repo_path": "xiosim/pintool/parse_speedup.cpp", "max_forks_repo_name": "s-kanev/XIOSim", "max_forks_repo_head_hexsha": "9673bbd15ba72c9cce15243a462bffb5d9ded9ae", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2015-04-03T00:28:32.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-01T20:53:58.000Z", "avg_line_length": 39.1461538462, "max_line_length": 98, "alphanum_fraction": 0.5561013952, "num_tokens": 1213}
|
[STATEMENT]
lemma angle_actrans_conj: "|~ (\<langle>F \<and> G\<rangle>_v) = (\<langle>F\<rangle>_v \<and> \<langle>G\<rangle>_v)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. |~ \<langle>F \<and> G\<rangle>_v = (\<langle>F\<rangle>_v \<and> \<langle>G\<rangle>_v)
[PROOF STEP]
by (auto simp: angle_actrans_def actrans_def unch_def)
|
{"llama_tokens": 148, "file": "TLA_PreFormulas", "length": 1}
|
[STATEMENT]
lemma clm_decompose: "\<Turnstile>(cl(c) \<^bold>\<rightarrow> cl(c) \<^bold>\<frown> cl(c))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<longrightarrow> (\<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1)
[PROOF STEP]
proof (rule allI|rule impI)+
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
fix ts v
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
assume assm: "ts,v \<Turnstile> cl(c)"
[PROOF STATE]
proof (state)
this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
have restr:"restrict v (clm ts) c = lan v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. restrict v (clm ts) c = lan v
[PROOF STEP]
using assm
[PROOF STATE]
proof (prove)
using this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
goal (1 subgoal):
1. restrict v (clm ts) c = lan v
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
restrict v (clm ts) c = lan v
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
have len_ge_zero:"\<parallel>len v ts c\<parallel> > 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < \<parallel>len v ts c\<parallel>
[PROOF STEP]
using assm
[PROOF STATE]
proof (prove)
using this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
goal (1 subgoal):
1. 0 < \<parallel>len v ts c\<parallel>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < \<parallel>len v ts c\<parallel>
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
have len:"len v ts c = ext v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. len v ts c = ext v
[PROOF STEP]
using assm
[PROOF STATE]
proof (prove)
using this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
goal (1 subgoal):
1. len v ts c = ext v
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
len v ts c = ext v
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
obtain v1 v2 where chop:"(v=v1\<parallel>v2) \<and> \<parallel>ext v1\<parallel> > 0 \<and> \<parallel>ext v2\<parallel> > 0 "
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>v1 v2. v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assm view.horizontal_chop_non_empty
[PROOF STATE]
proof (prove)
using this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
0 < \<parallel>ext ?v\<parallel> \<longrightarrow> (\<exists>u w. ?v=u\<parallel>w \<and> 0 < \<parallel>ext u\<parallel> \<and> 0 < \<parallel>ext w\<parallel>)
goal (1 subgoal):
1. (\<And>v1 v2. v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using length_split
[PROOF STATE]
proof (prove)
using this:
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
0 < \<parallel>ext ?v\<parallel> \<longrightarrow> (\<exists>u w. ?v=u\<parallel>w \<and> 0 < \<parallel>ext u\<parallel> \<and> 0 < \<parallel>ext w\<parallel>)
\<forall>ts v. 0 < \<parallel>ext v\<parallel> \<longrightarrow> (\<exists>va u. v=va\<parallel>u \<and> 0 < \<parallel>ext va\<parallel> \<and> 0 < \<parallel>ext u\<parallel>)
goal (1 subgoal):
1. (\<And>v1 v2. v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
from chop and len
[PROOF STATE]
proof (chain)
picking this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
[PROOF STEP]
have len_v1:"len v1 ts c = ext v1"
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
goal (1 subgoal):
1. len v1 ts c = ext v1
[PROOF STEP]
using len_view_hchop_left
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
len ?v ?ts ?c = ext ?v \<and> ?v=?v1.0\<parallel>?v2.0 \<longrightarrow> len ?v1.0 ?ts ?c = ext ?v1.0
goal (1 subgoal):
1. len v1 ts c = ext v1
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
len v1 ts c = ext v1
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
from chop and len
[PROOF STATE]
proof (chain)
picking this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
[PROOF STEP]
have len_v2:"len v2 ts c = ext v2"
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
goal (1 subgoal):
1. len v2 ts c = ext v2
[PROOF STEP]
using len_view_hchop_right
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v ts c = ext v
len ?v ?ts ?c = ext ?v \<and> ?v=?v1.0\<parallel>?v2.0 \<longrightarrow> len ?v2.0 ?ts ?c = ext ?v2.0
goal (1 subgoal):
1. len v2 ts c = ext v2
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
len v2 ts c = ext v2
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
from chop and restr
[PROOF STATE]
proof (chain)
picking this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
restrict v (clm ts) c = lan v
[PROOF STEP]
have restr_v1:"restrict v1 (clm ts) c = lan v1"
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
restrict v (clm ts) c = lan v
goal (1 subgoal):
1. restrict v1 (clm ts) c = lan v1
[PROOF STEP]
by (metis (no_types, lifting) hchop_def restriction.restriction_stable1)
[PROOF STATE]
proof (state)
this:
restrict v1 (clm ts) c = lan v1
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
from chop and restr
[PROOF STATE]
proof (chain)
picking this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
restrict v (clm ts) c = lan v
[PROOF STEP]
have restr_v2:"restrict v2 (clm ts) c = lan v2"
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
restrict v (clm ts) c = lan v
goal (1 subgoal):
1. restrict v2 (clm ts) c = lan v2
[PROOF STEP]
by (metis (no_types, lifting) hchop_def restriction.restriction_stable2)
[PROOF STATE]
proof (state)
this:
restrict v2 (clm ts) c = lan v2
goal (1 subgoal):
1. \<And>ts v. 0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1 \<Longrightarrow> \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
from chop and len_v1 len_v2 restr_v1 restr_v2
[PROOF STATE]
proof (chain)
picking this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v1 ts c = ext v1
len v2 ts c = ext v2
restrict v1 (clm ts) c = lan v1
restrict v2 (clm ts) c = lan v2
[PROOF STEP]
show "ts,v \<Turnstile>cl(c) \<^bold>\<frown> cl(c)"
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v1 ts c = ext v1
len v2 ts c = ext v2
restrict v1 (clm ts) c = lan v1
restrict v2 (clm ts) c = lan v2
goal (1 subgoal):
1. \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
using hchop_def assm
[PROOF STATE]
proof (prove)
using this:
v=v1\<parallel>v2 \<and> 0 < \<parallel>ext v1\<parallel> \<and> 0 < \<parallel>ext v2\<parallel>
len v1 ts c = ext v1
len v2 ts c = ext v2
restrict v1 (clm ts) c = lan v1
restrict v2 (clm ts) c = lan v2
?v=?u\<parallel>?w \<equiv> R_Chop(ext ?v,ext ?u,ext ?w) \<and> lan ?v = lan ?u \<and> lan ?v = lan ?w \<and> own ?v = own ?u \<and> own ?v = own ?w \<and> more ?v = more ?w \<and> more ?v = more ?u
0 < \<parallel>ext v\<parallel> \<and> len v ts c = ext v \<and> restrict v (clm ts) c = lan v \<and> |lan v| = 1
goal (1 subgoal):
1. \<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
\<exists>va u. v=va\<parallel>u \<and> (0 < \<parallel>ext va\<parallel> \<and> len va ts c = ext va \<and> restrict va (clm ts) c = lan va \<and> |lan va| = 1) \<and> 0 < \<parallel>ext u\<parallel> \<and> len u ts c = ext u \<and> restrict u (clm ts) c = lan u \<and> |lan u| = 1
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 5831, "file": "Hybrid_Multi_Lane_Spatial_Logic_HMLSL", "length": 35}
|
#to force cpu use
#import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
import tensorflow as tf
from tensorflow import keras
from keras.utils import plot_model
from keras.models import load_model
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose

# shape = (1060, 2, 128, 128) -convert-> (1060, 128, 128, 2)
with open('train_input.pickle', mode='rb') as f:
    train_input = pickle.load(f)
train_input = train_input.transpose(0, 2, 3, 1)

with open('train_output.pickle', mode='rb') as f:
    train_output = pickle.load(f)

with open('test_input.pickle', mode='rb') as f:
    test_input = pickle.load(f)
test_input = test_input.transpose(0, 2, 3, 1)

with open('test_output.pickle', mode='rb') as f:
    test_output = pickle.load(f)

print(np.shape(train_input), np.shape(train_output))

model = Sequential([
    Conv2D(4, kernel_size=(3, 3), padding="same", activation='relu',
           input_shape=(128, 128, 2), data_format='channels_last'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(8, kernel_size=(3, 3), padding="same", activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(16, kernel_size=(3, 3), padding="same", activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2DTranspose(16, kernel_size=(3, 3), strides=2, padding="same", activation='relu'),
    Conv2DTranspose(4, kernel_size=(3, 3), strides=2, padding="same", activation='relu'),
    Conv2DTranspose(1, kernel_size=(3, 3), strides=2, padding="same", activation='relu'),
])
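
# Shape sketch for the model above (an illustrative note; the shapes follow
# from the (128, 128, 2) input, "same" padding, and the 2x pools/strides):
#   (128, 128, 2) --3x conv+pool--> (16, 16, 16)
#   (16, 16, 16) --3x stride-2 transposed conv--> (128, 128, 1)
# i.e., two stacked input frames in, one predicted (interpolated) frame out.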

# saves the model summary into a txt file
with open("model.txt", mode='w') as f:
    stringlist = []
    model.summary(print_fn=lambda x: stringlist.append(x))
    str_model_summary = "\n".join(stringlist)
    f.write(str_model_summary)

# saves the model architecture into a png file
plot_model(model, to_file='model.png')

sgd = SGD(lr=0.00000001)
model.compile(loss='mse',
              optimizer=sgd,
              metrics=['accuracy'])

import os
import os.path

# resume training from a previously saved model if one exists
PATH = './my_model.h5'
if os.path.isfile(PATH) and os.access(PATH, os.R_OK):
    model = load_model('my_model.h5')

history = model.fit(train_input, train_output,
                    #initial_epoch = 299,
                    epochs=40000,
                    batch_size=64,
                    validation_data=(test_input, test_output),
                    verbose=2)

test_loss, test_acc = model.evaluate(test_input, test_output)
print('Test loss:', test_loss, 'Test accuracy:', test_acc)
model.save('my_model.h5')

# test phase: predict the interpolated frame for one test sample
# (the prediction is kept in pred_output so the ground-truth test_output
# array is not shadowed)
test_index = 100
test_input_1 = test_input[test_index, :, :, 0]
test_input_2 = test_input[test_index, :, :, 1]
pred_output = model.predict(np.array([test_input[test_index, :]]))

plt.subplot(131)
plt.imshow(test_input_1)
plt.subplot(132)
plt.imshow(pred_output[0, :, :, 0])
plt.subplot(133)
plt.imshow(test_input_2)
plt.savefig('test.png', pad_inches=0.1)  # pad_inches prevents the output png from having excess padding
plt.show()
plt.cla()
plt.clf()

# plots the loss and val_loss
plt.xlabel("epochs")
plt.ylabel("loss")
plt.plot(history.history['loss'], label="loss")
plt.plot(history.history['val_loss'], label="val_loss")
plt.legend(loc='upper right')
plt.title('loss and validation_loss')

# saves the plot into png and displays the plot
plt.savefig('train1.png', pad_inches=0.1)  # pad_inches prevents the output png from having excess padding
plt.show()
|
{"hexsha": "ac6338291dfa587f7ae8db1a9f453102f5865b6b", "size": 3536, "ext": "py", "lang": "Python", "max_stars_repo_path": "cnn.py", "max_stars_repo_name": "rayanti/frame_interpolation_CNN", "max_stars_repo_head_hexsha": "65fa55886e87cb1c2f1dee0bb315e550b29d9601", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cnn.py", "max_issues_repo_name": "rayanti/frame_interpolation_CNN", "max_issues_repo_head_hexsha": "65fa55886e87cb1c2f1dee0bb315e550b29d9601", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cnn.py", "max_forks_repo_name": "rayanti/frame_interpolation_CNN", "max_forks_repo_head_hexsha": "65fa55886e87cb1c2f1dee0bb315e550b29d9601", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6761904762, "max_line_length": 124, "alphanum_fraction": 0.7030542986, "include": true, "reason": "import numpy", "num_tokens": 949}
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from time import time

import numpy as np
import tensorflow.compat.v1 as tf
import blocksparse.ewops as ew
import blocksparse.transformer as trans
from tensorflow.python.ops import gradient_checker

out = 0
bench = 0

shapes = [
    ( 2,  2, 1024, 1024),
    ( 4,  4,  768,  768),
    ( 4,  4,  544,  544),
    ( 4,  4,  512,  512),
    ( 8,  8,  256,  256),
    (16, 16,  128,  128),
    (32, 32,   64,   64),
    (64, 64,   32,   32),

    # (1, 2, 1024, 1024),
    # (1, 2,  512,  512),
    # (1, 2,  256,  256),
    # (1, 2,  128,  128),
    # (1, 2,   64,   64),
    # (1, 2,   32,   32),

    # (1, 2, 1024, 1024-1),
    # (1, 2,  512,  512+1),
    # (1, 2,  256,  256+1),
    # (1, 2,  128,  128+1),
    # (1, 2,   64,   64+1),
    # (1, 2,   32,   32+1),
]


class TopKTest(tf.test.TestCase):

    def testTopK(self):

        config = tf.ConfigProto(
            intra_op_parallelism_threads=1,
            inter_op_parallelism_threads=1)

        with self.test_session(config=config) as sess, tf.device("/gpu:0"):

            for shape in shapes:

                topK = shape[-1] // 4  # 25% sparsity

                np.random.seed(int(time()))
                cpuX = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)
                cpuE = np.random.uniform(-1.0, 1.0, shape).astype(np.float32)

                X = tf.placeholder(tf.float32, cpuX.shape)
                E = tf.placeholder(tf.float32, cpuE.shape)

                for mask_dims in (0, 2, 3):

                    if mask_dims == 0:
                        mask = M = m_shape = None
                        feed_dict = {X: cpuX, E: cpuE}
                    else:
                        m_shape = [1 for n in shape]
                        m_shape[-mask_dims:] = shape[-mask_dims:]
                        mask = np.zeros(m_shape, dtype=np.float32)
                        if mask_dims == 2:
                            for y, x in np.ndindex(mask.shape[-2:]):
                                if x <= y: mask[:, :, y, x] = 3.0
                        elif mask_dims == 3:
                            for z, y, x in np.ndindex(mask.shape[-3:]):
                                if x <= y: mask[:, z, y, x] = (z + 1) * 3.0

                        M = tf.placeholder(tf.float32, mask.shape)
                        feed_dict = {X: cpuX, E: cpuE, M: mask}

                    for dtype in (tf.float32, ):  # tf.float16, tf.bfloat16

                        rtol = 1e-4 if dtype is tf.float32 else 1e-1

                        Y = ew.float_cast(X, dtype=dtype)
                        #Y = trans.masked_top_k_softmax(Y, topK, mask=M, scale=2.0)
                        Y = trans.masked_softmax(Y, mask=M, scale=2.0, bench=bench)
                        Y = ew.float_cast(Y, dtype=tf.float32, dx_dtype=dtype)
                        D = tf.gradients(Y, [X], E)

                        #devY, = sess.run([Y], feed_dict)
                        devY, (devDX,) = sess.run([Y, D], feed_dict)
                        #devY, (devDX,), tfY = sess.run([Y, D, tf.nn.top_k(X, topK)], feed_dict)

                        # gradient_checker tests are insanely slow
                        # if True:
                        #     x = tf.constant(cpuX)
                        #     m = tf.constant(mask)
                        #     y = trans.masked_top_k_softmax(x, topK, mask=m)
                        #     error = gradient_checker.compute_gradient_error(x, shape, y, shape)  # , extra_feed_dict={ x: cpuX, m: mask }
                        #     assert error < 0.01, error

                        if bench == 0:

                            # cpuY = trans.masked_top_k_softmax_test(cpuX, topK, mask=mask, scale=2.0)
                            # cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)

                            cpuY = trans.masked_softmax_test(cpuX, mask=mask, scale=2.0)
                            cpuDX = trans.masked_softmax_grad_test(cpuE, cpuY, mask=mask, scale=2.0)

                            difY = np.abs(cpuY - devY)
                            difDX = np.abs(cpuDX - devDX)

                            # np.int was removed in NumPy 1.24+; the builtin int is equivalent here.
                            cntY = (difY > rtol).astype(int).sum() / difY.size
                            cntDX = (difDX > rtol).astype(int).sum() / difDX.size

                            print("%s, shape:%18s, mask:%18s, errY:%.5f, errDX:%.5f" % (dtype.name, str(shape), str(m_shape), cntY, cntDX))

                            if out:
                                np.savetxt("cpuY.txt",  cpuY.reshape(-1, shape[-1]),  fmt="%6.3f")
                                np.savetxt("devY.txt",  devY.reshape(-1, shape[-1]),  fmt="%6.3f")
                                np.savetxt("cpuDX.txt", cpuDX.reshape(-1, shape[-1]), fmt="%6.3f")
                                np.savetxt("devDX.txt", devDX.reshape(-1, shape[-1]), fmt="%6.3f")
                                np.savetxt("difDX.txt", difDX.reshape(-1, shape[-1]), fmt="%6.3f")


if __name__ == "__main__":
    tf.test.main()
|
{"hexsha": "228f2871e2d9c1405aad29217c018a56541c32d1", "size": 5114, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/top_k_test.py", "max_stars_repo_name": "AlumiK/blocksparse", "max_stars_repo_head_hexsha": "bc265b5e404f68b27229cb445ea48bf5b5f3bef7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/top_k_test.py", "max_issues_repo_name": "AlumiK/blocksparse", "max_issues_repo_head_hexsha": "bc265b5e404f68b27229cb445ea48bf5b5f3bef7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/top_k_test.py", "max_forks_repo_name": "AlumiK/blocksparse", "max_forks_repo_head_hexsha": "bc265b5e404f68b27229cb445ea48bf5b5f3bef7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0381679389, "max_line_length": 139, "alphanum_fraction": 0.4546343371, "include": true, "reason": "import numpy", "num_tokens": 1464}
|
"""
Run preprocessing on the data using this script. See the README.md for more details.
"""
import sqlite3
import os
import datetime
import logging
import sys
import glob
import pandas as pd
import numpy as np
from .utils import create_adjacent_stop_pairs, bank_holidays_2018
logging.basicConfig(
filename=f"/home/team13/logs/preprocessing/{sys.argv[1]}_{datetime.datetime.now()}",
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S'
)
stop_pairs_2021 = list(pd.read_csv("~/model_output/stop_pairs_2021.csv")['stop_pair'].unique())
if sys.argv[1] == "create_adjacent_stop_pairs":
conn = sqlite3.connect("/home/team13/db/database/DublinBusHistoric_typed.db")
dates = pd.read_sql("SELECT DISTINCT DAYOFSERVICE FROM trips", conn)
dates = pd.to_datetime(dates['DAYOFSERVICE'], format="%d-%b-%y %H:%M:%S")
for date in dates:
logging.info(f"Creating adjacent pairs for {date}")
query_date = date.strftime("%d-%b-%y %H:%M:%S").upper()
LEAVTIMES_QUERY = """
select TRIPID, PROGRNUMBER, STOPPOINTID, ACTUALTIME_ARR, ACTUALTIME_DEP
from leavetimes
WHERE DAYOFSERVICE == "{query_date}"
"""
TRIPS_QUERY = """
select DAYOFSERVICE, TRIPID, LINEID, ROUTEID, DIRECTION
from trips
WHERE DAYOFSERVICE == "{query_date}"
"""
leavetimes = pd.read_sql(
LEAVTIMES_QUERY.format(query_date=query_date), conn)
trips = pd.read_sql(TRIPS_QUERY.format(query_date=query_date), conn)
# Data quality checks
# Remove any rows from leavetimes where the ACTUALTIME_ARR is greater than
# the ACTUALTIME_DEP (i.e., a bus cannot arrive at a stop after it's
# already supposed to have departed)
leavetimes = leavetimes[leavetimes['ACTUALTIME_ARR'] <= leavetimes['ACTUALTIME_DEP']]
# Join leavetimes and trips
leavetimes_trips = leavetimes.join(
trips.set_index('TRIPID'), on='TRIPID')
stop_pairs_df = create_adjacent_stop_pairs(leavetimes_trips)
for dep_stop, arr_stop in list(
stop_pairs_df.groupby(['departure_stop', 'arrival_stop'])[
['travel_time']].mean().index
):
if f"{int(dep_stop)}_to_{int(arr_stop)}" in stop_pairs_2021:
res = stop_pairs_df[(stop_pairs_df['departure_stop'] == dep_stop) & (
stop_pairs_df['arrival_stop'] == arr_stop)]
path = f"/home/team13/data/adjacent_stop_pairs/{int(dep_stop)}_to_{int(arr_stop)}/"
if not os.path.exists(path):
os.mkdir(path)
file_name = f'{int(dep_stop)}_to_{int(arr_stop)}_{query_date}'
res.sort_values('time_departure').to_parquet(
path + f'{file_name}.parquet', index=False)
elif sys.argv[1] == "features":
for stop_pair in glob.glob("/home/team13/data/adjacent_stop_pairs/*"):
dfs = []
for parquet_file in glob.glob(f"{stop_pair}/*"):
dfs.append(pd.read_parquet(parquet_file))
stop_pair = stop_pair.split("/")[-1]
stop_pair_df = pd.concat(dfs, ignore_index=True)
logging.info(f"{stop_pair_df.shape[0]} rows for {stop_pair}")
# Data quality checks
if (stop_pair_df[stop_pair_df['travel_time'] < 0].shape[0]) > 0:
invalid_rows = stop_pair_df[stop_pair_df['travel_time'] < 0].index
logging.info(f"Dropping {len(invalid_rows)} rows where calculated travel" +
"time is < 0")
stop_pair_df = stop_pair_df.drop(invalid_rows)
# Outlier detection for TRAVEL_TIME
q25 = np.percentile(stop_pair_df['travel_time'], 25)
q75 = np.percentile(stop_pair_df['travel_time'], 75)
iqr = q75 - q25
cut_off = iqr * 3
lower, upper = q25 - cut_off, q75 + cut_off
outliers = stop_pair_df[
(stop_pair_df['travel_time'] < lower) |
(stop_pair_df['travel_time'] > upper)
]
outliers_pct = (outliers.shape[0] / stop_pair_df.shape[0])*100
if outliers.shape[0] > 0:
logging.info(
f"Identified {outliers.shape[0]} outliers {outliers_pct:.2f}%"
)
# Don't drop outliers if they constitute more than 10% of data
if outliers_pct < 10:
logging.info("Dropping outliers because percentage of outliers is below 10%")
stop_pair_df = stop_pair_df[
(stop_pair_df['travel_time'] >= lower) &
(stop_pair_df['travel_time'] <= upper)
]
logging.info(f"{stop_pair_df.shape[0]} remaining rows for {stop_pair}")
stop_pair_df['DAYOFSERVICE'] = pd.to_datetime(
stop_pair_df['DAYOFSERVICE'], format="%d-%b-%y %H:%M:%S")
stop_pair_df['hour'] = (stop_pair_df['time_departure'] / (60 * 60)).astype(int)
# Dublin Bus data uses hours >= 24 for the next day (e.g., 25:00 for 1am)
# Move the day of service to the next day and convert hour back to 24hr clock
stop_pair_df.loc[stop_pair_df['hour'] >= 24,
'DAYOFSERVICE'] = stop_pair_df['DAYOFSERVICE'] + pd.Timedelta(days=1)
stop_pair_df['hour'] = stop_pair_df['hour'] % 24
stop_pair_df['date'] = stop_pair_df['DAYOFSERVICE'].dt.date
stop_pair_df['day'] = stop_pair_df['DAYOFSERVICE'].dt.weekday
# cosine and sine of seconds since midnight
SECONDS_IN_DAY = 24 * 60 * 60
stop_pair_df['cos_time'] = np.cos(
stop_pair_df['time_departure'] * (2 * np.pi / SECONDS_IN_DAY))
stop_pair_df['sin_time'] = np.sin(
stop_pair_df['time_departure'] * (2 * np.pi / SECONDS_IN_DAY))
# cosine and sine of day of week number
stop_pair_df['cos_day'] = np.cos(
stop_pair_df['day'] * (2 * np.pi / 7))
stop_pair_df['sin_day'] = np.sin(
stop_pair_df['day'] * (2 * np.pi / 7))
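
        # Worked example of why the cyclic encoding helps (a note, not used by
        # the pipeline): 23:00 is 82800s and 01:00 is 3600s since midnight, so
        # their raw values sit at opposite extremes, but their (cos_time, sin_time)
        # pairs, roughly (0.97, -0.26) and (0.97, 0.26), are close together,
        # matching their closeness in time.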

        # dummy variable for weekend
        stop_pair_df['is_weekend'] = stop_pair_df['day'].isin([5, 6])

        # one-hot encoding of days and hours
        day_of_week_columns = pd.get_dummies(stop_pair_df['day'], drop_first=True, prefix="day")
        stop_pair_df[list(day_of_week_columns)] = day_of_week_columns

        hour_of_day_columns = pd.get_dummies(stop_pair_df['hour'], drop_first=True, prefix='hour')
        stop_pair_df[list(hour_of_day_columns)] = hour_of_day_columns

        stop_pair_df = stop_pair_df.drop('DAYOFSERVICE', axis=1)

        # Add weather features
        weather_df = pd.read_csv(
            "~/data/raw/met_eireann_hourly_phoenixpark_dec2017jan2019.csv",
            usecols=['date', 'rain', 'temp'])

        weather_df['datetime'] = pd.to_datetime(
            weather_df['date'].str.upper(), format="%d-%b-%Y %H:%M")
        weather_df = weather_df.drop('date', axis=1)

        weather_df['date'] = weather_df['datetime'].dt.date
        weather_df['hour'] = weather_df['datetime'].dt.hour

        weather_df = weather_df.sort_values('datetime')
        weather_df['lagged_rain'] = weather_df['rain'].shift(1)

        stop_pair_df = pd.merge(stop_pair_df, weather_df, on=[
            'date', 'hour'], how='left')

        file_path = f"/home/team13/data/adjacent_stop_pairs_with_features/{stop_pair}.parquet"

        # bank holiday features
        stop_pair_df['bank_holiday'] = 0
        stop_pair_df.loc[stop_pair_df['date'].isin(bank_holidays_2018), 'bank_holiday'] = 1

        stop_pair_df.sort_values(['date', 'time_departure']).to_parquet(
            file_path, index=False)
|
{"hexsha": "2bb3d873c884684ba1b7728c13662329ecbceb0d", "size": 7776, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_analytics/preprocessing/run_preprocessing.py", "max_stars_repo_name": "reidy-p/DublinBusPredictions", "max_stars_repo_head_hexsha": "a6b1fc8a5c28500a3292883ea0dfcde1770d78d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_analytics/preprocessing/run_preprocessing.py", "max_issues_repo_name": "reidy-p/DublinBusPredictions", "max_issues_repo_head_hexsha": "a6b1fc8a5c28500a3292883ea0dfcde1770d78d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_analytics/preprocessing/run_preprocessing.py", "max_forks_repo_name": "reidy-p/DublinBusPredictions", "max_forks_repo_head_hexsha": "a6b1fc8a5c28500a3292883ea0dfcde1770d78d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9263157895, "max_line_length": 99, "alphanum_fraction": 0.6153549383, "include": true, "reason": "import numpy", "num_tokens": 1990}
|
# Beg et al. (2007): https://doi.org/10.1073/pnas.0609845104.
module BegData

using DataFrames
import ..Chemostat_Heerden2013
const ChH = Chemostat_Heerden2013
import CSV
import UtilsJL
const UJL = UtilsJL

UJL.gen_sub_proj(@__MODULE__)

# The average crowding coefficient a was fitted to obtain the minimum square
# deviation between the measured and model predicted growth rates,
# resulting in a = 0.0040 ± 0.0005 h•g/mmol, in which g is grams dry weight.
# However, the maximum growth rates on glucose and glycerol are more consistent
# with a = 0.0031 ± 0.0001 h•g/mmol and a = 0.0053 ± 0.0001 h•g/mmol, respectively.
const ave_a = 0.0031 # I will use this to set the unknown costs

# Enzymatic costs from Beg et al. (2007): https://doi.org/10.1073/pnas.0609845104.
function load_enz_data()
    datfile = rawdir("beg2007___enzymatic_data.tsv")
    beg_enz_data = CSV.read(datfile, DataFrame)
end
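
# Illustrative usage sketch (a note, not part of the original module; assumes
# the TSV above is present under the project's raw data directory):
#   enz_df = BegData.load_enz_data()  # -> DataFrame with the enzymatic cost data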

function __init__()
    UJL.create_proj_dirs(@__MODULE__)
end

end # module BegData
|
{"hexsha": "3e9f8ae0f675b01ed63ffeba472f459ec3a8816c", "size": 1061, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/BegData/BegData.jl", "max_stars_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_stars_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/BegData/BegData.jl", "max_issues_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_issues_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/BegData/BegData.jl", "max_forks_repo_name": "josePereiro/Chemostat_Heerden2013.jl", "max_forks_repo_head_hexsha": "00f97a57a64daf8d175b2eb22871c5fc83d2e6f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3666666667, "max_line_length": 87, "alphanum_fraction": 0.6908576814, "num_tokens": 329}
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defense Evaluation.
"""
import numpy as np
import scipy.stats as st
from mindarmour.utils.logger import LogUtil
from mindarmour.utils._check_param import check_numpy_param
from mindarmour.utils._check_param import check_pair_numpy_param
LOGGER = LogUtil.get_instance()
TAG = 'DefenseEvaluate'
class DefenseEvaluate:
"""
Evaluation metrics of defense methods.
Args:
raw_preds (numpy.ndarray): Prediction results of some certain samples
on raw model.
def_preds (numpy.ndarray): Prediction results of some certain samples on
defensed model.
true_labels (numpy.ndarray): Ground-truth labels of samples, a
one-dimension array whose size is raw_preds.shape[0].
Examples:
>>> from mindarmour.adv_robustness.evaluations import DefenseEvaluate
>>> raw_preds = np.array([[0.1, 0.1, 0.2, 0.6],
... [0.1, 0.7, 0.0, 0.2],
... [0.8, 0.1, 0.0, 0.1]])
>>> def_preds = np.array([[0.1, 0.1, 0.1, 0.7],
... [0.1, 0.6, 0.2, 0.1],
... [0.1, 0.2, 0.1, 0.6]])
>>> true_labels = np.array([3, 1, 0])
>>> def_eval = DefenseEvaluate(raw_preds,
... def_preds,
... true_labels)
>>> cav = def_eval.cav()
>>> crr = def_eval.crr()
>>> csr = def_eval.csr()
>>> ccv = def_eval.ccv()
>>> cos = def_eval.cos()
"""
def __init__(self, raw_preds, def_preds, true_labels):
self._raw_preds, self._def_preds = check_pair_numpy_param('raw_preds',
raw_preds,
'def_preds',
def_preds)
self._true_labels = check_numpy_param('true_labels', true_labels)
self._num_samples = len(true_labels)
def cav(self):
"""
Calculate classification accuracy variance (CAV).
Returns:
float, the higher, the more successful the defense is.
Examples:
>>> def_eval.cav()
"""
def_succ_num = np.sum(np.argmax(self._def_preds, axis=1)
== self._true_labels)
raw_succ_num = np.sum(np.argmax(self._raw_preds, axis=1)
== self._true_labels)
return (def_succ_num - raw_succ_num) / self._num_samples
def crr(self):
"""
Calculate classification rectify ratio (CRR).
Returns:
float, the higher, the more successful the defense is.
Examples:
>>> def_eval.crr()
"""
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) != self._true_labels
rectify_num = np.sum(cond1*cond2)
return rectify_num*1.0 / self._num_samples
def csr(self):
"""
Calculate classification sacrifice ratio (CSR), the lower the better.
Returns:
float, the lower, the more successful the defense is.
"""
cond1 = np.argmax(self._def_preds, axis=1) != self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels
sacrifice_num = np.sum(cond1*cond2)
return sacrifice_num*1.0 / self._num_samples
def ccv(self):
"""
Calculate classification confidence variance (CCV).
Returns:
- float, the lower, the more successful the defense is.
- If return value == -1, len(idxes) == 0.
Examples:
>>> def_eval.ccv()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels
idxes = idxes[cond1*cond2]
def_max = np.max(self._def_preds, axis=1)
raw_max = np.max(self._raw_preds, axis=1)
if idxes.shape[0] == 0:
return -1
conf_variance = np.mean(np.abs(def_max[idxes] - raw_max[idxes]))
return conf_variance
def cos(self):
"""
References: `Calculate classification output stability (COS)
<https://en.wikipedia.org/wiki/Jensen%E2%80%93Shannon_divergence>`_
Returns:
float.
- If return value >= 0, is effective defense. The lower, the
more successful the defense.
- If return value == -1, idxes == 0.
Examples:
>>> def_eval.cos()
"""
idxes = np.arange(self._num_samples)
cond1 = np.argmax(self._def_preds, axis=1) == self._true_labels
cond2 = np.argmax(self._raw_preds, axis=1) == self._true_labels
idxes = idxes[cond1*cond2]
if idxes.size == 0:
return -1
def_preds = self._def_preds[idxes]
raw_preds = self._raw_preds[idxes]
js_total = 0.0
mean_value = 0.5*(def_preds + raw_preds)
for i, value in enumerate(mean_value):
js_total += 0.5*st.entropy(def_preds[i], value) \
+ 0.5*st.entropy(raw_preds[i], value)
return js_total / len(idxes)
|
{"hexsha": "cf2085b60b62eddebde0194df73f97803fb791fb", "size": 5949, "ext": "py", "lang": "Python", "max_stars_repo_path": "mindarmour/adv_robustness/evaluations/defense_evaluation.py", "max_stars_repo_name": "hboshnak/mindarmour", "max_stars_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mindarmour/adv_robustness/evaluations/defense_evaluation.py", "max_issues_repo_name": "hboshnak/mindarmour", "max_issues_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mindarmour/adv_robustness/evaluations/defense_evaluation.py", "max_forks_repo_name": "hboshnak/mindarmour", "max_forks_repo_head_hexsha": "0609a4eaea875a84667bed279add9305752880cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9941176471, "max_line_length": 80, "alphanum_fraction": 0.5678265255, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1419}
|
# RUN: SUPPORTLIB=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext %PYTHON %s | FileCheck %s

import filecmp
import numpy as np
import os
import sys
import tempfile

_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_SCRIPT_PATH)

from tools import mlir_pytaco_api as pt
from tools import testing_utils as utils

# Define the CSR format.
csr = pt.format([pt.dense, pt.compressed], [0, 1])

# Read matrices A and B from file, infer size of output matrix C.
A = pt.read(os.path.join(_SCRIPT_PATH, "data/A.mtx"), csr)
B = pt.read(os.path.join(_SCRIPT_PATH, "data/B.mtx"), csr)
C = pt.tensor([A.shape[0], B.shape[1]], csr)

# Define the kernel.
i, j, k = pt.get_index_vars(3)
C[i, j] = A[i, k] * B[k, j]
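
# In dense index notation, the kernel above is sparse matrix-matrix
# multiplication with an implicit reduction over the repeated index k
# (an explanatory note, not part of the original test):
#   C[i, j] = sum over k of A[i, k] * B[k, j]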

# Force evaluation of the kernel by writing out C.
with tempfile.TemporaryDirectory() as test_dir:
    golden_file = os.path.join(_SCRIPT_PATH, "data/gold_C.tns")
    out_file = os.path.join(test_dir, "C.tns")
    pt.write(out_file, C)
    #
    # CHECK: Compare result True
    #
    print(f"Compare result {utils.compare_sparse_tns(golden_file, out_file)}")
|
{"hexsha": "af1c6ba0c4552881e2b015048450437c07f0d321", "size": 1077, "ext": "py", "lang": "Python", "max_stars_repo_path": "mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py", "max_stars_repo_name": "LaudateCorpus1/llvm-project", "max_stars_repo_head_hexsha": "ff2e0f0c1112558b3f30d8afec7c9882c33c79e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py", "max_issues_repo_name": "LaudateCorpus1/llvm-project", "max_issues_repo_head_hexsha": "ff2e0f0c1112558b3f30d8afec7c9882c33c79e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mlir/test/Integration/Dialect/SparseTensor/taco/test_SpMM.py", "max_forks_repo_name": "LaudateCorpus1/llvm-project", "max_forks_repo_head_hexsha": "ff2e0f0c1112558b3f30d8afec7c9882c33c79e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9166666667, "max_line_length": 98, "alphanum_fraction": 0.7270194986, "include": true, "reason": "import numpy", "num_tokens": 314}
|