text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
#include "lue/configure.hpp"
#ifdef LUE_BUILD_DATA_MODEL
#ifdef LUE_DATA_MODEL_WITH_PYTHON_API
#include "lue/py/data_model/submodule.hpp"
#endif
#endif
#ifdef LUE_BUILD_FRAMEWORK
#ifdef LUE_FRAMEWORK_WITH_PYTHON_API
#include "lue/py/framework/submodule.hpp"
#endif
#endif
#include <boost/algorithm/string/join.hpp>
#include <fmt/format.h>
namespace py = pybind11;
namespace lue {
PYBIND11_MODULE(lue, module)
{
    // Names of the submodules that were compiled into this extension.
    std::vector<std::string> automodules;
#ifdef LUE_DATA_MODEL_WITH_PYTHON_API
    automodules.push_back("data_model");
#endif
#ifdef LUE_FRAMEWORK_WITH_PYTHON_API
    automodules.push_back("framework");
#endif

    // Turn each submodule name into a Sphinx automodule directive.
    for (std::string& item : automodules)
    {
        item = fmt::format(".. automodule:: lue.{}", item);
    }

    // BUG FIX: the format string previously hard-coded both automodule
    // directives and contained no "{}" placeholder, so the joined list of
    // actually-built submodules was silently ignored. The docstring now
    // only advertises the submodules that are really available.
    module.doc() = fmt::format(
        R"(
:mod:`lue` --- Scientific Database and Environmental Modelling Framework
========================================================================

The :mod:`lue` package provides functionality for ...

{}
)",
        boost::algorithm::join(automodules, "\n"));

    // Version information baked in at configure time.
    module.attr("__version__") = py::str(BuildOptions::version);
    module.attr("lue_version") = py::str(BuildOptions::version);
    module.attr("git_short_sha1") = py::str(BuildOptions::git_short_sha1);

#ifdef LUE_BUILD_DATA_MODEL
#ifdef LUE_DATA_MODEL_WITH_PYTHON_API
    data_model::init_submodule(module);
#endif
#endif
#ifdef LUE_BUILD_FRAMEWORK
#ifdef LUE_FRAMEWORK_WITH_PYTHON_API
    framework::init_submodule(module);
#endif
#endif
}
} // namespace lue
|
{"hexsha": "13c4b6d7ba0badaf487b55327b12bd8af26b5585", "size": 1590, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "source/python/src/python_extension.cpp", "max_stars_repo_name": "computationalgeography/lue", "max_stars_repo_head_hexsha": "71993169bae67a9863d7bd7646d207405dc6f767", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-02-26T22:45:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-02T10:28:48.000Z", "max_issues_repo_path": "source/python/src/python_extension.cpp", "max_issues_repo_name": "computationalgeography/lue", "max_issues_repo_head_hexsha": "71993169bae67a9863d7bd7646d207405dc6f767", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 131.0, "max_issues_repo_issues_event_min_datetime": "2020-10-27T13:09:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T10:24:26.000Z", "max_forks_repo_path": "source/python/src/python_extension.cpp", "max_forks_repo_name": "computationalgeography/lue", "max_forks_repo_head_hexsha": "71993169bae67a9863d7bd7646d207405dc6f767", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7313432836, "max_line_length": 76, "alphanum_fraction": 0.6937106918, "num_tokens": 396}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2012-2013 Karsten Jeschkies <jeskar@web.de>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
'''
Created on 24.11.2012
@author: karsten jeschkies <jeskar@web.de>
This is an implementation of the SMOTE Algorithm.
See: "SMOTE: synthetic minority over-sampling technique" by
Chawla, N.V et al.
'''
import logging, math
import numpy as np
from random import randrange, choice
from sklearn.neighbors import NearestNeighbors
# Shared application logger; configured by the host application ("main").
logger = logging.getLogger("main")
def SMOTE(T, N, k, h = 1.0):
    """
    Returns int((N/100) * n_minority_samples) synthetic minority samples.

    Parameters
    ----------
    T : array-like, shape = [n_minority_samples, n_features]
        Holds the minority samples
    N : percentage of new synthetic samples:
        n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
    k : int. Number of nearest neighbours.
    h : float, optional
        Upper bound of the uniform "gap" used to interpolate between a
        sample and one of its neighbours.

    Returns
    -------
    S : Synthetic samples. array,
        shape = [int(N/100 * n_minority_samples), n_features].
    """
    T = np.asarray(T)
    n_minority_samples, n_features = T.shape
    # BUG FIX: the old code kept N/100 as a float, which crashed np.zeros
    # and produced float array indices under Python 3; the per-chunk
    # `sample_size` logic was also broken (its condition was never true).
    n_synthetic_samples = int(N / 100.0 * n_minority_samples)
    S = np.zeros(shape=(n_synthetic_samples, n_features))

    # Learn nearest neighbours of the minority samples.
    neigh = NearestNeighbors(n_neighbors = k)
    neigh.fit(T)

    # Calculate synthetic samples, cycling through the minority samples as
    # interpolation bases.
    for s in range(n_synthetic_samples):
        i = s % n_minority_samples
        # kneighbors expects a 2-D array; T[i] alone is 1-D.
        nn = neigh.kneighbors(T[i].reshape(1, -1), return_distance=False)
        # NOTE: nn includes T[i] itself; we don't want to select it.
        candidates = [j for j in nn[0] if j != i]
        # Degenerate case (k == 1): fall back to interpolating with the
        # sample itself instead of looping forever.
        nn_index = choice(candidates) if candidates else i
        dif = T[nn_index] - T[i]
        gap = np.random.uniform(low = 0.0, high = h)
        S[s, :] = T[i, :] + gap * dif[:]
    return S
def borderlineSMOTE(X, y, minority_target, N, k):
    """
    Returns synthetic minority samples (borderline-SMOTE).

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Holds the minority and majority samples
    y : array-like, shape = [n_samples]
        Holds the class targets for samples
    minority_target : value for minority class
    N : percentage of new synthetic samples:
        n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
    k : int. Number of nearest neighbours.

    Returns
    -------
    safe : Safe minorities
    synthetic : Synthetic sample of minorities in danger zone
    danger : Minorities of danger zone
    """
    X = np.asarray(X)
    y = np.asarray(y)
    n_samples, _ = X.shape

    # Learn nearest neighbours on complete training set.
    neigh = NearestNeighbors(n_neighbors = k)
    neigh.fit(X)

    safe_minority_indices = list()
    danger_minority_indices = list()

    for i in range(n_samples):
        if y[i] != minority_target:
            continue
        # kneighbors expects a 2-D array; X[i] alone is 1-D.
        nn = neigh.kneighbors(X[i].reshape(1, -1), return_distance=False)
        majority_neighbours = 0
        for n in nn[0]:
            if y[n] != minority_target:
                majority_neighbours += 1
        # BUG FIX: nn has shape (1, k), so len(nn) is always 1. Compare
        # against the number of neighbours, len(nn[0]), instead.
        n_neighbours = len(nn[0])
        if majority_neighbours == n_neighbours:
            # Noise: every neighbour belongs to the majority class.
            continue
        elif majority_neighbours < (n_neighbours / 2):
            logger.debug("Add sample to safe minorities.")
            safe_minority_indices.append(i)
        else:
            # DANGER zone
            danger_minority_indices.append(i)

    # SMOTE the danger minority samples.
    synthetic_samples = SMOTE(X[danger_minority_indices], N, k, h = 0.5)
    return (X[safe_minority_indices],
            synthetic_samples,
            X[danger_minority_indices])
|
{"hexsha": "2a4274fea0b51436dde22e0a0fca0cc5304cd90d", "size": 5043, "ext": "py", "lang": "Python", "max_stars_repo_path": "pml/lib/smote.py", "max_stars_repo_name": "gatapia/py_ml_utils", "max_stars_repo_head_hexsha": "844d8b62a7c5cc0a80f4f62c0bfda092aac57ade", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 183, "max_stars_repo_stars_event_min_datetime": "2015-01-11T13:01:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T04:45:33.000Z", "max_issues_repo_path": "pml/lib/smote.py", "max_issues_repo_name": "gatapia/py_ml_utils", "max_issues_repo_head_hexsha": "844d8b62a7c5cc0a80f4f62c0bfda092aac57ade", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2015-05-12T17:39:42.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-29T18:01:38.000Z", "max_forks_repo_path": "pml/lib/smote.py", "max_forks_repo_name": "gatapia/py_ml_utils", "max_forks_repo_head_hexsha": "844d8b62a7c5cc0a80f4f62c0bfda092aac57ade", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 166, "max_forks_repo_forks_event_min_datetime": "2015-01-28T18:05:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T04:45:34.000Z", "avg_line_length": 35.2657342657, "max_line_length": 85, "alphanum_fraction": 0.6319651001, "include": true, "reason": "import numpy", "num_tokens": 1165}
|
import warnings
import chess
import numpy as np
from gym import spaces
from pettingzoo import AECEnv
from pettingzoo.utils import wrappers
from pettingzoo.utils.agent_selector import agent_selector
from . import chess_utils
def env():
    """Build the chess environment with the standard wrapper stack applied."""
    wrapped = raw_env()
    wrapped = wrappers.CaptureStdoutWrapper(wrapped)
    wrapped = wrappers.TerminateIllegalWrapper(wrapped, illegal_reward=-1)
    wrapped = wrappers.AssertOutOfBoundsWrapper(wrapped)
    return wrappers.OrderEnforcingWrapper(wrapped)
class raw_env(AECEnv):
    """Two-player chess AEC environment backed by `python-chess`."""

    metadata = {
        'render_modes': ['human'],
        "name": "chess_v5",
        "is_parallelizable": False,
        "render_fps": 2,
    }

    def __init__(self):
        super().__init__()

        self.board = chess.Board()

        self.agents = [f"player_{i}" for i in range(2)]
        self.possible_agents = self.agents[:]

        self._agent_selector = agent_selector(self.agents)

        # 8x8x73 move encoding (AlphaZero-style), flattened to 4672 actions.
        self.action_spaces = {name: spaces.Discrete(8 * 8 * 73) for name in self.agents}
        self.observation_spaces = {name: spaces.Dict({
            'observation': spaces.Box(low=0, high=1, shape=(8, 8, 111), dtype=bool),
            'action_mask': spaces.Box(low=0, high=1, shape=(4672,), dtype=np.int8)
        }) for name in self.agents}

        self.rewards = None
        self.dones = None
        self.infos = {name: {} for name in self.agents}

        self.agent_selection = None

        # Rolling stack of past board planes (8 steps x 13 planes).
        self.board_history = np.zeros((8, 8, 104), dtype=bool)

    def observation_space(self, agent):
        return self.observation_spaces[agent]

    def action_space(self, agent):
        return self.action_spaces[agent]

    def observe(self, agent):
        # Current board planes from this player's perspective plus history.
        observation = chess_utils.get_observation(self.board, self.possible_agents.index(agent))
        observation = np.dstack((observation[:, :, :7], self.board_history))
        # Only the agent to move gets a non-empty legal-move mask.
        legal_moves = chess_utils.legal_moves(self.board) if agent == self.agent_selection else []
        action_mask = np.zeros(4672, 'int8')
        for i in legal_moves:
            action_mask[i] = 1
        return {'observation': observation, 'action_mask': action_mask}

    def reset(self, seed=None):
        self.has_reset = True
        self.agents = self.possible_agents[:]
        self.board = chess.Board()
        self._agent_selector = agent_selector(self.agents)
        self.agent_selection = self._agent_selector.reset()
        self.rewards = {name: 0 for name in self.agents}
        self._cumulative_rewards = {name: 0 for name in self.agents}
        self.dones = {name: False for name in self.agents}
        self.infos = {name: {} for name in self.agents}
        self.board_history = np.zeros((8, 8, 104), dtype=bool)

    def set_game_result(self, result_val):
        # result_val is from player 0's perspective; negate for player 1.
        for i, name in enumerate(self.agents):
            self.dones[name] = True
            result_coef = 1 if i == 0 else -1
            self.rewards[name] = result_val * result_coef
            self.infos[name] = {'legal_moves': []}

    def step(self, action):
        if self.dones[self.agent_selection]:
            return self._was_done_step(action)
        current_agent = self.agent_selection
        current_index = self.agents.index(current_agent)
        # BUG FIX: get_observation expects the player index (as used in
        # observe()), not the agent name string.
        next_board = chess_utils.get_observation(self.board, current_index)
        self.board_history = np.dstack((next_board[:, :, 7:], self.board_history[:, :, :-13]))
        self.agent_selection = self._agent_selector.next()
        chosen_move = chess_utils.action_to_move(self.board, action, current_index)
        assert chosen_move in self.board.legal_moves
        self.board.push(chosen_move)
        next_legal_moves = chess_utils.legal_moves(self.board)
        is_stale_or_checkmate = not any(next_legal_moves)
        # claim draw is set to be true to align with normal tournament rules
        is_repetition = self.board.is_repetition(3)
        is_50_move_rule = self.board.can_claim_fifty_moves()
        is_claimable_draw = is_repetition or is_50_move_rule
        game_over = is_claimable_draw or is_stale_or_checkmate
        if game_over:
            result = self.board.result(claim_draw=True)
            result_val = chess_utils.result_to_int(result)
            self.set_game_result(result_val)
        self._accumulate_rewards()

    def render(self, mode='human'):
        print(self.board)

    def close(self):
        pass
|
{"hexsha": "8ad31ee61841e807f42418f0a76a863186a80047", "size": 4352, "ext": "py", "lang": "Python", "max_stars_repo_path": "pettingzoo/classic/chess/chess_env.py", "max_stars_repo_name": "hany606/PettingZoo", "max_stars_repo_head_hexsha": "ef958ff3dfb3759e980759b507448ea96ac8ba28", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pettingzoo/classic/chess/chess_env.py", "max_issues_repo_name": "hany606/PettingZoo", "max_issues_repo_head_hexsha": "ef958ff3dfb3759e980759b507448ea96ac8ba28", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pettingzoo/classic/chess/chess_env.py", "max_forks_repo_name": "hany606/PettingZoo", "max_forks_repo_head_hexsha": "ef958ff3dfb3759e980759b507448ea96ac8ba28", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9696969697, "max_line_length": 98, "alphanum_fraction": 0.6528033088, "include": true, "reason": "import numpy", "num_tokens": 1025}
|
\ Part 2 of the JonesForth tutorial.
\ This one is added word-by-word as they are successfully executed
\ Define / and MOD in terms of /MOD
: / /MOD SWAP DROP ;
: MOD /MOD DROP ;
\ Some char constant
: '\n' 10 ;
: BL 32 ; \ BL (blank) is standard FORTH word for space.
: CR '\n' EMIT ;
: SPACE BL EMIT ;
: NEGATE 0 SWAP - ;
: TRUE 1 ;
: FALSE 0 ;
: NOT 0= ;
\ LITERAL takes whatever on the stack and compiles LIT <foo>
: LITERAL IMMEDIATE
' LIT ,
,
;
\ Character constants: [ ... ] drops to immediate mode so CHAR runs at
\ compile time, then LITERAL compiles the resulting code point.
: ':'
[
CHAR :
]
LITERAL
;
: ';' [ CHAR ; ] LITERAL ;
: '(' [ CHAR ( ] LITERAL ;
: ')' [ CHAR ) ] LITERAL ;
: '"' [ CHAR " ] LITERAL ;
: 'A' [ CHAR A ] LITERAL ;
: '0' [ CHAR 0 ] LITERAL ;
: '-' [ CHAR - ] LITERAL ;
: '.' [ CHAR . ] LITERAL ;
\ [COMPILE] word -- compile an IMMEDIATE word instead of executing it
: [COMPILE] IMMEDIATE
WORD
FIND
>CFA
,
;
\ RECURSE -- compile a call to the word currently being defined
: RECURSE IMMEDIATE
LATEST @
>CFA
,
;
\ Conditionals Statements
\ IF compiles 0BRANCH with a placeholder offset; THEN backpatches it.
: IF IMMEDIATE
' 0BRANCH ,
HERE @
0 ,
;
: THEN IMMEDIATE
DUP
HERE @ SWAP -
SWAP !
;
: ELSE IMMEDIATE
' BRANCH ,
HERE @
0 ,
SWAP
DUP
HERE @ SWAP -
SWAP !
;
\ UNLESS is IF with the condition negated.
: UNLESS IMMEDIATE
' NOT ,
[COMPILE] IF
;
\ Loop Construct
\ BEGIN leaves the loop-start address on the stack for UNTIL/AGAIN/REPEAT.
: BEGIN IMMEDIATE
HERE @
;
: UNTIL IMMEDIATE
' 0BRANCH ,
HERE @ -
,
;
: AGAIN IMMEDIATE
' BRANCH ,
HERE @ -
,
;
: WHILE IMMEDIATE
' 0BRANCH ,
HERE @
0 ,
;
: REPEAT IMMEDIATE
' BRANCH ,
SWAP
HERE @ - ,
DUP
HERE @ SWAP -
SWAP !
;
\ Comments
\ ( reads and discards input until the matching close paren, tracking
\ nesting depth so parens can be nested.
: ( IMMEDIATE
1
BEGIN
KEY
DUP '(' = IF
DROP
1+
ELSE
')' = IF
1-
THEN
THEN
DUP 0= UNTIL
DROP
;
( Now we can nest ( ... ) as much as we want )
\ Stack Manipulation
: NIP ( x y -- y ) SWAP DROP ;
: TUCK ( x y -- y x y ) SWAP OVER ;
: PICK ( x_u ... x_1 x_0 u -- x_u ... x_1 x_0 x_u )
1+
8 * ( multiply by the word size )
DSP@ +
@
;
\ Writes N spaces to stdout
: SPACES ( n -- )
BEGIN
DUP 0>
WHILE
SPACE
1-
REPEAT
DROP
;
\ EXTRA: Writes N zeroes to stdout
: ZEROES ( n -- )
BEGIN
DUP 0>
WHILE
'0' EMIT
1-
REPEAT
DROP
;
\ Standard word for manipulating BASE.
: DECIMAL ( -- ) 10 BASE ! ;
: HEX ( -- ) 16 BASE ! ;
( Printing Numbers )
\ Recursively prints the digits of u in the current BASE.
: U. ( u -- )
BASE @ /MOD
?DUP IF ( if quotient <> 0 then )
RECURSE ( print the quotient )
THEN
( print the remainder )
DUP 10 < IF
'0'
ELSE
10 -
'A'
THEN
+
EMIT
;
( Printing the content of the stack )
: .S ( -- )
DSP@
BEGIN
DUP S0 @ <
WHILE
DUP @ U.
SPACE
8+
REPEAT
DROP
;
( Returns the width of an unsigned number (in characters) in the current base )
: UWIDTH
BASE @ /
?DUP IF
RECURSE 1+
ELSE
1
THEN
;
\ Print u right-aligned in a field `width` characters wide.
: U.R ( u width -- )
SWAP
DUP
UWIDTH
ROT
SWAP -
SPACES
U.
;
\ EXTRA, print zeroes padded unsigned number
: ZU.R ( u width -- )
SWAP
DUP
UWIDTH
ROT
SWAP -
ZEROES
U.
;
\ Print signed n right-aligned in a field `width` characters wide.
: .R ( n width -- )
SWAP ( width n )
DUP 0< IF
NEGATE ( width u )
1 ( save flag to remember that it was negative | width u 1 )
SWAP ( width 1 u )
ROT ( 1 u width )
1- ( 1 u width-1 )
ELSE
0 ( width u 0 )
SWAP ( width 0 u )
ROT ( 0 u width )
THEN
SWAP ( flag width u )
DUP ( flag width u u )
UWIDTH ( flag width u uwidth )
ROT ( flag u uwidth width )
SWAP - ( flag u width-uwidth )
SPACES ( flag u )
SWAP ( u flag )
IF
'-' EMIT
THEN
U.
;
( Finally )
: . 0 .R SPACE ;
( The real U. -- redefines the earlier helper, adding a trailing space )
: U. U. SPACE ;
\ Fetch and print the value at addr.
: ? ( addr -- ) @ . ;
\ True when a <= c < b.
: WITHIN ( c a b - f )
-ROT ( b c a )
OVER ( b c a c )
<= IF
> IF ( b c )
TRUE
ELSE
FALSE
THEN
ELSE
2DROP
FALSE
THEN
;
\ Number of cells currently on the data stack.
: DEPTH ( -- n )
S0 @ DSP@ -
8-
;
\ Round addr up to the next 8-byte boundary.
: ALIGNED ( addr -- addr )
7 + 7 INVERT AND
;
: ALIGN HERE @ ALIGNED HERE ! ;
\ Append a byte to the dictionary.
: C,
HERE @ C!
1 HERE +!
;
\ S" -- string literal; compiles LITSTRING when compiling, otherwise
\ stores the text transiently at HERE.
: S" IMMEDIATE ( -- addr len )
STATE @ IF
' LITSTRING ,
HERE @
0 ,
BEGIN
KEY
DUP '"' <>
WHILE
C,
REPEAT
DROP
DUP
HERE @ SWAP -
8-
SWAP !
ALIGN
ELSE
HERE @
BEGIN
KEY
DUP '"' <>
WHILE
OVER C!
1+
REPEAT
DROP
HERE @ -
HERE @
SWAP
THEN
;
\ ." -- print a string literal (compiled or immediately).
: ." IMMEDIATE
STATE @ IF
[COMPILE] S"
' TELL ,
ELSE
BEGIN
KEY
DUP '"' = IF
DROP
EXIT
THEN
EMIT
AGAIN
THEN
;
( Constant and Variables )
\ CONSTANT name -- create a word that pushes the value taken from the stack.
: CONSTANT
WORD
CREATE
DOCOL ,
' LIT ,
,
' EXIT ,
;
\ Reserve n bytes of dictionary space; returns its start address.
: ALLOT ( n -- addr )
HERE @ SWAP
HERE +!
;
: CELLS ( n -- n ) 8 * ;
\ VARIABLE name -- create a word that pushes the address of a fresh cell.
: VARIABLE
1 CELLS ALLOT
WORD CREATE
DOCOL ,
' LIT ,
,
' EXIT ,
;
\ VALUE name -- like CONSTANT but intended to be updated with TO.
: VALUE ( n -- )
WORD CREATE
DOCOL ,
' LIT ,
,
' EXIT ,
;
\ TO name -- store into a VALUE (compiles a store when compiling).
: TO IMMEDIATE ( n -- )
WORD
FIND
>DFA
8+
STATE @ IF
' LIT ,
,
' ! ,
ELSE
!
THEN
;
\ +TO name -- add to a VALUE.
: +TO IMMEDIATE
WORD
FIND
>DFA
8+
STATE @ IF
' LIT ,
,
' +! ,
ELSE
+!
THEN
;
\ Print the name of the dictionary entry at addr.
: ID. ( addr -- )
8+
DUP C@
F_LENMASK AND
BEGIN
DUP 0>
WHILE
SWAP 1+
DUP C@
EMIT
SWAP 1-
REPEAT
2DROP ( len addr -- )
;
\ Flag-test words for dictionary entries.
: ?HIDDEN
8+
C@
F_HIDDEN AND
;
: ?IMMEDIATE
8+
C@
F_IMMED AND
;
\ List all visible words in the dictionary.
: WORDS
LATEST @
BEGIN
?DUP
WHILE
DUP ?HIDDEN NOT IF
DUP ID.
SPACE
THEN
@
REPEAT
CR
;
\ FORGET name -- unlink name and everything defined after it.
: FORGET
WORD FIND
DUP @ LATEST !
HERE !
;
\ Hex/ASCII memory dump, 16 bytes per line.
: DUMP ( addr len -- )
BASE @ -ROT
HEX
BEGIN
?DUP ( while len > 0 )
WHILE
OVER 8 ZU.R ( print the address )
SPACE
( print up to 16 words on this line )
2DUP ( addr len addr len )
1- 15 AND 1+ ( addr len addr linelen )
BEGIN
?DUP ( while linelen > 0 )
WHILE
SWAP ( addr len linelen addr )
DUP C@ ( addr len linelen addr byte )
2 ZU.R SPACE ( print the byte )
1+ SWAP 1- ( addr len linelen addr -- addr len addr+1 linelen-1 )
REPEAT
DROP ( addr len )
( print the ASCII equivalents )
2DUP 1- 15 AND 1+ ( addr len addr linelen )
BEGIN
?DUP
WHILE
SWAP ( addr len linelen addr )
DUP C@ ( addr len linelen addr byte )
DUP 32 128 WITHIN IF ( 32 <= c < 128? )
EMIT
ELSE
DROP '.' EMIT
THEN
1+ SWAP 1-
REPEAT
DROP
CR
DUP 1- 15 AND 1+
TUCK
-
>R + R>
REPEAT
DROP
BASE !
;
\ CASE ... OF ... ENDOF ... ENDCASE -- compile-time case statement built
\ from IF/ELSE/THEN; CASE pushes 0 as a sentinel, ENDCASE patches all THENs.
: CASE IMMEDIATE
0
;
: OF IMMEDIATE
' OVER ,
' = ,
[COMPILE] IF
' DROP ,
;
: ENDOF IMMEDIATE
[COMPILE] ELSE
;
: ENDCASE IMMEDIATE
' DROP ,
BEGIN
?DUP
WHILE
[COMPILE] THEN
REPEAT
;
\ Map a codeword address back to its dictionary entry (0 if not found).
: CFA>
LATEST @
BEGIN
?DUP
WHILE
2DUP SWAP
< IF
NIP
EXIT
THEN
@
REPEAT
DROP
0
;
\ SEE name -- decompile a colon definition.
: SEE
WORD FIND
HERE @
LATEST @
BEGIN
2 PICK
OVER
<>
WHILE
NIP
DUP @
REPEAT
DROP
SWAP
':' EMIT SPACE DUP ID. SPACE
DUP ?IMMEDIATE IF ." IMMEDIATE " THEN
>DFA
BEGIN ( end start )
2DUP >
WHILE
DUP @ ( end start codeword )
CASE
' LIT OF ( is it LIT ? )
8 + DUP @ ( get next word )
. ( and print it )
ENDOF
' LITSTRING OF
[ CHAR S ] LITERAL EMIT '"' EMIT SPACE ( print S"<space> )
8 + DUP @ ( get the length )
SWAP 8 + SWAP ( end start+8 length )
2DUP TELL ( print the string )
'"' EMIT SPACE
+ ALIGNED ( end start+8+len, aligned )
8 - ( because we're about to add 8 below )
ENDOF
' 0BRANCH OF
." 0BRANCH ( "
8 + DUP @
.
." ) "
ENDOF
' BRANCH OF
." BRANCH ( "
8 + DUP @
.
." ) "
ENDOF
' ' OF
[ CHAR ' ] LITERAL EMIT SPACE
8 + DUP @
CFA>
ID. SPACE
ENDOF
' EXIT OF
2DUP
8 +
<> IF
." EXIT "
THEN
ENDOF
DUP
CFA>
ID. SPACE
ENDCASE
8 +
REPEAT
';' EMIT CR
2DROP
;
\ Anonymous colon definition; leaves its execution token on the stack.
: :NONAME
0 0 CREATE
HERE @
DOCOL ,
]
;
\ ['] -- compile the next word's execution token as a literal.
: ['] IMMEDIATE
' LIT ,
;
( Exception )
\ Sentinel pushed onto the return stack by CATCH; THROW scans for it.
: EXCEPTION-MARKER
RDROP
0
;
: CATCH
DSP@ 8+ >R
' EXCEPTION-MARKER 8+
>R
EXECUTE
;
\ THROW unwinds the return stack to the nearest EXCEPTION-MARKER and
\ restores the data stack saved by CATCH; uncaught throws QUIT.
: THROW
?DUP IF
RSP@
BEGIN
DUP R0 8- <
WHILE
DUP @
' EXCEPTION-MARKER 8+ = IF
8+
RSP!
DUP DUP DUP
R>
8-
SWAP OVER
!
DSP! EXIT
THEN
8+
REPEAT
DROP
CASE
0 1- OF ( ABORT )
." ABORTED" CR
ENDOF
." UNCAUGHT THROW "
DUP . CR
ENDCASE
QUIT
THEN
;
: ABORT
0 1- THROW
;
\ NOTE: the word name has a typo ("TACE" for "TRACE"); kept for
\ compatibility with existing callers.
: PRINT-STACK-TACE
RSP@
BEGIN
DUP R0 8- <
WHILE
DUP @
CASE
' EXCEPTION-MARKER 8+ OF
." CATCH ( DSP="
8+ DUP @ U.
." ) "
ENDOF
DUP
CFA>
?DUP IF
2DUP
ID.
[ CHAR + ] LITERAL EMIT
SWAP >DFA 8+ - .
THEN
ENDCASE
8+
REPEAT
DROP
CR
;
( C String )
\ Z" -- NUL-terminated string literal (for passing to syscalls).
: Z" IMMEDIATE
STATE @ IF
' LITSTRING ,
HERE @
0 ,
BEGIN
KEY
DUP '"' <>
WHILE
HERE @ C!
1 HERE +!
REPEAT
0 HERE @ C!
1 HERE +!
DROP
DUP
HERE @ SWAP -
8-
SWAP !
ALIGN
' DROP ,
ELSE
HERE @
BEGIN
KEY
DUP '"' <>
WHILE
OVER C!
1+
REPEAT
DROP
0 SWAP C!
HERE @
THEN
;
\ Length of a NUL-terminated string.
: STRLEN
DUP
BEGIN
DUP C@ 0<>
WHILE
1+
REPEAT
SWAP -
;
\ Copy an (addr, len) string to HERE as a NUL-terminated C string.
: CSTRING
SWAP OVER
HERE @ SWAP
CMOVE
HERE @ +
0 SWAP C!
HERE @
;
( The Environment )
: ARGC S0 @ @ ;
: ARGV ( n -- str u )
1+ CELLS S0 @ +
@
DUP STRLEN
;
: ENVIRON
ARGC
2 +
CELLS
S0 @ +
;
\ Process and heap control via raw syscalls.
: BYE 0 SYS_EXIT SYSCALL1 ;
: GET-BRK ( -- brkpoint ) 0 SYS_BRK SYSCALL1 ;
: UNUSED ( -- n ) GET-BRK HERE @ - 8 / ;
: BRK ( brkpoint -- ) SYS_BRK SYSCALL1 ;
: MORECORE ( cells -- ) CELLS GET-BRK + BRK ;
\ File access words; on error the negated errno is returned.
: R/O ( -- fam ) O_RDONLY ;
: R/W ( -- fam ) O_RDWR ;
: OPEN-FILE ( addr u fam -- fd 0 (if successful) | c-addr u fam -- fd errno (if there was an error) )
-ROT
CSTRING
SYS_OPEN SYSCALL2
DUP
DUP 0< IF
NEGATE
ELSE
DROP 0
THEN
;
: CREATE-FILE ( similar to OPEN-FILE )
O_CREAT OR
O_TRUNC OR
-ROT
CSTRING
420 -ROT
SYS_OPEN SYSCALL3
DUP
DUP 0< IF
NEGATE
ELSE
DROP 0
THEN
;
: CLOSE-FILE
SYS_CLOSE SYSCALL1
NEGATE
;
: READ-FILE
>R SWAP R>
SYS_READ SYSCALL3
DUP
DUP 0< IF
NEGATE
ELSE
DROP 0
THEN
;
\ Print an (addr, len) message followed by the errno on the stack.
: PERROR
TELL
':' EMIT SPACE
." ERRNO="
. CR
;
( TODO: translate jonesforth x86 assembler into x64 )
\ Startup banner; suppressed when TEST-MODE is defined.
: WELCOME
S" TEST-MODE" FIND NOT IF
." Jombloforth version " VERSION . CR
UNUSED . ." cells remaining" CR
." ok " CR
THEN
;
WELCOME
HIDE WELCOME
|
{"hexsha": "a0d47dbed10896d4f8a9822e7097e3e1f7f1ecd4", "size": 12525, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "jombloforth.f", "max_stars_repo_name": "matematikaadit/JombloForth", "max_stars_repo_head_hexsha": "cf61a32e0679a6070b73f6db24bba1a0f9bb920c", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-03-08T08:22:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-12T13:14:11.000Z", "max_issues_repo_path": "jombloforth.f", "max_issues_repo_name": "ammarfaizi2/jombloforth", "max_issues_repo_head_hexsha": "cf61a32e0679a6070b73f6db24bba1a0f9bb920c", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-03T20:37:51.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-04T09:15:17.000Z", "max_forks_repo_path": "jombloforth.f", "max_forks_repo_name": "matematikaadit/JombloForth", "max_forks_repo_head_hexsha": "cf61a32e0679a6070b73f6db24bba1a0f9bb920c", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-25T22:06:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-09T13:59:04.000Z", "avg_line_length": 14.5639534884, "max_line_length": 101, "alphanum_fraction": 0.4066267465, "num_tokens": 3880}
|
from cctpy.baseutils import Vectors, Equal
import unittest
import numpy as np
from cctpy.abstract_classes import LocalCoordinateSystem
class LocalCoordinateSystemTest(unittest.TestCase):
    """Unit tests for LocalCoordinateSystem coordinate transforms."""

    def test_point_to_local_coordinate(self):
        # Build a random local frame and check that point_to_local_coordinate
        # agrees with a manual projection onto the frame's unit axes.
        for i in range(10):
            o = np.random.rand(3) * (i + 1)
            main = np.random.rand(3) * (i + 1)
            temp = np.random.rand(3) * (i + 1)
            second = np.cross(main, temp)
            lc = LocalCoordinateSystem(o, main, second)
            p = np.random.rand(3) * (i + 1)
            op = p - o
            zi = Vectors.normalize_self(main.copy())
            xi = Vectors.normalize_self(second.copy())
            yi = np.cross(zi, xi)
            x = np.inner(op, xi)
            y = np.inner(op, yi)
            z = np.inner(op, zi)
            self.assertTrue(Equal.equal_vector(
                lc.point_to_local_coordinate(p),
                np.array([x, y, z])
            ))

    def test_global_coordinate_system(self):
        """
        Test the global coordinate system: converting a point to local
        coordinates in the global frame must be the identity.
        """
        gcs = LocalCoordinateSystem.global_coordinate_system()
        for i in range(10):
            v0 = np.random.rand(3)
            v = gcs.point_to_local_coordinate(v0)
            self.assertTrue(Equal.equal_vector(v0, v))

    def test_local_to_global_point(self):
        # Round-trip test: local -> global -> local must be the identity.
        i = 0
        while i < 10:
            ol = np.random.randn(3)  # origin of an arbitrary frame
            xl = np.random.randn(3)  # x direction of the frame
            yl = np.random.randn(3)  # y direction of the frame
            xl = Vectors.normalize_self(xl)
            yl = Vectors.normalize_self(yl)
            if Equal.equal_vector(xl, yl) or Equal.equal_vector(xl + yl, np.zeros(3)):
                # xl and yl are (anti-)parallel -- regenerate this iteration
                i -= 1
                continue
            zl = np.cross(xl, yl)
            zl = Vectors.normalize_self(zl)
            yl = -np.cross(xl, zl)  # orthogonalize
            lcs = LocalCoordinateSystem(ol, zl, xl)
            self.assertTrue(Equal.equal_vector(yl, lcs.YI))
            for ignore in range(10):
                pl = np.random.randn(3)  # arbitrary point in the local frame
                pg = lcs.point_to_global_coordinate(pl)  # to global
                pl_1 = lcs.point_to_local_coordinate(pg)  # and back
                self.assertTrue(Equal.equal_vector(pl, pl_1))
            i += 1

    def test_local_to_global_line(self):
        # Same round-trip test as above, but for whole polylines.
        i = 0
        while i < 10:
            ol = np.random.randn(3)  # origin of an arbitrary frame
            xl = np.random.randn(3)  # x direction of the frame
            yl = np.random.randn(3)  # y direction of the frame
            xl = Vectors.normalize_self(xl)
            yl = Vectors.normalize_self(yl)
            if Equal.equal_vector(xl, yl) or Equal.equal_vector(xl + yl, np.zeros(3)):
                # xl and yl are (anti-)parallel -- regenerate this iteration
                i -= 1
                continue
            zl = np.cross(xl, yl)
            zl = Vectors.normalize_self(zl)
            yl = -np.cross(xl, zl)  # orthogonalize
            lcs = LocalCoordinateSystem(ol, zl, xl)
            self.assertTrue(Equal.equal_vector(yl, lcs.YI))
            for ignore in range(10):
                pl_start = np.random.randn(3)  # arbitrary start point (local)
                pl_end = np.random.randn(3)  # arbitrary end point (local)
                line_l = np.linspace(pl_start, pl_end, 50)
                line_g = lcs.line_to_global_coordinate(line_l)  # to global
                line_l_1 = lcs.line_to_local_coordinate(line_g)  # and back
                self.assertTrue(Equal.equal_vector(line_l, line_l_1))
            i += 1
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
{"hexsha": "d4c6cda8430deca9c89570775591949473451403", "size": 3586, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/tests/local_coordinate_system_test.py", "max_stars_repo_name": "madokast/cctpy", "max_stars_repo_head_hexsha": "b02c64220ea533a4fc9cad0b882d1be6edadf1c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-27T13:20:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T13:20:43.000Z", "max_issues_repo_path": "codes/tests/local_coordinate_system_test.py", "max_issues_repo_name": "madokast/cctpy", "max_issues_repo_head_hexsha": "b02c64220ea533a4fc9cad0b882d1be6edadf1c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/tests/local_coordinate_system_test.py", "max_forks_repo_name": "madokast/cctpy", "max_forks_repo_head_hexsha": "b02c64220ea533a4fc9cad0b882d1be6edadf1c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9193548387, "max_line_length": 86, "alphanum_fraction": 0.5281650864, "include": true, "reason": "import numpy", "num_tokens": 1023}
|
"""
Factor Analysis-regularized logistic regression.
Is `linear_layer` necessary?
"""
__date__ = "June - December 2021"
import numpy as np
import os
from sklearn.base import BaseEstimator
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
import torch
from torch.distributions import Categorical, Normal, kl_divergence
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader, WeightedRandomSampler
import warnings
from ..utils.utils import get_weights
# https://stackoverflow.com/questions/53014306/
# BUG FIX: float(torch.__version__[:3]) misparses two-digit minor versions
# ("1.10.2" -> 1.1), so compare a (major, minor) tuple instead.
_torch_version = tuple(int(p) for p in torch.__version__.split('.')[:2] if p.isdigit())
if _torch_version >= (1, 9):
    os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Torch dtypes used throughout the model.
FLOAT = torch.float32
INT = torch.int64
# Labels encode individual and behavior: label // MAX_LABEL is the
# individual, label % MAX_LABEL is the behavioral label.
MAX_LABEL = 1000
EPSILON = 1e-6
# Attributes sklearn's check_is_fitted looks for.
FIT_ATTRIBUTES = ['classes_']
class FaSae(torch.nn.Module, BaseEstimator):
def __init__(self, reg_strength=1.0, z_dim=32, weight_reg=0.0,
        nonnegative=True, variational=False, kl_factor=1.0, n_iter=50000,
        lr=1e-3, batch_size=256, beta=0.5, device='auto'):
    """
    A supervised autoencoder with nonnegative and variational options.

    Notes
    -----
    * The `labels` argument to `fit` and `score` is a bit hacky so that the
      model can work nicely with the sklearn model selection tools. The
      labels should be an array of integers with `label // 1000` encoding
      the individual and `label % 1000` encoding the behavioral label.

    Parameters
    ----------
    reg_strength : float, optional
        This controls how much the classifier is regularized. This should
        be positive, and larger values indicate more regularization.
    z_dim : int, optional
        Latent dimension/number of networks.
    weight_reg : float, optional
        Model L2 weight regularization.
    nonnegative : bool, optional
        Use nonnegative factorization.
    variational : bool, optional
        Whether a variational autoencoder is used.
    kl_factor : float, optional
        How much to weight the KL divergence term in the variational
        autoencoder (VAE). The standard setting is `1.0`. This is a distinct
        regularization parameter from `reg_strength` that can be
        independently set. This parameter is only used if `variational` is
        `True`.
    n_iter : int, optional
        Number of gradient steps during training.
    lr : float, optional
        Learning rate.
    batch_size : int, optional
        Minibatch size
    beta : float, optional
        Class-weighting exponent in [0, 1]: in `fit`, sampling weights are
        raised to `1 - beta` and loss weights to `beta`.
    device : str, optional
        Torch device; `'auto'` selects CUDA when available, otherwise CPU.
    """
    super(FaSae, self).__init__()
    # Validate and store every hyperparameter. (A duplicated `kl_factor`
    # assertion in the original was removed.)
    assert isinstance(reg_strength, (int, float))
    assert reg_strength >= 0.0
    self.reg_strength = float(reg_strength)
    assert isinstance(z_dim, int)
    assert z_dim >= 1
    self.z_dim = z_dim
    assert isinstance(weight_reg, (int, float))
    assert weight_reg >= 0.0
    self.weight_reg = float(weight_reg)
    assert isinstance(nonnegative, bool)
    self.nonnegative = nonnegative
    assert isinstance(variational, bool)
    self.variational = variational
    assert isinstance(kl_factor, (int, float))
    assert kl_factor >= 0.0, f"{kl_factor} < 0"
    self.kl_factor = float(kl_factor)
    assert isinstance(n_iter, int)
    assert n_iter > 0
    self.n_iter = n_iter
    assert isinstance(lr, (int, float))
    assert lr > 0.0
    self.lr = float(lr)
    assert isinstance(batch_size, int)
    assert batch_size > 0
    self.batch_size = batch_size
    assert isinstance(beta, (int, float))
    assert beta >= 0.0 and beta <= 1.0
    self.beta = float(beta)
    if device == 'auto':
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    self.device = device
    self.classes_ = None  # set by `fit`; inspected via FIT_ATTRIBUTES
def _initialize(self, n_features):
    """Initialize parameters of the networks before training."""
    # Check arguments.
    n_classes = len(self.classes_)
    assert n_classes <= self.z_dim, f"{n_classes} > {self.z_dim}"
    if self.nonnegative and self.weight_reg > 0.0:
        # Weight regularization is incompatible with the nonnegative
        # factorization option; silently disable it with a warning.
        self.weight_reg = 0.0
        warnings.warn(
            f"Weight regularization should be 0.0 "
            f"for nonnegative factorization"
        )
    # Make the networks.
    self.recognition_model = torch.nn.Linear(n_features, self.z_dim)
    # rec_model_1/rec_model_2: presumably the mean/log-std heads for the
    # variational encoder -- TODO confirm against `forward`.
    self.rec_model_1 = torch.nn.Linear(n_features, self.z_dim)
    self.rec_model_2 = torch.nn.Linear(n_features, self.z_dim)
    self.linear_layer = torch.nn.Linear(self.z_dim, self.z_dim)
    # Standard-normal prior over the latents (used when `variational`).
    prior_mean = torch.zeros(self.z_dim).to(self.device)
    prior_std = torch.ones(self.z_dim).to(self.device)
    self.prior = Normal(prior_mean, prior_std)
    # Decoder: latents back to features.
    self.model = torch.nn.Linear(self.z_dim, n_features)
    # Per-class bias added to the classification logits.
    self.logit_bias = torch.nn.Parameter(torch.zeros(1,n_classes))
    self.to(self.device)
def fit(self, features, labels, print_freq=500):
"""
Train the model on the given dataset.
Parameters
----------
features : numpy.ndarray
Shape: [n_data, n_features]
labels : numpy.ndarray
Shape: [n_data]
n_iter : int, optional
Number of training epochs.
lr : float, optional
Learning rate.
batch_size : int, optional
verbose : bool, optional
print_freq : None or int, optional
"""
# Check arguments.
features, labels = check_X_y(features, labels)
# Derive groups, labels, and weights from labels.
groups, labels, weights = _derive_groups(labels)
self.classes_, labels = np.unique(labels, return_inverse=True)
if features.shape[0] != labels.shape[0]:
raise ValueError(f"{features.shape}[0] != {labels.shape}[0]")
if len(features.shape) != 2:
raise ValueError(f"len({features.shape}) != 2")
if len(labels.shape) != 1:
raise ValueError(f"len({labels.shape}) != 1")
self._initialize(features.shape[1])
# NumPy arrays to PyTorch tensors.
features = torch.tensor(features, dtype=FLOAT).to(self.device)
labels = torch.tensor(labels, dtype=INT).to(self.device)
weights = torch.tensor(weights, dtype=FLOAT).to(self.device)
sampler_weights = torch.pow(weights, 1.0 - self.beta)
weights = torch.pow(weights, self.beta)
# Make some loaders and an optimizer.
dset = TensorDataset(features, labels, weights)
sampler = WeightedRandomSampler(
sampler_weights,
num_samples=self.batch_size,
replacement=True,
)
loader = DataLoader(
dset,
sampler=sampler,
batch_size=self.batch_size,
)
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
# Train.
for epoch in range(1,self.n_iter+1):
epoch_loss = 0.0
for batch in loader:
self.zero_grad()
loss = self(*batch)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
if print_freq is not None and epoch % print_freq == 0:
print(f"iter {epoch:04d}, loss: {loss:3f}")
return self
    def forward(self, features, labels, weights):
        """
        Calculate a composite loss for the features and labels.

        The loss combines: (1) a reconstruction term scaled by
        `self.reg_strength`, (2) the negative label log-likelihood,
        (3) a KL term scaled by `self.kl_factor` when `self.variational`,
        and (4) an L2 penalty on the decoder weights scaled by
        `self.weight_reg`.

        Parameters
        ----------
        features : torch.Tensor
            Shape: [batch,n_features]
        labels : torch.Tensor
            Shape: [batch]
        weights : None or torch.Tensor
            Per-datapoint class weights applied to the label log-likes.
            Shape: [batch]

        Returns
        -------
        loss : torch.Tensor
            Scalar loss, averaged over the batch.
            Shape: []
        """
        if self.variational:
            # Feed through the recognition network to get latents.
            z_mus = self.rec_model_1(features)
            z_log_stds = self.rec_model_2(features)
            # Make the variational posterior and get a KL from the prior.
            dist = Normal(z_mus, EPSILON + z_log_stds.exp())
            kld = kl_divergence(dist, self.prior).sum(dim=1) # [b]
            # Sample (rsample keeps the reparameterized gradient path).
            zs = dist.rsample() # [b,z]
            # Project.
            zs = self.linear_layer(zs)
        else: # deterministic autoencoder
            # Feed through the recognition network to get latents.
            zs = self.recognition_model(features)
        # Reconstruct the features.
        if self.nonnegative:
            # Softplus on both factors and latents keeps the product nonnegative.
            A = F.softplus(self.model.weight)
            features_rec = A.unsqueeze(0) @ F.softplus(zs).unsqueeze(-1)
            features_rec = features_rec.squeeze(-1)
        else:
            A = self.model.weight
            features_rec = self.model(zs)
        # Calculate a reconstruction loss.
        rec_loss = torch.mean((features - features_rec).pow(2), dim=1) # [b]
        rec_loss = self.reg_strength * rec_loss
        # Predict the labels. The last class gets a constant-1 logit column,
        # so only n_classes-1 latent dimensions plus the bias drive the logits.
        logits = zs[:,:len(self.classes_)-1]
        ones = torch.ones(
            logits.shape[0],
            1,
            dtype=logits.dtype,
            device=logits.device,
        )
        logits = torch.cat([logits, ones], dim=1) + self.logit_bias
        log_probs = Categorical(logits=logits).log_prob(labels) # [b]
        # Weight label log likes by class weights.
        if weights is not None:
            assert weights.shape == labels.shape
            log_probs = weights * log_probs
        # Regularize the model weights.
        l2_loss = self.weight_reg * torch.norm(A)
        # Combine all the terms into a composite loss.
        loss = rec_loss - log_probs
        if self.variational:
            loss = loss + self.kl_factor * kld
        loss = torch.mean(loss) + l2_loss
        return loss
    @torch.no_grad()
    def predict_proba(self, features, to_numpy=True, stochastic=False):
        """
        Probability estimates.

        Note
        ----
        * This should be consistent with `self.forward`.

        Parameters
        ----------
        features : torch.Tensor
            Shape: [batch, n_features]
            NOTE(review): previously documented as numpy.ndarray, but
            `predict` passes a torch.Tensor on `self.device` -- confirm
            no external caller passes raw arrays.
        to_numpy : bool, optional
            If True, return a NumPy array; otherwise a torch.Tensor.
        stochastic : bool, optional
            If True (and `self.variational`), sample latents from the
            posterior instead of using the posterior mean.

        Returns
        -------
        probs : numpy.ndarray or torch.Tensor
            Shape: [batch, n_classes]
        """
        if self.variational:
            # Feed through the recognition network to get latents.
            z_mus = self.rec_model_1(features)
            z_log_stds = self.rec_model_2(features)
            if stochastic:
                # Make the variational posterior and sample.
                dist = Normal(z_mus, EPSILON + z_log_stds.exp())
                zs = dist.rsample() # [b,z]
            else:
                zs = z_mus
            # Project.
            zs = self.linear_layer(zs)
        else: # deterministic autoencoder
            # Feed through the recognition network to get latents.
            zs = self.recognition_model(features)
        # Get class predictions: constant-1 logit for the last class plus a
        # learned bias, matching the construction in `forward`.
        logits = zs[:,:len(self.classes_)-1]
        ones = torch.ones(
            logits.shape[0],
            1,
            dtype=logits.dtype,
            device=logits.device,
        )
        logits = torch.cat([logits, ones], dim=1) + self.logit_bias
        probs = F.softmax(logits, dim=1) # [b, n_classes]
        if to_numpy:
            return probs.cpu().numpy()
        return probs
@torch.no_grad()
def predict(self, X):
"""
Predict class labels for the features.
Parameters
----------
X : numpy.ndarray
Features
Shape: [batch, n_features]
Returns
-------
predictions : numpy.ndarray
Shape: [batch]
"""
# Checks
check_is_fitted(self, attributes=FIT_ATTRIBUTES)
X = check_array(X)
# Feed through model.
X = torch.tensor(X, dtype=FLOAT).to(self.device)
probs = self.predict_proba(X, to_numpy=False)
predictions = torch.argmax(probs, dim=1)
return self.classes_[predictions.cpu().numpy()]
    @torch.no_grad()
    def score(self, features, labels):
        """
        Get a class weighted accuracy.

        This is the objective we really care about, which doesn't contain the
        regularization in FA's `forward` method.

        Parameters
        ----------
        features : numpy.ndarray
            Shape: [n_datapoints, n_features]
        labels : numpy.ndarray
            Composite labels; groups and per-datapoint class weights are
            derived internally via `_derive_groups`.
            Shape: [n_datapoints]

        Return
        ------
        weighted_acc : float
            Mean over datapoints of (correct? 1 : 0) * class weight.
            NOTE(review): this is sum(w*correct)/n, not sum(w*correct)/sum(w);
            it equals a true weighted accuracy only if the weights average
            to 1 -- confirm against `get_weights`.
        """
        # Derive groups, labels, and weights from labels.
        groups, labels, weights = _derive_groups(labels)
        predictions = self.predict(features)
        # 1.0 where the prediction matches the label, else 0.0.
        scores = np.zeros(len(features))
        scores[predictions == labels] = 1.0
        scores = scores * weights
        weighted_acc = np.mean(scores)
        return weighted_acc
def get_params(self, deep=True):
"""Get parameters for this estimator."""
params = {
'reg_strength': self.reg_strength,
'z_dim': self.z_dim,
'weight_reg': self.weight_reg,
'nonnegative': self.nonnegative,
'variational': self.variational,
'kl_factor': self.kl_factor,
'n_iter': self.n_iter,
'lr': self.lr,
'batch_size': self.batch_size,
'beta': self.beta,
'device': self.device,
'classes_': self.classes_,
}
if deep:
params['model_state_dict'] = self.state_dict()
return params
def set_params(self, reg_strength=None, z_dim=None, weight_reg=None,
nonnegative=None, variational=None, kl_factor=None, n_iter=None,
lr=None, batch_size=None, beta=None, device=None, classes_=None,
model_state_dict=None):
"""
Set the parameters of this estimator.
Parameters
----------
...
"""
if reg_strength is not None:
self.reg_strength = reg_strength
if z_dim is not None:
self.z_dim = z_dim
if weight_reg is not None:
self.weight_reg = weight_reg
if nonnegative is not None:
self.nonnegative = nonnegative
if variational is not None:
self.variational = variational
if kl_factor is not None:
self.kl_factor = kl_factor
if n_iter is not None:
self.n_iter = n_iter
if lr is not None:
self.lr = lr
if batch_size is not None:
self.batch_size = batch_size
if beta is not None:
self.beta = beta
if device is not None:
self.device = device
if classes_ is not None:
self.classes_ = classes_
if model_state_dict is not None:
assert 'model.bias' in model_state_dict, \
f"'model.bias' not in {list(model_state_dict.keys())}"
n_features = len(model_state_dict['model.bias'].view(-1))
self._initialize(n_features)
self.load_state_dict(model_state_dict)
return self
def save_state(self, fn):
"""Save parameters for this estimator."""
np.save(fn, self.get_params(deep=True))
def load_state(self, fn):
"""Load and set the parameters for this estimator."""
self.set_params(**np.load(fn, allow_pickle=True).item())
@torch.no_grad()
def get_factor(self, factor_num=0):
"""
Get a linear factor.
Parameters
----------
feature_num : int
Which factor to return. 0 <= `factor_num` < self.z_dim
"""
check_is_fitted(self, attributes=FIT_ATTRIBUTES)
assert isinstance(factor_num, int)
assert factor_num >= 0 and factor_num < self.z_dim
A = self.model.weight[:,factor_num]
if self.nonnegative:
A = F.softplus(A)
return A.detach().cpu().numpy()
def _derive_groups(labels):
    """Split composite labels into groups, base labels, and class weights."""
    label_arr = np.asarray(labels)
    # Integer-divide to recover the group, modulo to recover the label.
    groups = label_arr // MAX_LABEL
    labels = label_arr % MAX_LABEL
    weights = get_weights(labels, groups)
    return groups, labels, weights
if __name__ == '__main__':
    """
    Here's an example using some fake data.
    NOTE: fix this!
    """
    raise NotImplementedError
    # NOTE(review): everything below is unreachable dead code and is out of
    # sync with the current API: `fit` takes no `epochs` kwarg, `score`
    # takes (features, labels) only, and `set_params` expects keyword
    # arguments rather than a single dict. The FaSae constructor call also
    # appears inconsistent (positional n_features/n_classes, class_weights
    # kwarg) -- confirm against __init__ before re-enabling.
    n = 100 # number of datapoints/windows
    n_features = 100 # total number of LFP features
    n_classes = 3 # number of label types
    # Make some fake data.
    features = np.random.randn(n, n_features)
    labels = np.random.randint(n_classes, size=n)
    # Calculate class weights (inverse-frequency).
    class_counts = [len(np.argwhere(labels==i)) for i in range(n_classes)]
    print("Class counts:", class_counts)
    class_weights = n / (n_classes * np.array(class_counts))
    print("Class weights:", class_weights)
    # Make the model.
    model = FaSae(
        n_features,
        n_classes,
        class_weights=class_weights,
        weight_reg=0.0,
        nonnegative=True,
        variational=True,
        kl_factor=0.1,
    )
    # Fit the model.
    print("Training model...")
    model.fit(features, labels, epochs=5000, print_freq=250)
    # Make some predictions.
    print("Making predictions...")
    predictions = model.predict(features)
    print("Predictions:")
    print(predictions)
    # Calculate a weighted accuracy.
    weighted_acc = model.score(
        features,
        labels,
        class_weights,
    )
    print("Weighted accuracy on training set:", weighted_acc)
    # Get state.
    params = model.get_params()
    # Make a new model and load the state.
    new_model = FaSae(n_features, n_classes)
    new_model.set_params(params)
    # Calculate a weighted accuracy; should match the score above.
    weighted_acc = new_model.score(
        features,
        labels,
        class_weights,
    )
    print("This should be the same number:", weighted_acc)
###
|
{"hexsha": "a43df95d9cd6c1d00d57c3f5dc494b56d8e5cdb2", "size": 18316, "ext": "py", "lang": "Python", "max_stars_repo_path": "lpne/models/factor_analysis_sae.py", "max_stars_repo_name": "carlson-lab/lpne", "max_stars_repo_head_hexsha": "b45087506669216d885b82ce11900c2ab006d456", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-30T14:21:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:21:50.000Z", "max_issues_repo_path": "lpne/models/factor_analysis_sae.py", "max_issues_repo_name": "carlson-lab/lpne", "max_issues_repo_head_hexsha": "b45087506669216d885b82ce11900c2ab006d456", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lpne/models/factor_analysis_sae.py", "max_forks_repo_name": "carlson-lab/lpne", "max_forks_repo_head_hexsha": "b45087506669216d885b82ce11900c2ab006d456", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3624772313, "max_line_length": 80, "alphanum_fraction": 0.5850622407, "include": true, "reason": "import numpy", "num_tokens": 4050}
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.pyqtgraph.flowchart.Node import Node
from acq4.util import Qt
import numpy as np
import acq4.util.metaarray as metaarray
from acq4.pyqtgraph.flowchart.library.common import *
import acq4.util.functions as functions
class ExpDeconvolve(CtrlNode):
    """Exponential deconvolution filter."""
    nodeName = 'ExpDeconvolve'
    # Single control: the exponential time constant tau, in seconds.
    uiTemplate = [
        ('tau', 'spin', {'value': 10e-3, 'step': 1, 'minStep': 100e-6, 'dec': True, 'bounds': [0.0, None], 'suffix': 's', 'siPrefix': True})
    ]
    def processData(self, data):
        # Delegate the deconvolution to the shared functions module.
        tau = self.ctrls['tau'].value()
        return functions.expDeconvolve(data, tau)
        # Legacy inline implementation, unreachable because of the return
        # above; kept commented for reference.
        #dt = 1
        #if (hasattr(data, 'implements') and data.implements('MetaArray')):
            #dt = data.xvals(0)[1] - data.xvals(0)[0]
        #d = data[:-1] + (self.ctrls['tau'].value() / dt) * (data[1:] - data[:-1])
        #if (hasattr(data, 'implements') and data.implements('MetaArray')):
            #info = data.infoCopy()
            #if 'values' in info[0]:
                #info[0]['values'] = info[0]['values'][:-1]
            #return MetaArray(d, info=info)
        #else:
            #return d
class ExpReconvolve(CtrlNode):
    """Exponential reconvolution filter. Only works with MetaArrays that were previously deconvolved."""
    nodeName = 'ExpReconvolve'
    # Commented-out control template kept for reference; no UI controls are
    # currently exposed for this node.
    #uiTemplate = [
        #('tau', 'spin', {'value': 10e-3, 'step': 1, 'minStep': 100e-6, 'dec': True, 'bounds': [0.0, None], 'suffix': 's', 'siPrefix': True})
    #]
    def processData(self, data):
        # Delegate the reconvolution to the shared functions module.
        return functions.expReconvolve(data)
class Tauiness(CtrlNode):
    """Sliding-window exponential fit"""
    nodeName = 'Tauiness'
    uiTemplate = [
        ('window', 'intSpin', {'value': 100, 'min': 3, 'max': 1000000}),
        ('skip', 'intSpin', {'value': 10, 'min': 0, 'max': 10000000})
    ]
    def processData(self, data):
        # Read the window length and skip count from the UI controls,
        # then delegate to the shared functions module.
        window = self.ctrls['window'].value()
        skip = self.ctrls['skip'].value()
        return functions.tauiness(data, window, skip)
|
{"hexsha": "d578dafaad4ce032a73e6b5260d60d08dd43179e", "size": 2044, "ext": "py", "lang": "Python", "max_stars_repo_path": "acq4/util/flowchart/Filters.py", "max_stars_repo_name": "aleonlein/acq4", "max_stars_repo_head_hexsha": "4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-04T17:04:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-04T17:04:53.000Z", "max_issues_repo_path": "acq4/util/flowchart/Filters.py", "max_issues_repo_name": "aleonlein/acq4", "max_issues_repo_head_hexsha": "4b1fcb9ad2c5e8d4595a2b9cf99d50ece0c0f555", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2016-09-27T17:25:24.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-02T21:00:11.000Z", "max_forks_repo_path": "acq4/util/flowchart/Filters.py", "max_forks_repo_name": "sensapex/acq4", "max_forks_repo_head_hexsha": "9561ba73caff42c609bd02270527858433862ad8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-10-19T06:39:36.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-30T21:06:45.000Z", "avg_line_length": 37.1636363636, "max_line_length": 141, "alphanum_fraction": 0.5914872798, "include": true, "reason": "import numpy", "num_tokens": 596}
|
import numpy as np
def cummin(x):
    """A python implementation of the cummin function in R"""
    # Propagate the running minimum forward, mutating x in place.
    for idx in range(1, len(x)):
        x[idx] = min(x[idx], x[idx - 1])
    return x
def bh_fdr(pval):
    """A python implementation of the Benjamani-Hochberg FDR method.

    This code should always give precisely the same answer as using
    p.adjust(pval, method="BH") in R.

    Parameters
    ----------
    pval : list or array
        list/array of p-values

    Returns
    -------
    pval_adj : np.array
        adjusted p-values according the benjamani-hochberg method
    """
    pval_array = np.asarray(pval)
    sorted_order = np.argsort(pval_array)
    ranked_pvals = pval_array[sorted_order]
    n = float(len(pval_array))
    # BH scaling: multiply the i-th smallest p-value by n/i.
    ranks = np.arange(1, int(n) + 1, dtype=float)
    scaled = ranked_pvals * (n / ranks)
    # Enforce monotonicity from the largest p-value downward and cap at 1.
    # np.minimum.accumulate is the vectorized equivalent of R's cummin,
    # replacing the Python-loop helper (and a dead pval_adj = zeros(...)
    # assignment the original carried).
    pval_adj = np.minimum(1, np.minimum.accumulate(scaled[::-1]))[::-1]
    # Undo the sort so the output lines up with the input order.
    out = np.empty_like(pval_adj)
    out[sorted_order] = pval_adj
    return out
|
{"hexsha": "1083ca1bcf90ccd5d22dad43aaa8af1e8f328fe8", "size": 1025, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepDegron/pvalue.py", "max_stars_repo_name": "ctokheim/deepDegron", "max_stars_repo_head_hexsha": "753687f92fbff22f1a3044c5dc398aa1a19373ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-10-29T05:01:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T10:54:38.000Z", "max_issues_repo_path": "deepDegron/pvalue.py", "max_issues_repo_name": "ctokheim/deepDegron", "max_issues_repo_head_hexsha": "753687f92fbff22f1a3044c5dc398aa1a19373ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-24T02:09:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T02:09:59.000Z", "max_forks_repo_path": "deepDegron/pvalue.py", "max_forks_repo_name": "ctokheim/deepDegron", "max_forks_repo_head_hexsha": "753687f92fbff22f1a3044c5dc398aa1a19373ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-03-06T15:08:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-30T12:53:46.000Z", "avg_line_length": 29.2857142857, "max_line_length": 72, "alphanum_fraction": 0.6253658537, "include": true, "reason": "import numpy", "num_tokens": 282}
|
import numpy as np
import rasterio
def load_datasets():
    """
    Loads the two target datasets from disk into memory.
    """
    print("Reading temperature data...")
    temperature = rasterio.open("./hourly_max_temp_2019.nc").read()
    print("Reading land cover data...")
    land_cover = rasterio.open("./land_cover_classification.tiff").read()[0] # There's only a single band in this dataset - just return that
    return land_cover, temperature
def save_dataset_2d(dataset, filename):
    """Write a 2-D dataset to a plain-text file, one entry per line.

    Output format: number of dimensions (always 2), number of rows,
    number of columns, then every value in row-major order.

    Parameters
    ----------
    dataset : numpy.ndarray
        2-D array to serialize; raises ValueError if not 2-D.
    filename : str
        Destination path; any existing file is overwritten.
    """
    # Unpacking validates the rank (the original silently wrote garbage
    # for non-2-D input).
    rows, cols = dataset.shape
    dims = len(dataset.shape)
    with open(filename, 'w') as f:
        # Header: dimensionality followed by each axis length.
        f.write(str(dims) + '\n')
        f.write(str(rows) + '\n')
        f.write(str(cols) + '\n')
        # Values in row-major order; iterate directly instead of
        # index-by-range for clarity.
        for row in dataset:
            for value in row:
                f.write(str(value) + '\n')
def save_dataset_3d(dataset, filename):
    """Write a 3-D dataset to a plain-text file, one entry per line.

    Output format: number of dimensions (always 3), number of layers,
    number of rows, number of columns, then every value in
    layer/row/column order.

    Parameters
    ----------
    dataset : numpy.ndarray
        3-D array to serialize; raises ValueError if not 3-D.
    filename : str
        Destination path; any existing file is overwritten.
    """
    # Unpacking validates the rank (the original silently wrote garbage
    # for non-3-D input).
    lyrs, rows, cols = dataset.shape
    dims = len(dataset.shape)
    with open(filename, 'w') as f:
        # Header: dimensionality followed by each axis length.
        f.write(str(dims) + '\n')
        f.write(str(lyrs) + '\n')
        f.write(str(rows) + '\n')
        f.write(str(cols) + '\n')
        # Values in layer/row/column order; iterate directly instead of
        # index-by-range for clarity.
        for layer in dataset:
            for row in layer:
                for value in row:
                    f.write(str(value) + '\n')
if __name__ == "__main__":
    # Load both rasters and dump each to the line-per-value text format.
    land_cover_data, hourly_max_temp_data = load_datasets()
    print(land_cover_data.shape)
    save_dataset_2d(land_cover_data, "land_cover_classification.txt")
    print(hourly_max_temp_data.shape)
    save_dataset_3d(hourly_max_temp_data, "hourly_max_temp_2019.txt")
    print("Done!")
|
{"hexsha": "e52d493c53dd040d3b218c4f7be1ab2158de785e", "size": 1704, "ext": "py", "lang": "Python", "max_stars_repo_path": "sci-software-task/cpp/Data/convert.py", "max_stars_repo_name": "idouros/sci-software-task", "max_stars_repo_head_hexsha": "594fa39a1bac3881aa357d8dfb5eadaeeaa917b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sci-software-task/cpp/Data/convert.py", "max_issues_repo_name": "idouros/sci-software-task", "max_issues_repo_head_hexsha": "594fa39a1bac3881aa357d8dfb5eadaeeaa917b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sci-software-task/cpp/Data/convert.py", "max_forks_repo_name": "idouros/sci-software-task", "max_forks_repo_head_hexsha": "594fa39a1bac3881aa357d8dfb5eadaeeaa917b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4838709677, "max_line_length": 146, "alphanum_fraction": 0.6026995305, "include": true, "reason": "import numpy", "num_tokens": 462}
|
import noise as n
import numpy as np
import random
import curses
import math
import argparse

# Default parameter values, used when the matching CLI flag is absent.
scale = 50
octaves = 6
xspeed = 1
yspeed = .75
adjust = .15
# Terminal color numbers; the index into this list is picked per cell by
# terrain height (blues first, then greens/greys).
colors = [17, 18, 19, 20, 21, 228, 41, 40, 34, 28, 22, 245, 244, 243, 242, 241, 240]

# Command-line argument parser.
# NOTE(review): the --hspeed help text quotes a default of .1 that does not
# match the actual defaults above (xspeed=1, yspeed=.75). Also --vspeed is
# assigned to xspeed and --hspeed to yspeed below; this is consistent with
# x indexing rows (vertical) but easy to misread -- confirm intent.
parser = argparse.ArgumentParser(description='Fly over a randomly generated landmass')
parser.add_argument("-s", "--scale", type=float, help="Set size of islands. Defaults to 50")
parser.add_argument("-o", "--octaves", type=int, help="Set level of detail for islands. Defaults to 6")
parser.add_argument("--hspeed", type=float, help="Set x component of flyover speed. Defaults to .1")
parser.add_argument("--vspeed", type=float, help="Set y component of flyover speed. Defaults to .75")
parser.add_argument("-S", "--size", type=float, help="Set the size of landmasses. Defaults to .15")
args = parser.parse_args()

# Override defaults with any flags the user supplied (a value of 0 counts
# as unset because of the truthiness checks).
if args.scale:
    scale = args.scale
if args.octaves:
    octaves = args.octaves
if args.vspeed:
    xspeed = args.vspeed
if args.hspeed:
    yspeed = args.hspeed
if args.size:
    adjust = args.size

# Curses setup: raw input, hidden cursor, terminal default colors.
screen = curses.initscr()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.start_color()
curses.use_default_colors()

# Drawable area of the terminal (one column reserved to avoid wrapping).
rows = curses.LINES
columns = curses.COLS-1

# One curses color pair per palette entry (background = palette color).
for i in range(len(colors)):
    curses.init_pair(i+1, 0, colors[i])

# Height map covering the visible screen region.
arr = np.zeros((rows, columns))

# Random starting position in noise space so each run looks different.
xoff = random.randint(0, 1000)
yoff = random.randint(0, 1000)

# Thresholding constants: [0, perlinMax] is split into one band per color.
perlinMax = 1
ncolors = len(colors)
gap = perlinMax / ncolors

# Main loop: resample the noise field, scroll the offsets, and redraw,
# until the user interrupts with Ctrl-C.
while True:
    try:
        # Sample 2D Perlin noise per cell; `adjust` raises the floor so
        # more of the map lands above "sea level".
        for x in range(rows):
            for y in range(columns):
                arr[x][y] = n.pnoise2((x+xoff)/scale, (y+yoff)/(2*scale), octaves) + adjust
        xoff += xspeed
        yoff += yspeed
        # Map each height value to a color band and paint the cell.
        for x in range(rows):
            for y in range(columns):
                setcolor = 1
                for i in range(ncolors + 1):
                    if gap * i + 1 >= arr[x][y] > gap * i:
                        setcolor = i + 2
                screen.addch(x, y, ' ', curses.color_pair(setcolor))
        screen.refresh()
    except KeyboardInterrupt:
        # Restore the cursor before exiting.
        curses.curs_set(1)
        exit()
|
{"hexsha": "b98237485bff545c28a5c3f079d1fedc0b0adaea", "size": 2305, "ext": "py", "lang": "Python", "max_stars_repo_path": "cterra.py", "max_stars_repo_name": "wymcg/termsavers", "max_stars_repo_head_hexsha": "36de47b2ecce5b66ad106b2484dcf3cb1b73bb63", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-27T04:08:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T21:09:01.000Z", "max_issues_repo_path": "cterra.py", "max_issues_repo_name": "wymcg/termsavers", "max_issues_repo_head_hexsha": "36de47b2ecce5b66ad106b2484dcf3cb1b73bb63", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cterra.py", "max_forks_repo_name": "wymcg/termsavers", "max_forks_repo_head_hexsha": "36de47b2ecce5b66ad106b2484dcf3cb1b73bb63", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5212765957, "max_line_length": 103, "alphanum_fraction": 0.6407809111, "include": true, "reason": "import numpy", "num_tokens": 655}
|
# Activate the project environment before loading its dependencies.
using Pkg
using Dates
const HOME = ENV["HOME"]
Pkg.activate("$HOME/HelioseismicKernels")

using MPI
MPI.Init()
using HelioseismicKernels

const comm = MPI.COMM_WORLD
const nworkers = MPI.Comm_size(comm)

try
    # First call restricted to one frequency index per rank -- presumably
    # a warm-up/compilation pass before the timed run; confirm.
    HelioseismicKernels.Cω(comm, Point2D(π/2, 0), Point2D(π/2, deg2rad(45)), los_earth(),
        ν_ind_range = 1:nworkers)
    # Timed full computation of the cross-covariance between two surface
    # points separated by 45 degrees in longitude.
    tstart = time()
    HelioseismicKernels.Cω(comm, Point2D(π/2, 0), Point2D(π/2, deg2rad(45)), los_earth())
    tend = time()
    Δt = Time(0) + Second(round(Int, tend - tstart))
    # Only the root rank reports the elapsed wall time.
    if MPI.Comm_rank(comm) == 0
        println("Time taken = $Δt")
    end
finally
    # Always shut down MPI, even if the computation throws.
    MPI.Finalize()
end
|
{"hexsha": "e2d0cd6eddd35dfbac6eeebc07d228891a61551b", "size": 616, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "computecrosscov.jl", "max_stars_repo_name": "jishnub/HelioseismicKernels.jl", "max_stars_repo_head_hexsha": "7c2ee184c57a1b181f725c6fc1bb2b66a7b265ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "computecrosscov.jl", "max_issues_repo_name": "jishnub/HelioseismicKernels.jl", "max_issues_repo_head_hexsha": "7c2ee184c57a1b181f725c6fc1bb2b66a7b265ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "computecrosscov.jl", "max_forks_repo_name": "jishnub/HelioseismicKernels.jl", "max_forks_repo_head_hexsha": "7c2ee184c57a1b181f725c6fc1bb2b66a7b265ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6923076923, "max_line_length": 89, "alphanum_fraction": 0.6737012987, "num_tokens": 214}
|
from absl import flags
from absl import app
import sys
import os
from sotabencheval.language_modelling import WikiText103Evaluator, WikiText2Evaluator
import tensorflow as tf
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(f"{dir_path}/tf") # add tf. to path to import dynamic_eval
#os.chdir(f"{dir_path}/tf")
from dynamiceval_tf_copy_for_sotabench import dynamic_eval, data_utils, FLAGS
#os.chdir(f"{dir_path}")
def main(unused_argv=None):
    """Run Transformer-XL dynamic evaluation on WikiText-103 and report
    the result through the sotabench evaluator."""
    print("unused_argv", unused_argv)
    tf.logging.set_verbosity(tf.logging.INFO)
    # Get corpus info: vocabulary size and adaptive-softmax cutoffs.
    corpus_info = data_utils.get_corpus_info(FLAGS.corpus_info_path)
    n_token = corpus_info["vocab_size"]
    cutoffs = corpus_info["cutoffs"][1:-1]
    tf.logging.info("n_token {}".format(n_token))
    # Feed the dynamic-eval output into the benchmark evaluator.
    # NOTE(review): `evaluator` is the return value of `.eval(...)`; this
    # assumes `eval` returns the evaluator instance -- confirm before
    # refactoring.
    evaluator = WikiText103Evaluator(
        model_name="Transformer-XL (RMS dynamic eval)",
        paper_arxiv_id="1904.08378",
        paper_pwc_id="dynamic-evaluation-of-transformer-language",
        #expected perplexity: 16.40
    ).eval(dynamic_eval(n_token, cutoffs, "/gpu:0"))
    # test why the results are not being saved, temporary mocking the evaluation
    # import numpy as np
    # evaluator._neglogloss = np.log(16.441230035231136) * evaluator.dataset.testset_size
    evaluator.print_stats()
    evaluator.print_results()
    evaluator.save() # to double check that it is being called
if __name__ == "__main__":
    import sys
    # Build an argv-style flag list for absl FLAGS. The leading entry
    # stands in for the program name; whitespace inside the f-string is
    # irrelevant because of the trailing .split().
    argv = f"""
    IGNORED-PROGNAME
    --data_dir={dir_path}/tf/pretrained_xl/tf_wt103/data/tfrecords
    --record_info_dir={dir_path}/tf/pretrained_xl/tf_wt103/data/tfrecords/
    --corpus_info_path={dir_path}/tf/pretrained_xl/tf_wt103/data/corpus-info.json
    --eval_ckpt_path={dir_path}/tf/pretrained_xl/tf_wt103/model/model.ckpt-0
    --model_dir=EXP-wt103
    --div_val=4
    --learning_rate=0.000002
    --decay_rate=0
    --epsilon=0.00001
    --rms=True
    --untie_r=True
    --proj_share_all_but_first=True
    --num_core_per_host=1
    --n_layer=18
    --d_model=1024
    --d_embed=1024
    --n_head=16
    --d_head=64
    --d_inner=4096
    --dropout=0.0
    --dropatt=0.0
    --tgt_len=128
    --mem_len=1600
    --clamp_len=1000
    --eval_split=test
    --same_length=True
    """.split()
    FLAGS(argv, known_only=True)
    # Sanity-check that FLAGS parsing picked up the data directory.
    assert FLAGS.data_dir == f"{dir_path}/tf/pretrained_xl/tf_wt103/data/tfrecords"
    main()
|
{"hexsha": "5792ea174e7f7ddf5800ad7da81b289f9fbda6e7", "size": 2473, "ext": "py", "lang": "Python", "max_stars_repo_path": "sotabench.py", "max_stars_repo_name": "PiotrCzapla/dynamiceval-transformer", "max_stars_repo_head_hexsha": "010e05884031490f07abb107ef66af9c0061e094", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-10T20:24:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-10T20:24:11.000Z", "max_issues_repo_path": "sotabench.py", "max_issues_repo_name": "PiotrCzapla/dynamiceval-transformer", "max_issues_repo_head_hexsha": "010e05884031490f07abb107ef66af9c0061e094", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sotabench.py", "max_forks_repo_name": "PiotrCzapla/dynamiceval-transformer", "max_forks_repo_head_hexsha": "010e05884031490f07abb107ef66af9c0061e094", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9733333333, "max_line_length": 90, "alphanum_fraction": 0.6777193692, "include": true, "reason": "import numpy", "num_tokens": 684}
|
"""
Original code from OSVOS (https://github.com/scaelles/OSVOS-TensorFlow)
Sergi Caelles (scaelles@vision.ee.ethz.ch)
Modified code for liver and lesion segmentation:
Miriam Bellver (miriam.bellver@bsc.es)
"""
import os
import sys
import tensorflow as tf
slim = tf.contrib.slim
import numpy as np
root_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.abspath(root_folder))
import seg_liver as segmentation
from dataset.dataset_seg import Dataset
number_slices = 3  # slices per sample (presumably adjacent CT slices -- confirm)
task_name = 'seg_liver_ck'
# Input database and output/checkpoint locations, all relative to this file.
database_root = os.path.join(root_folder, 'LiTS_database')
logs_path = os.path.join(root_folder, 'train_files', task_name, 'networks')
result_root = os.path.join(root_folder, 'results')
model_name = os.path.join(logs_path, "seg_liver.ckpt")
# List file naming the test volumes to segment.
test_file = os.path.join(root_folder, 'seg_DatasetList/testing_volume_3.txt')
# Test-only dataset: the first and third list arguments are None here, and
# images are not preloaded into memory.
dataset = Dataset(None, test_file, None, database_root, number_slices, store_memory=False)
result_path = os.path.join(result_root, task_name)
checkpoint_path = model_name
# Run liver segmentation over the test set with the pretrained checkpoint.
segmentation.test(dataset, checkpoint_path, result_path, number_slices)
|
{"hexsha": "b90882a5a575e5b849b097707bf3dea0ea3c1dc7", "size": 1090, "ext": "py", "lang": "Python", "max_stars_repo_path": "seg_liver_test.py", "max_stars_repo_name": "rishyak/liverseg-2017-nipsws", "max_stars_repo_head_hexsha": "73d16bc9b818b1d0bb9b1cfcb67f6a8c25b5dba2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 107, "max_stars_repo_stars_event_min_datetime": "2017-11-29T16:07:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T07:50:54.000Z", "max_issues_repo_path": "seg_liver_test.py", "max_issues_repo_name": "rishyak/liverseg-2017-nipsws", "max_issues_repo_head_hexsha": "73d16bc9b818b1d0bb9b1cfcb67f6a8c25b5dba2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2018-01-17T06:17:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-13T22:01:36.000Z", "max_forks_repo_path": "seg_liver_test.py", "max_forks_repo_name": "rishyak/liverseg-2017-nipsws", "max_forks_repo_head_hexsha": "73d16bc9b818b1d0bb9b1cfcb67f6a8c25b5dba2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2017-11-30T05:13:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T04:55:26.000Z", "avg_line_length": 31.1428571429, "max_line_length": 90, "alphanum_fraction": 0.795412844, "include": true, "reason": "import numpy", "num_tokens": 273}
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import numpy as np
from numpy import linalg as LA
def gmres(A, b, eps=1e-16, iteration=None, x_0=None):
    """Solve A x = b with the (unrestarted) GMRES algorithm.

    Builds an orthonormal Krylov basis with Arnoldi iteration (modified
    Gram-Schmidt) and returns the iterate minimizing the residual norm
    over that subspace. The original version was unfinished (wrong
    residual sign, mis-shaped Hessenberg matrix, no return value); this
    completes it.

    Parameters
    ----------
    A : numpy.ndarray
        Square coefficient matrix, shape (n, n).
    b : numpy.ndarray
        Right-hand side vector, shape (n,).
    eps : float, optional
        Breakdown/convergence tolerance on vector norms.
    iteration : int or None, optional
        Maximum Krylov dimension; defaults to len(b).
    x_0 : numpy.ndarray or None, optional
        Initial guess; defaults to the zero vector.

    Returns
    -------
    numpy.ndarray
        Approximate solution of A x = b, shape (n,).
    """
    if x_0 is None:
        x_0 = np.zeros(len(b))
    if iteration is None:
        iteration = len(b)
    # Initial residual: b - A x0 (the original used A x0 - b, which flips
    # the sign of the computed correction).
    r = b - A.dot(x_0)
    beta = LA.norm(r, 2)
    if beta < eps:
        return x_0  # x_0 already solves the system
    v = [r / beta]                             # orthonormal Krylov basis
    h = np.zeros((iteration + 1, iteration))   # upper Hessenberg matrix
    m = iteration
    for k in range(iteration):
        # Arnoldi step with modified Gram-Schmidt orthogonalization.
        w = A.dot(v[k])
        for j in range(k + 1):
            h[j, k] = np.dot(v[j], w)
            w = w - h[j, k] * v[j]
        h[k + 1, k] = LA.norm(w, 2)
        if h[k + 1, k] < eps:
            # "Happy breakdown": the Krylov space is A-invariant; stop early.
            m = k + 1
            break
        v.append(w / h[k + 1, k])
    # Minimize ||beta*e1 - H y|| over the Krylov subspace (least squares).
    e1 = np.zeros(m + 1)
    e1[0] = beta
    y = LA.lstsq(h[:m + 1, :m], e1, rcond=None)[0]
    # Lift the subspace solution back to the full space.
    basis = np.column_stack(v[:m])
    return x_0 + basis.dot(y)
|
{"hexsha": "2cbe7c2bc477df99f459797234a805fc52d3aef9", "size": 652, "ext": "py", "lang": "Python", "max_stars_repo_path": "gmres.py", "max_stars_repo_name": "VasavanThiru/gm-ed-res", "max_stars_repo_head_hexsha": "e03f02863ae0d9e706ff6d07a147e318504690c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gmres.py", "max_issues_repo_name": "VasavanThiru/gm-ed-res", "max_issues_repo_head_hexsha": "e03f02863ae0d9e706ff6d07a147e318504690c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gmres.py", "max_forks_repo_name": "VasavanThiru/gm-ed-res", "max_forks_repo_head_hexsha": "e03f02863ae0d9e706ff6d07a147e318504690c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1481481481, "max_line_length": 61, "alphanum_fraction": 0.4984662577, "include": true, "reason": "import numpy,from numpy", "num_tokens": 215}
|
""" Suite of Functions for Pre-Processing Chunked Neural Data """
import numpy as np
from scipy.signal import hilbert
from scipy import fftpack
try:
from neurodsp import filt
# from mne.filter import filter_data
_has_neurodsp = True
except ImportError:
_has_neurodsp = False
try:
import mne
# from mne.filter import filter_data
_has_mne = True
except ImportError:
_has_mne = False
def common_average_reference_array(neural_data, bad_channels: list = None):
    """ Subtract the common average reference from multi-channel neural data

    Parameters
    ----------
    neural_data : array 2d, shape (Channels, Samples)
        Multi-Channel Neural Data
    bad_channels : list, optional
        Channels excluded when computing the common average

    Returns
    -------
    array 2d, shape (Channels, Samples)
        Common averaged referenced copy of the input (the input itself is
        left unmodified)
    """
    referenced = np.array(neural_data)
    # Determine which channels contribute to the average
    good_channels = list(range(referenced.shape[0]))
    if bad_channels is not None:
        good_channels = np.delete(good_channels, bad_channels)
    # Subtract the per-sample mean of the good channels from every channel
    common_average = np.mean(referenced[good_channels, :], axis=0)
    return referenced - common_average[None, :]
def common_average_reference(chunk_neural, bad_channels: list = None):
    """ Common average reference every epoch (chunk) of neural data

    Parameters
    ---------
    chunk_neural : list, shape = [Chunk]->(Channels, Samples)
        Epoched(Chunk) Neural Data, list of 2darrays
    bad_channels : list, optional
        list of Channels To Exclude from Common Average Reference

    Returns
    -------
    list, shape = [Chunk]->(Channels, Samples)
        Common averaged referenced copies of every chunk
    """
    # Apply the single-chunk implementation to each epoch in turn
    return [common_average_reference_array(neural_data=chunk, bad_channels=bad_channels)
            for chunk in chunk_neural]
def bandpass_filter_array_mne(neural_data, fs, l_freq: float, h_freq: float, fir_design='firwin2', verbose=False,
                              **kwargs):
    """ Bandpass filter neural data with the MNE backend

    Parameters
    ---------
    neural_data : ndarray, shape (…, n_times)
        The data to filter.
    fs : float
        The sample frequency in Hz (forwarded to MNE as ``sfreq``).
    l_freq : float | None
        Lower pass-band edge; per MNE, if None the data are only low-passed.
    h_freq : float | None
        Upper pass-band edge; per MNE, if None the data are only high-passed.
    fir_design : str, optional, default: 'firwin2'
        FIR design method forwarded to MNE.
    verbose : bool, optional, default: False
        If True, MNE prints information about the filter it builds.
    **kwargs : see MNE Documentation:
        [Github](https://github.com/mne-tools/mne-python/blob/d36440176cf3f3532f64e6f046c4a6a3eca028de/mne/filter.py#L742-L824)
        [mne](https://martinos.org/mne/stable/generated/mne.filter.filter_data.html)

    Returns
    -------
    ndarray, shape (…, n_times)
        The filtered data.
    """
    # Thin wrapper: all of the real work happens inside mne.filter.filter_data
    filtered = mne.filter.filter_data(data=neural_data, sfreq=fs, l_freq=l_freq, h_freq=h_freq,
                                      fir_design=fir_design, verbose=verbose, **kwargs)
    return filtered
# NDSP (channels, samples)[base case]
def bandpass_filter_array_ndsp(neural_data, fs, l_freq: float, h_freq: float, remove_edges=False, **kwargs):
    """ Bandpass filter neural data with the NeuroDSP backend

    Parameters
    ----------
    neural_data : ndarray, shape (…, n_times)
        The data to filter; the filter is applied along the last axis, so both
        (epochs, channels, samples) and (channels, samples) layouts work.
    fs : float
        The sample frequency in Hz.
    l_freq : float | None
        Lower pass-band edge of the bandpass filter.
    h_freq : float | None
        Upper pass-band edge of the bandpass filter.
    remove_edges : bool, optional, default: False
        If True, replace samples within half the kernel length with np.nan.
        Only used for FIR filters.
    **kwargs : see NeuroDSP Documentation:
        [github](https://github.com/neurodsp-tools/neurodsp/blob/master/neurodsp/filt/filter.py#L13)
        [neurodsp](https://neurodsp-tools.github.io/neurodsp/generated/neurodsp.filt.filter_signal.html)

    Returns
    -------
    ndarray, shape (…, n_times)
        The filtered data.
    """
    # filter_signal is 1-dimensional, so map it over the trailing (time) axis
    band = (l_freq, h_freq)
    return np.apply_along_axis(func1d=filt.filter_signal, axis=-1, arr=neural_data, fs=fs, pass_type='bandpass',
                               f_range=band, remove_edges=remove_edges, **kwargs)
def bandpass_filter(neural_data, fs, l_freq, h_freq, remove_edges=False, verbose=False, **kwargs):
    """ Bandpass Filter Neural Data with whichever backend is installed

    Prefers the ``mne`` backend when it is available, otherwise falls back
    to ``neurodsp``.

    Parameters
    ----------
    neural_data : 2d array, shape (channels, samples)
        The Neural Data to be Bandpass Filtered
    fs : float
        The sample frequency in Hz.
    l_freq : float | None
        Lower pass-band edge of the bandpass filter.
    h_freq : float | None
        Upper pass-band edge of the bandpass filter.
    remove_edges : bool, optional, default: False
        If True, replace samples within half the kernel length to be np.nan.
        Only used for FIR filters when using neurodsp.
    verbose : bool, optional, default: False
        If True it will print out information about the filter used, (mne only)
    **kwargs
        Forwarded to the selected backend.

    Returns
    -------
    ndarray, shape (channels, samples)
        The Neural Data Bandpass Filtered

    Raises
    ------
    ImportError
        If neither ``mne`` nor ``neurodsp`` is installed. (Previously a
        missing neurodsp install surfaced as a confusing NameError inside
        the fallback path.)
    """
    # Switch on the installed back-ends; mne takes precedence
    if _has_mne:
        return bandpass_filter_array_mne(neural_data=neural_data, fs=fs, l_freq=l_freq, h_freq=h_freq,
                                         verbose=verbose, **kwargs)
    if _has_neurodsp:
        return bandpass_filter_array_ndsp(neural_data=neural_data, fs=fs, l_freq=l_freq, h_freq=h_freq,
                                          remove_edges=remove_edges, **kwargs)
    raise ImportError("bandpass_filter requires either the 'mne' or the 'neurodsp' package to be installed")
def bandpass_filter_epochs(epoch_neural_data, fs, l_freq, h_freq, remove_edges=False, verbose=False, **kwargs):
    """ Bandpass filter every epoch (chunk) of neural data

    Parameters
    ----------
    epoch_neural_data : list, shape [Epoch]->(channels, samples)
        The Epoched Neural Data to be Bandpass Filtered
    fs : float
        The sample frequency in Hz.
    l_freq : float | None
        Lower pass-band edge of the bandpass filter.
    h_freq : float | None
        Upper pass-band edge of the bandpass filter.
    remove_edges : bool, optional, default: False
        If True, replace samples within half the kernel length to be np.nan.
        Only used for FIR filters when using neurodsp.
    verbose : bool, optional, default: False
        If True it will print out information about the filter used, (mne only)

    Returns
    -------
    list, shape [Epoch]->(channels, samples)
        The Epoched Neural Data Bandpass Filtered
    """
    # Filter each epoch independently with the backend-selecting wrapper
    return [bandpass_filter(neural_data=epoch, fs=fs, l_freq=l_freq, h_freq=h_freq,
                            remove_edges=remove_edges, verbose=verbose, **kwargs)
            for epoch in epoch_neural_data]
def multi_bpf_epochs(epoch_neural_data, fs, l_freqs, h_freqs, remove_edges=False, verbose=False, **kwargs):
    """ Run multiple bandpass filters over the same epoched neural data

    Parameters
    ----------
    epoch_neural_data : list, shape [Epoch]->(channels, samples)
        The Epoched Neural Data to be Bandpass Filtered
    fs : float
        The sample frequency in Hz.
    l_freqs : array-like | None
        Lower pass-band edges, one per frequency band.
    h_freqs : array-like | None
        Upper pass-band edges, one per frequency band.
    remove_edges : bool, optional, default: False
        If True, replace samples within half the kernel length to be np.nan.
        Only used for FIR filters when using neurodsp.
    verbose : bool, optional, default: False
        If True it will print out information about the filter used, (mne only)

    Returns
    -------
    list, shape [Freq]->[Epoch]->(channels, samples)
        The Epoched Neural Data Bandpass Filtered, one entry per band
    """
    assert len(l_freqs) == len(
        h_freqs), 'l_freqs and h_freqs must be the same length, {l_f} not equal to {h_f}'.format(l_f=len(l_freqs),
                                                                                                 h_f=len(h_freqs))
    # One full pass over the epochs per (low, high) frequency pair
    return [bandpass_filter_epochs(epoch_neural_data=epoch_neural_data, fs=fs, l_freq=low, h_freq=high,
                                   remove_edges=remove_edges, verbose=verbose, **kwargs)
            for low, high in zip(l_freqs, h_freqs)]
def multi_bpf(chunk_neural_data, fs, l_freqs, h_freqs, remove_edges=False, verbose=False, **kwargs):
    """ Run multiple bandpass filters on a single chunk of neural data

    Parameters
    ----------
    chunk_neural_data : list, shape (channels, samples)
        A Single Chunk of Neural Data to be Bandpass Filtered
    fs : float
        The sample frequency in Hz.
    l_freqs : array-like | None
        Lower pass-band edges, one per frequency band.
    h_freqs : array-like | None
        Upper pass-band edges, one per frequency band.
    remove_edges : bool, optional, default: False
        If True, replace samples within half the kernel length to be np.nan.
        Only used for FIR filters when using neurodsp.
    verbose : bool, optional, default: False
        If True it will print out information about the filter used, (mne only)

    Returns
    -------
    list, shape [Freq]->(channels, samples)
        The Chunk of Neural Data Bandpass Filtered, one entry per band
    """
    assert len(l_freqs) == len(
        h_freqs), 'l_freqs and h_freqs must be the same length, {l_f} not equal to {h_f}'.format(l_f=len(l_freqs),
                                                                                                 h_f=len(h_freqs))
    # Filter the same chunk once per (low, high) frequency pair
    return [bandpass_filter(neural_data=chunk_neural_data, fs=fs, l_freq=low, h_freq=high,
                            remove_edges=remove_edges, verbose=verbose, **kwargs)
            for low, high in zip(l_freqs, h_freqs)]
def hilbert3(signal, axis=-1):
    """ Fast Hilbert transform via zero-padding to the next fast FFT length

    The signal is padded to ``scipy.fftpack.next_fast_len`` samples along
    ``axis`` before transforming, then trimmed back to its original length.

    Parameters
    ----------
    signal : ndarray
        signal to be transformed
    axis : int
        axis along which to transform, defaults to -1

    Returns
    -------
    ndarray
        Complex analytic signal with the same shape as ``signal``
    """
    original_len = signal.shape[axis]
    fast_len = fftpack.next_fast_len(original_len)
    padded_analytic = hilbert(signal, fast_len, axis=axis)

    def _trim(line, keep):
        # Drop the extra samples introduced by the fast-length padding
        return line[:keep]

    return np.apply_along_axis(_trim, axis=axis, arr=padded_analytic, keep=original_len)
def hilbert_module(neural_data, output: str, smooth=False):
    """ Extract the amplitude envelope or instantaneous phase via the Hilbert transform

    Notes
    -----
    The analytic signal ``x_a(t)`` of signal ``x(t)`` is:

    .. math:: x_a = F^{-1}(F(x) 2U) = x + i y

    where `F` is the Fourier transform, `U` the unit step function,
    and `y` the Hilbert transform of `x`. [1]_
    The negative half of the frequency spectrum is zeroed out, turning the
    real-valued signal into a complex analytic signal.

    Parameters
    ----------
    neural_data : ndarray | shape (..., Samples)
        Input Neural Activity during all Trials
    output : str
        What to extract from the analytic signal, options: 'phase', 'amplitude'
    smooth : bool, optional
        If True the instantaneous phase is passed through a sine function,
        defaults to False (only used when output == 'phase')

    Returns
    -------
    ndarray | shape (..., Samples)
        'phase': instantaneous phase in radians (sine of it when smooth=True)
        'amplitude': amplitude envelope of the input
    """
    assert output == 'amplitude' or output == 'phase', \
        "output parameter can only be 'amplitude' or 'phase' not {output}".format(output=output)
    analytic = hilbert3(neural_data, axis=-1)
    if output == 'amplitude':
        # Envelope = magnitude of the analytic signal
        return np.abs(analytic)
    # Instantaneous phase = complex argument of the analytic signal
    phase = np.apply_along_axis(func1d=np.angle, axis=-1, arr=analytic, deg=False)
    if smooth:
        phase = np.apply_along_axis(func1d=np.sin, axis=-1, arr=phase)
    return phase
|
{"hexsha": "89fb827a03666ba10ad5a7e06112754f82550ee9", "size": 14832, "ext": "py", "lang": "Python", "max_stars_repo_path": "BirdSongToolbox/preprocess.py", "max_stars_repo_name": "Darilbii/BirdSongToolbox", "max_stars_repo_head_hexsha": "f4853a7f6cb5c4ef0f57e9f346be08f6e153ca65", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-03T00:03:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T04:37:52.000Z", "max_issues_repo_path": "BirdSongToolbox/preprocess.py", "max_issues_repo_name": "Darilbii/BirdSongToolbox", "max_issues_repo_head_hexsha": "f4853a7f6cb5c4ef0f57e9f346be08f6e153ca65", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-08T11:04:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-08T11:04:36.000Z", "max_forks_repo_path": "BirdSongToolbox/preprocess.py", "max_forks_repo_name": "Darilbii/BirdSongToolbox", "max_forks_repo_head_hexsha": "f4853a7f6cb5c4ef0f57e9f346be08f6e153ca65", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8709677419, "max_line_length": 127, "alphanum_fraction": 0.659789644, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3497}
|
from random import randint, uniform
import torch.utils.data as data
import os, sys
sys.path.append("..")
from src.utils import get_vid_list, get_img_iuv_text_mask, TransferTexture, get_mask_list
import numpy as np
import cv2
from src.computer_angle import compute_angle
import pickle
from options import get_general_options
class Fusion_dataset(data.Dataset):
    """Video dataset yielding reference/target images, IUV maps, texture
    atlases and masks for texture-fusion training.

    Each item is drawn from one video directory: ``num_target`` target frames
    followed by ``num_inputs`` reference frames, all sampled at random.
    """

    def __init__(self, params, mode='train'):
        """Cache sampling options and build the video list for ``mode``.

        Parameters
        ----------
        params : dict
            Option dictionary (see ``options.get_general_options``); keys read
            here include 'data_root', 'maximum_ref_frames', 'num_target',
            'fix_frame', 'face_GAN', 'output_mask' and 'self_recon'.
        mode : str, optional
            Dataset split subdirectory, defaults to 'train'.
        """
        super(Fusion_dataset, self).__init__()
        data_root = params['data_root']
        self.data_dir = os.path.join(data_root, mode)
        self.vid_list = get_vid_list(self.data_dir)
        self.batch_size = params['batch_size']
        self.n_iters = params['n_training_iter']
        self.n_sample = params['n_sample']  # get N+1 control points
        self.max_ref_frames = params["maximum_ref_frames"]
        num_vids = len(self.vid_list)  # NOTE(review): unused local
        self.num_frames = params['num_frames']
        self.frame_interval = params['frame_interval']
        use_fix_intv = params['use_fix_interval']  # NOTE(review): unused local
        self.num_inputs = params["maximum_ref_frames"]
        self.num_target = params["num_target"]
        self.fix_frame = params["fix_frame"]
        self.face_GAN = params["face_GAN"]
        self.output_mask = params["output_mask"]
        self.self_recon = params["self_recon"]
        self.dataset_len = len(self.vid_list)

    def __getitem__(self, index):
        """Sample reference/target data for video ``index``.

        Returns
        -------
        (src_data, tgt_data, data_255) : tuple of lists
            src_data: [src_img, src_IUV, src_texture_im, src_mask_im]
            plus inpaint/visibility masks when ``output_mask`` is True;
            tgt_data: [tgt_img, tgt_IUV, tgt_texture_im, tgt_mask_im]
            plus face mask/bbox when ``face_GAN`` is True;
            data_255: un-normalized uint8 copies [src_IUV255, tgt_IUV255].
        """
        vid_idx = index
        random_number = np.random.random()
        vid_path = self.vid_list[vid_idx]
        img_list, iuv_list, text_list, mask_list = get_img_iuv_text_mask(vid_path)
        # Draw num_target target indices first, then num_inputs reference indices
        frames = np.random.choice(len(img_list), self.num_inputs + self.num_target, replace=False)
        if not self.fix_frame:
            # Randomly duplicate reference frames onto the first reference
            # (~1/3 chance: two duplicates, ~1/3 chance: one duplicate) so the
            # effective number of distinct references varies during training
            if random_number < 0.33333:
                frames[1 + self.num_target] = frames[self.num_target]
                frames[2 + self.num_target] = frames[self.num_target]
            elif random_number < 0.66666:
                frames[1 + self.num_target] = frames[self.num_target]
        if self.self_recon:
            # Occasionally reuse the first target frame as one of the
            # references (self-reconstruction training signal)
            random_number_recon = np.random.random()
            if random_number_recon < 0.3:
                random_index = np.random.choice(self.num_inputs, 1)
                frames[random_index] = frames[self.num_target]
                print("self_reconstruction")
        # Load texture atlases (800x1200x3 — presumably the DensePose texture
        # layout; TODO confirm) for references and targets
        src_texture_im = np.zeros((self.num_inputs, 800, 1200, 3), np.uint8)
        for i in range(self.num_inputs):
            src_texture_im[i] = cv2.imread(text_list[frames[i + self.num_target]])
        tgt_texture_im = np.zeros((self.num_target, 800, 1200, 3), np.uint8)
        for i in range(self.num_target):
            tgt_texture_im[i] = cv2.imread(text_list[frames[i]])
        # Texture-space visibility masks (single channel taken from channel 0)
        src_mask_im = np.zeros((self.num_inputs, 800, 1200), np.uint8)
        for i in range(self.num_inputs):
            src_mask_im[i] = cv2.imread(mask_list[frames[i + self.num_target]])[:, :, 0]
        tgt_mask_im = np.zeros((self.num_target, 800, 1200), np.uint8)
        for i in range(self.num_target):
            tgt_mask_im[i] = cv2.imread(mask_list[frames[i]])[:, :, 0]
        # RGB frames (256x256) for references and targets
        src_img = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_img[i] = cv2.imread(img_list[frames[i + self.num_target]])
        tgt_img = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_img[i] = cv2.imread(img_list[frames[i]])
        # DensePose IUV maps matching the frames above
        src_IUV = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_IUV[i] = cv2.imread(iuv_list[frames[i + self.num_target]])
        tgt_IUV = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_IUV[i] = cv2.imread(iuv_list[frames[i]])
        if self.output_mask == True:
            # Union of texture-space visibility over all references
            src_common_area = np.zeros((800, 1200), np.uint8)
            for i in range(self.num_inputs):
                src_common_area = np.logical_or(src_common_area, src_mask_im[i] / 255)
            # Warp that union into each target's image space
            src_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                src_area[i] = TransferTexture(TextureIm=np.repeat(src_common_area[:, :, np.newaxis], 3, axis=2),
                                              IUV=tgt_IUV[i])
            # Pixels covered by the target body but NOT seen in any reference
            # (xor of body area and reference-visible area) need inpainting
            image_inpaint_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            tgt_mask_in_image = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                tgt_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=tgt_IUV[i])
                image_inpaint_area[i] = np.logical_xor(tgt_mask_in_image[i], src_area[i])
            # Body-coverage masks in each reference's image space
            src_mask_in_image = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
            for i in range(self.num_inputs):
                src_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=src_IUV[i])
        # compute the face Bbox
        # 23 and 24 correspond to the face
        if self.face_GAN == True:
            face_bbox = np.zeros((self.num_target, 4), np.uint8)
            for i in range(self.num_target):
                try:
                    # Face = DensePose parts 23 and 24; bbox padded by 2-3 px
                    Y1, X1 = np.where(tgt_IUV[i, :, :, 0] == 23)
                    Y2, X2 = np.where(tgt_IUV[i, :, :, 0] == 24)
                    X_con = np.concatenate([X1, X2])
                    Y_con = np.concatenate([Y1, Y2])
                    leftmost = max(np.min(X_con) - 2, 0)
                    rightmost = min(np.max(X_con) + 3, 256)
                    upmost = max(np.min(Y_con) - 2, 0)
                    bottomost = min(np.max(Y_con) + 3, 256)
                    face_bbox[i] = np.array([leftmost, rightmost, upmost, bottomost])
                    # print(face_bbox[i])
                except:
                    # No face pixels visible: fall back to an all-zero bbox
                    face_bbox = np.zeros((self.num_target, 4), np.uint8)
            # Binary face mask (parts 23 + 24) per target frame
            face_mask = np.zeros((self.num_target, 256, 256), np.uint8)
            for i in range(self.num_target):
                face_mask[i] = np.where(tgt_IUV[i, :, :, 0] == 23, 1, 0)
                face_mask[i] = face_mask[i] + np.where(tgt_IUV[i, :, :, 0] == 24, 1, 0)
        # cv2.imwrite('/home/haolin/test_data/face_mask.jpg',face_mask)
        # canvas=tgt_IUV[0]
        # canvas=cv2.rectangle(canvas, (face_bbox[0,0], face_bbox[0,2]), (face_bbox[0,1], face_bbox[0,3]), (0, 255, 0), 2)
        # cv2.imwrite('/home/haolin/test_data/canvas.jpg',canvas)
        # cv2.imwrite('/home/haolin/test_data/IUV.jpg',tgt_IUV[0,:,:])
        # print(face_mask.shape)
        # normalize to (-1, 1)
        # Keep raw uint8 IUV copies before normalization
        src_IUV255 = src_IUV
        tgt_IUV255 = tgt_IUV
        src_texture_im = (src_texture_im / 255.0 - 0.5) * 2
        tgt_texture_im = (tgt_texture_im / 255.0 - 0.5) * 2
        src_IUV = (src_IUV / 255.0 - 0.5) * 2
        tgt_IUV = (tgt_IUV / 255.0 - 0.5) * 2
        src_img = (src_img / 255.0 - 0.5) * 2
        tgt_img = (tgt_img / 255.0 - 0.5) * 2
        src_mask_im = (src_mask_im / 255.0)
        tgt_mask_im = (tgt_mask_im / 255.0)
        src_data = [src_img, src_IUV, src_texture_im, src_mask_im]
        tgt_data = [tgt_img, tgt_IUV, tgt_texture_im, tgt_mask_im]
        data_255 = [src_IUV255, tgt_IUV255]
        if self.output_mask == True:
            src_data.append(image_inpaint_area)
            src_data.append(src_mask_in_image)
            src_data.append(src_common_area)
            tgt_data.append(tgt_mask_in_image)
        if self.face_GAN == True:
            # prepare return values
            tgt_data.append(face_mask)
            tgt_data.append(face_bbox)
        '''
        for i in range(len(src_data)):
            cv2.imwrite("/data1/haolin/test/"+"src_%i.jpg"%(i),src_data[i])
        for i in range(len(tgt_data)):
            cv2.imwrite("/data1/haolin/test/"+"tgt_%i.jpg"%(i),tgt_data[i])
        '''
        # return {'IUV': tgt_IUV, 'transfered_img': transfered_img, 'y': y, 'tgt_fg_mask': tgt_fg_mask}
        return src_data, tgt_data, data_255

    def __len__(self):
        # One dataset item per video directory
        return self.dataset_len
class Fusion_dataset_textonly(data.Dataset):
    """Texture-only variant of the fusion dataset: each item consists solely
    of reference/target texture atlases and their visibility masks."""

    def __init__(self, params, mode='train'):
        """Cache sampling options and build the video list for ``mode``."""
        super(Fusion_dataset_textonly, self).__init__()
        self.data_dir = os.path.join(params['data_root'], mode)
        self.vid_list = get_vid_list(self.data_dir)
        self.batch_size = params['batch_size']
        self.n_iters = params['n_training_iter']
        self.n_sample = params['n_sample']  # get N+1 control points
        self.max_ref_frames = params["maximum_ref_frames"]
        total_videos = len(self.vid_list)
        self.num_frames = params['num_frames']
        self.frame_interval = params['frame_interval']
        fixed_interval_flag = params['use_fix_interval']
        self.num_inputs = params["maximum_ref_frames"]
        self.num_target = params["num_target"]
        self.fix_frame = params["fix_frame"]
        self.face_GAN = params["face_GAN"]
        self.output_mask = params["output_mask"]
        self.dataset_len = len(self.vid_list)

    def __getitem__(self, index):
        """Return normalized (texture, mask) pairs for one video's
        reference and target frames."""
        picks = np.random.choice(self.num_frames, self.num_inputs + self.num_target, replace=False)
        collapse_draw = np.random.random()
        if not self.fix_frame:
            # Randomly duplicate reference frames onto the first reference so
            # the effective number of distinct references varies
            if collapse_draw < 0.33333:
                picks[1 + self.num_target] = picks[self.num_target]
                picks[2 + self.num_target] = picks[self.num_target]
            elif collapse_draw < 0.66666:
                picks[1 + self.num_target] = picks[self.num_target]
        video_path = self.vid_list[index]
        img_list, iuv_list, text_list, mask_list = get_img_iuv_text_mask(video_path)
        # Reference textures/masks come after the num_target target indices
        ref_textures = np.zeros((self.num_inputs, 800, 1200, 3), np.uint8)
        ref_masks = np.zeros((self.num_inputs, 800, 1200), np.uint8)
        for slot in range(self.num_inputs):
            frame_id = picks[slot + self.num_target]
            ref_textures[slot] = cv2.imread(text_list[frame_id])
            ref_masks[slot] = cv2.imread(mask_list[frame_id])[:, :, 0]
        tgt_textures = np.zeros((self.num_target, 800, 1200, 3), np.uint8)
        tgt_masks = np.zeros((self.num_target, 800, 1200), np.uint8)
        for slot in range(self.num_target):
            frame_id = picks[slot]
            tgt_textures[slot] = cv2.imread(text_list[frame_id])
            tgt_masks[slot] = cv2.imread(mask_list[frame_id])[:, :, 0]
        # Map textures into [-1, 1] and masks into [0, 1]
        src_data = [(ref_textures / 255.0 - 0.5) * 2, ref_masks / 255.0]
        tgt_data = [(tgt_textures / 255.0 - 0.5) * 2, tgt_masks / 255.0]
        return src_data, tgt_data

    def __len__(self):
        # One dataset item per video directory
        return self.dataset_len
class Fusion_dataset_smpl(data.Dataset):
    """Fusion dataset variant that additionally returns SMPL pose/shape
    parameters, the previous real frame and a real segmentation mask.

    Differs from ``Fusion_dataset`` in that the first sampled frame is the
    target and the frame immediately before it is used as the "previous real"
    frame for the SMPL pairing.
    """

    def __init__(self, params, mode='train'):
        """Cache sampling options and build video/SMPL/mask directory lists.

        Parameters
        ----------
        params : dict
            Option dictionary; reads 'data_root', 'smpl_root', 'mask_root'
            plus the same sampling keys as ``Fusion_dataset``.
        mode : str, optional
            Dataset split subdirectory, defaults to 'train'.
        """
        super(Fusion_dataset_smpl, self).__init__()
        data_root = params['data_root']
        smpl_root = params['smpl_root']
        mask_root = params['mask_root']
        self.data_dir = os.path.join(data_root, mode)
        self.smpl_dir = os.path.join(smpl_root, mode)
        self.mask_dir = os.path.join(mask_root, mode)
        self.vid_list = get_vid_list(self.data_dir)
        self.batch_size = params['batch_size']
        self.n_iters = params['n_training_iter']
        self.n_sample = params['n_sample']  # get N+1 control points
        self.max_ref_frames = params["maximum_ref_frames"]
        num_vids = len(self.vid_list)  # NOTE(review): unused local
        self.num_frames = params['num_frames']
        self.frame_interval = params['frame_interval']
        use_fix_intv = params['use_fix_interval']  # NOTE(review): unused local
        self.num_inputs = params["maximum_ref_frames"]
        self.num_target = params["num_target"]
        self.fix_frame = params["fix_frame"]
        self.face_GAN = params["face_GAN"]
        self.output_mask = params["output_mask"]
        self.self_recon = params["self_recon"]
        self.dataset_len = len(self.vid_list)

    def __getitem__(self, index):
        """Sample reference/target data plus SMPL parameters for video ``index``.

        Returns
        -------
        (src_data, tgt_data, data_255, smpl_data) : tuple of lists
            src_data: [src_img, src_IUV, src_texture_im, src_mask_im]
            plus inpaint/visibility masks when ``output_mask``;
            tgt_data: [tgt_img, tgt_IUV] plus face mask/bbox when ``face_GAN``;
            data_255: un-normalized uint8 [src_IUV255, tgt_IUV255];
            smpl_data: [smpl_pair (cams|pose|shape for prev+target frame),
            prev_real_img, smpl_real_mask, vertices for prev+target frame].
        """
        vid_idx = index
        random_number = np.random.random()
        vid_path = self.vid_list[vid_idx]
        vid_name = vid_path.split("/")[-1]
        # SMPL parameters for the whole video live in one pickle per video
        smpl_path = os.path.join(self.smpl_dir, vid_name)
        smpl_path = os.path.join(smpl_path, "pose_shape.pkl")
        mask_path = os.path.join(self.mask_dir, vid_name)
        real_mask_list = get_mask_list(mask_path)
        # print(real_mask_list)
        img_list, iuv_list, text_list, mask_list = get_img_iuv_text_mask(vid_path)
        # Sample from frames 1..len-1 so every chosen frame has a predecessor
        frames = np.random.choice(len(img_list) - 1, self.num_inputs + self.num_target, replace=False) + 1
        target_index = frames[0]
        prev_real_index = target_index - 1
        # Reference texture atlases (800x1200x3 — presumably the DensePose
        # texture layout; TODO confirm) and their visibility masks
        src_texture_im = np.zeros((self.num_inputs, 800, 1200, 3), np.uint8)
        for i in range(self.num_inputs):
            src_texture_im[i] = cv2.imread(text_list[frames[i + self.num_target]])
        src_mask_im = np.zeros((self.num_inputs, 800, 1200), np.uint8)
        for i in range(self.num_inputs):
            src_mask_im[i] = cv2.imread(mask_list[frames[i + self.num_target]])[:, :, 0]
        # RGB frames and IUV maps for references and targets
        src_img = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_img[i] = cv2.imread(img_list[frames[i + self.num_target]])
        tgt_img = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_img[i] = cv2.imread(img_list[frames[i]])
        src_IUV = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_IUV[i] = cv2.imread(iuv_list[frames[i + self.num_target]])
        tgt_IUV = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_IUV[i] = cv2.imread(iuv_list[frames[i]])
        if self.output_mask == True:
            # Union of texture-space visibility over all references
            src_common_area = np.zeros((800, 1200), np.uint8)
            for i in range(self.num_inputs):
                src_common_area = np.logical_or(src_common_area, src_mask_im[i] / 255)
            # Warp that union into each target's image space
            src_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                src_area[i] = TransferTexture(TextureIm=np.repeat(src_common_area[:, :, np.newaxis], 3, axis=2),
                                              IUV=tgt_IUV[i])
            # Pixels on the target body unseen in any reference need inpainting
            image_inpaint_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            tgt_mask_in_image = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                tgt_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=tgt_IUV[i])
                image_inpaint_area[i] = np.logical_xor(tgt_mask_in_image[i], src_area[i])
            # Body-coverage masks in each reference's image space
            src_mask_in_image = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
            for i in range(self.num_inputs):
                src_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=src_IUV[i])
        # compute the face Bbox
        # 23 and 24 correspond to the face
        if self.face_GAN == True:
            face_bbox = np.zeros((self.num_target, 4), np.uint8)
            for i in range(self.num_target):
                try:
                    # Face = DensePose parts 23 and 24; bbox padded by 2-3 px
                    Y1, X1 = np.where(tgt_IUV[i, :, :, 0] == 23)
                    Y2, X2 = np.where(tgt_IUV[i, :, :, 0] == 24)
                    X_con = np.concatenate([X1, X2])
                    Y_con = np.concatenate([Y1, Y2])
                    leftmost = max(np.min(X_con) - 2, 0)
                    rightmost = min(np.max(X_con) + 3, 256)
                    upmost = max(np.min(Y_con) - 2, 0)
                    bottomost = min(np.max(Y_con) + 3, 256)
                    face_bbox[i] = np.array([leftmost, rightmost, upmost, bottomost])
                    # print(face_bbox[i])
                except:
                    # No face pixels visible: fall back to an all-zero bbox
                    face_bbox = np.zeros((self.num_target, 4), np.uint8)
            # Binary face mask (parts 23 + 24) per target frame
            face_mask = np.zeros((self.num_target, 256, 256), np.uint8)
            for i in range(self.num_target):
                face_mask[i] = np.where(tgt_IUV[i, :, :, 0] == 23, 1, 0)
                face_mask[i] = face_mask[i] + np.where(tgt_IUV[i, :, :, 0] == 24, 1, 0)
        # cv2.imwrite('/home/haolin/test_data/face_mask.jpg',face_mask)
        # canvas=tgt_IUV[0]
        # canvas=cv2.rectangle(canvas, (face_bbox[0,0], face_bbox[0,2]), (face_bbox[0,1], face_bbox[0,3]), (0, 255, 0), 2)
        # cv2.imwrite('/home/haolin/test_data/canvas.jpg',canvas)
        # cv2.imwrite('/home/haolin/test_data/IUV.jpg',tgt_IUV[0,:,:])
        # print(face_mask.shape)
        # normalize to (-1, 1)
        # NOTE(review): smpl_file is never closed; consider a with-block
        smpl_file = open(smpl_path, 'rb')
        smpl_content = pickle.load(smpl_file)
        # Stack cams|pose|shape for (previous, target) frame pair
        smpl_pair_index = np.array([prev_real_index, target_index])
        smpl_pair = np.concatenate([smpl_content['cams'][smpl_pair_index], smpl_content['pose'][smpl_pair_index],
                                    smpl_content['shape'][smpl_pair_index]], axis=1)
        prev_real_img = cv2.imread(img_list[prev_real_index])
        smpl_real_mask = cv2.imread(real_mask_list[target_index])
        prev_real_img = (prev_real_img / 255.0 - 0.5) * 2
        smpl_real_mask = smpl_real_mask / 255.0
        smpl_data = [smpl_pair, prev_real_img, smpl_real_mask, smpl_content['vertices'][smpl_pair_index]]
        # Keep raw uint8 IUV copies before normalization
        src_IUV255 = src_IUV
        tgt_IUV255 = tgt_IUV
        src_texture_im = (src_texture_im / 255.0 - 0.5) * 2
        src_IUV = (src_IUV / 255.0 - 0.5) * 2
        tgt_IUV = (tgt_IUV / 255.0 - 0.5) * 2
        src_img = (src_img / 255.0 - 0.5) * 2
        tgt_img = (tgt_img / 255.0 - 0.5) * 2
        src_mask_im = (src_mask_im / 255.0)
        src_data = [src_img, src_IUV, src_texture_im, src_mask_im]
        tgt_data = [tgt_img, tgt_IUV]
        data_255 = [src_IUV255, tgt_IUV255]
        if self.output_mask == True:
            src_data.append(image_inpaint_area)
            src_data.append(src_mask_in_image)
            src_data.append(src_common_area)
        if self.face_GAN == True:
            # prepare return values
            tgt_data.append(face_mask)
            tgt_data.append(face_bbox)
        '''
        for i in range(len(src_data)):
            cv2.imwrite("/data1/haolin/test/"+"src_%i.jpg"%(i),src_data[i])
        for i in range(len(tgt_data)):
            cv2.imwrite("/data1/haolin/test/"+"tgt_%i.jpg"%(i),tgt_data[i])
        '''
        # return {'IUV': tgt_IUV, 'transfered_img': transfered_img, 'y': y, 'tgt_fg_mask': tgt_fg_mask}
        return src_data, tgt_data, data_255, smpl_data

    def __len__(self):
        # One dataset item per video directory
        return self.dataset_len
class Fusion_dataset_smpl_test(data.Dataset):
    """Test-time dataset: one sample per video.

    For each video it picks ``num_inputs`` reference frames by sorting all
    frames on a body-orientation angle (front/back coverage), then returns:
    reference images/IUV/partial textures/masks, every target frame of the
    video, the un-normalized IUV maps, and the SMPL sequence for the video.
    Images are normalized to (-1, 1); masks to (0, 1).
    """

    def __init__(self, params, mode='test'):
        super(Fusion_dataset_smpl_test, self).__init__()
        data_root = params['data_root']
        smpl_root = params['smpl_root']
        mask_root = params['mask_root']
        self.data_dir = os.path.join(data_root, mode)
        self.smpl_dir = os.path.join(smpl_root, mode)
        self.mask_dir = os.path.join(mask_root, mode)
        self.vid_list = get_vid_list(self.data_dir)
        # Sort for a deterministic video order at test time.
        self.vid_list.sort()
        self.batch_size = params['batch_size']
        self.n_iters = params['n_training_iter']
        self.n_sample = params['n_sample']  # get N+1 control points
        self.max_ref_frames = params["maximum_ref_frames"]
        num_vids = len(self.vid_list)
        self.num_frames = params['num_frames']
        self.frame_interval = params['frame_interval']
        use_fix_intv = params['use_fix_interval']
        self.num_inputs = params["maximum_ref_frames"]
        self.num_target = params["num_target"]
        self.fix_frame = params["fix_frame"]
        self.face_GAN = params["face_GAN"]
        self.output_mask = params["output_mask"]
        self.self_recon = params["self_recon"]
        # Log of which reference frames were chosen for each video.
        self.log_file_dir = os.path.join(os.path.dirname(params['test_save_dir']), "log_result",
                                         "chosen_frame_train.txt")
        self.dataset_len = len(self.vid_list)
        print("the dataset len is ", self.dataset_len)
        # self.dataset_len = 1

    def __getitem__(self, index):
        vid_idx = index
        '''
        target_video_name="Popping_video_17_7"
        for i,video in enumerate(self.vid_list):
            print(video)
            if video.find(target_video_name)>=0:
                vid_idx=i
                break
        print(vid_idx)
        '''
        random_number = np.random.random()
        vid_path = self.vid_list[vid_idx]
        vid_name = vid_path.split("/")[-1]
        smpl_path = os.path.join(self.smpl_dir, vid_name)
        smpl_path = os.path.join(smpl_path, "pose_shape.pkl")
        mask_path = os.path.join(self.mask_dir, vid_name)
        real_mask_list = get_mask_list(mask_path)
        # print(real_mask_list)
        img_list, iuv_list, text_list, mask_list = get_img_iuv_text_mask(vid_path)
        img_name_list = []
        for image_path in img_list:
            img_name = image_path.split('/')[-1]
            img_name_list.append(img_name)
        # At test time every frame of the video is a target frame.
        self.num_frames = len(img_list)
        all_IUV = np.zeros((self.num_frames, 256, 256, 3), np.uint8)
        angle = np.zeros((self.num_frames))
        for i in range(self.num_frames):
            all_IUV[i] = cv2.imread(iuv_list[i])
        for i in range(self.num_frames):
            angle[i] = compute_angle(all_IUV[i])
            # print(angle[i])
        max_angle = np.max(angle)
        min_angle = np.min(angle)
        median_angle = np.median(angle)
        max_index = np.argmax(angle)
        min_index = np.argmin(angle)
        # Choose reference frames spread over the orientation range.
        # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the drop-in replacement.
        if self.num_inputs == 4:
            index_33 = np.argsort(angle)[self.num_frames // 3]
            index_66 = np.argsort(angle)[self.num_frames * 2 // 3]
            frames = np.array([max_index, index_33, index_66, min_index], int)
        elif self.num_inputs == 1:
            # Single reference: the most frontal frame (angle closest to 0).
            angle = np.abs(angle)
            front_index = np.argmin(angle)
            frames = np.array([front_index], int)
        elif self.num_inputs < 4:
            # 2 or 3 references: extremes plus the median orientation.
            index_median = np.argsort(angle)[self.num_frames // 2]
            frames = np.array([max_index, index_median, min_index], int)
        elif self.num_inputs == 5:
            index_25 = np.argsort(angle)[self.num_frames // 4]
            index_50 = np.argsort(angle)[self.num_frames * 2 // 4]
            index_75 = np.argsort(angle)[self.num_frames * 3 // 4]
            frames = np.array([max_index, index_25, index_50, index_75, min_index], int)
        else:
            # Previously fell through with `frames` undefined -> NameError.
            raise ValueError(
                "Unsupported maximum_ref_frames: {}".format(self.num_inputs))
        pro_frames = frames
        # Cap indices at 30 — presumably limits references to the first 31
        # frames of each clip; TODO confirm intent against the data layout.
        frames = np.clip(frames, 0, 30)
        print(pro_frames, frames)
        with open(self.log_file_dir, "a") as log_file:
            msg = "the chosen frame index of video %s is" % (vid_name)
            for i in range(frames.shape[0]):
                msg += ",%s" % (img_name_list[frames[i]])
            msg += ".\n"
            log_file.write('%s\n' % msg)  # save the message
        src_texture_im = np.zeros((self.num_inputs, 800, 1200, 3), np.uint8)
        for i in range(self.num_inputs):
            src_texture_im[i] = cv2.imread(text_list[frames[i]])
        src_mask_im = np.zeros((self.num_inputs, 800, 1200), np.uint8)
        for i in range(self.num_inputs):
            src_mask_im[i] = cv2.imread(mask_list[frames[i]])[:, :, 0]
        src_img = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_img[i] = cv2.imread(img_list[frames[i]])
        tgt_img = np.zeros((self.num_frames, 256, 256, 3), np.uint8)
        for i in range(self.num_frames):
            tgt_img[i] = cv2.imread(img_list[i])
        src_IUV = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_IUV[i] = cv2.imread(iuv_list[frames[i]])
        tgt_IUV = np.zeros((self.num_frames, 256, 256, 3), np.uint8)
        for i in range(self.num_frames):
            tgt_IUV[i] = cv2.imread(iuv_list[i])
        if self.output_mask == True:
            # Union of the reference texture masks: UV texels covered by at
            # least one reference frame.
            src_common_area = np.zeros((800, 1200), np.uint8)
            for i in range(self.num_inputs):
                src_common_area = np.logical_or(src_common_area, src_mask_im[i] / 255)
            src_mask_in_image = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
            for i in range(self.num_inputs):
                src_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=src_IUV[i])
        # normalize to (-1, 1)
        # Use a context manager so the pickle file handle is always closed
        # (the original leaked the open file).
        with open(smpl_path, 'rb') as smpl_file:
            smpl_content = pickle.load(smpl_file)
        smpl_seq = np.concatenate([smpl_content['cams'], smpl_content['pose'], smpl_content['shape']], axis=1)
        smpl_real_mask = np.zeros((self.num_frames, 256, 256, 3), np.uint8)
        for i in range(self.num_frames):
            smpl_real_mask[i] = cv2.imread(real_mask_list[i])
        smpl_real_mask = smpl_real_mask / 255.0
        smpl_data = [smpl_seq, smpl_real_mask, smpl_content['vertices']]
        # Keep un-normalized IUV copies for consumers that need raw values.
        src_IUV255 = src_IUV
        tgt_IUV255 = tgt_IUV
        src_texture_im = (src_texture_im / 255.0 - 0.5) * 2
        src_IUV = (src_IUV / 255.0 - 0.5) * 2
        tgt_IUV = (tgt_IUV / 255.0 - 0.5) * 2
        src_img = (src_img / 255.0 - 0.5) * 2
        tgt_img = (tgt_img / 255.0 - 0.5) * 2
        src_mask_im = (src_mask_im / 255.0)
        src_data = [src_img, src_IUV, src_texture_im, src_mask_im]
        tgt_data = [tgt_img, tgt_IUV]
        data_255 = [src_IUV255, tgt_IUV255]
        if self.output_mask == True:
            src_data.append(src_common_area)
            src_data.append(src_mask_in_image)
        return src_data, tgt_data, data_255, smpl_data, vid_name, img_name_list, pro_frames

    def __len__(self):
        # One sample per video directory.
        return self.dataset_len
class Fusion_dataset_smpl_interval(data.Dataset):
    """Training dataset: samples random reference/target frames per video.

    Each sample draws ``num_inputs + num_target`` distinct frame indices; the
    first ``num_target`` are targets and the rest are references. Returns
    normalized reference/target images, IUV maps, partial textures and masks,
    optional inpainting masks, optional face mask/bbox for the face GAN, and
    the SMPL parameters for the chosen frames.
    """

    def __init__(self, params, mode='train'):
        super(Fusion_dataset_smpl_interval, self).__init__()
        data_root = params['data_root']
        smpl_root = params['smpl_root']
        mask_root = params['mask_root']
        self.data_dir = os.path.join(data_root, mode)
        self.smpl_dir = os.path.join(smpl_root, mode)
        self.mask_dir = os.path.join(mask_root, mode)
        self.vid_list = get_vid_list(self.data_dir)
        self.batch_size = params['batch_size']
        self.n_iters = params['n_training_iter']
        self.n_sample = params['n_sample']  # get N+1 control points
        self.max_ref_frames = params["maximum_ref_frames"]
        num_vids = len(self.vid_list)
        self.num_frames = params['num_frames']
        self.frame_interval = params['frame_interval']
        use_fix_intv = params['use_fix_interval']
        self.num_inputs = params["maximum_ref_frames"]
        self.num_target = params["num_target"]
        self.fix_frame = params["fix_frame"]
        self.face_GAN = params["face_GAN"]
        self.output_mask = params["output_mask"]
        self.self_recon = params["self_recon"]
        self.dataset_len = len(self.vid_list)
        self.log_file_dir = os.path.join(params['project_dir'], "log_result", "chosen_frame.txt")

    def __getitem__(self, index):
        vid_idx = index
        random_number = np.random.random()
        vid_path = self.vid_list[vid_idx]
        vid_name = vid_path.split("/")[-1]
        smpl_path = os.path.join(self.smpl_dir, vid_name)
        smpl_path = os.path.join(smpl_path, "pose_shape.pkl")
        mask_path = os.path.join(self.mask_dir, vid_name)
        real_mask_list = get_mask_list(mask_path)
        # print(real_mask_list)
        img_list, iuv_list, text_list, mask_list = get_img_iuv_text_mask(vid_path)
        # frames[:num_target] are targets; frames[num_target:] are references.
        frames = np.random.choice(len(img_list), self.num_inputs + self.num_target, replace=False)
        src_texture_im = np.zeros((self.num_inputs, 800, 1200, 3), np.uint8)
        for i in range(self.num_inputs):
            src_texture_im[i] = cv2.imread(text_list[frames[i + self.num_target]])
        src_mask_im = np.zeros((self.num_inputs, 800, 1200), np.uint8)
        for i in range(self.num_inputs):
            src_mask_im[i] = cv2.imread(mask_list[frames[i + self.num_target]])[:, :, 0]
        src_img = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_img[i] = cv2.imread(img_list[frames[i + self.num_target]])
        tgt_img = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_img[i] = cv2.imread(img_list[frames[i]])
        src_IUV = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
        for i in range(self.num_inputs):
            src_IUV[i] = cv2.imread(iuv_list[frames[i + self.num_target]])
        tgt_IUV = np.zeros((self.num_target, 256, 256, 3), np.uint8)
        for i in range(self.num_target):
            tgt_IUV[i] = cv2.imread(iuv_list[frames[i]])
        if self.output_mask == True:
            # Union of the reference texture masks: UV texels visible in at
            # least one reference frame.
            src_common_area = np.zeros((800, 1200), np.uint8)
            for i in range(self.num_inputs):
                src_common_area = np.logical_or(src_common_area, src_mask_im[i] / 255)
            src_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                src_area[i] = TransferTexture(TextureIm=np.repeat(src_common_area[:, :, np.newaxis], 3, axis=2),
                                              IUV=tgt_IUV[i])
            # Pixels of the target body not covered by any reference texture:
            # these are the regions the inpainting branch must fill.
            image_inpaint_area = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            tgt_mask_in_image = np.zeros((self.num_target, 256, 256, 3), np.uint8)
            for i in range(self.num_target):
                tgt_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=tgt_IUV[i])
                image_inpaint_area[i] = np.logical_xor(tgt_mask_in_image[i], src_area[i])
            src_mask_in_image = np.zeros((self.num_inputs, 256, 256, 3), np.uint8)
            for i in range(self.num_inputs):
                src_mask_in_image[i] = TransferTexture(TextureIm=np.ones((800, 1200, 3), np.uint8), IUV=src_IUV[i])
        # compute the face Bbox
        # 23 and 24 correspond to the face
        if self.face_GAN == True:
            # int32 instead of uint8: rightmost/bottomost can be 256, which
            # would silently wrap to 0 in a uint8 array.
            face_bbox = np.zeros((self.num_target, 4), np.int32)
            for i in range(self.num_target):
                try:
                    Y1, X1 = np.where(tgt_IUV[i, :, :, 0] == 23)
                    Y2, X2 = np.where(tgt_IUV[i, :, :, 0] == 24)
                    X_con = np.concatenate([X1, X2])
                    Y_con = np.concatenate([Y1, Y2])
                    leftmost = max(np.min(X_con) - 2, 0)
                    rightmost = min(np.max(X_con) + 3, 256)
                    upmost = max(np.min(Y_con) - 2, 0)
                    bottomost = min(np.max(Y_con) + 3, 256)
                    face_bbox[i] = np.array([leftmost, rightmost, upmost, bottomost])
                    # print(face_bbox[i])
                except Exception:
                    # np.min/np.max raise when no face pixels are visible.
                    # NOTE(review): this resets *all* bboxes, not just row i —
                    # presumably intentional as a "no face" sentinel; confirm.
                    face_bbox = np.zeros((self.num_target, 4), np.int32)
            face_mask = np.zeros((self.num_target, 256, 256), np.uint8)
            for i in range(self.num_target):
                face_mask[i] = np.where(tgt_IUV[i, :, :, 0] == 23, 1, 0)
                face_mask[i] = face_mask[i] + np.where(tgt_IUV[i, :, :, 0] == 24, 1, 0)
            # cv2.imwrite('/home/haolin/test_data/face_mask.jpg',face_mask)
            # canvas=tgt_IUV[0]
            # canvas=cv2.rectangle(canvas, (face_bbox[0,0], face_bbox[0,2]), (face_bbox[0,1], face_bbox[0,3]), (0, 255, 0), 2)
            # cv2.imwrite('/home/haolin/test_data/canvas.jpg',canvas)
            # cv2.imwrite('/home/haolin/test_data/IUV.jpg',tgt_IUV[0,:,:])
            # print(face_mask.shape)
        # normalize to (-1, 1)
        # Context manager closes the pickle file (previously leaked).
        with open(smpl_path, 'rb') as smpl_file:
            smpl_content = pickle.load(smpl_file)
        smpl_seq_index = frames[:]
        smpl_seq = np.concatenate([smpl_content['cams'][smpl_seq_index], smpl_content['pose'][smpl_seq_index],
                                   smpl_content['shape'][smpl_seq_index]], axis=1)
        smpl_real_mask = cv2.imread(real_mask_list[frames[0]])
        smpl_real_mask = smpl_real_mask / 255.0
        smpl_data = [smpl_seq, smpl_real_mask, smpl_content['vertices'][smpl_seq_index]]
        # Keep un-normalized IUV copies for consumers that need raw values.
        src_IUV255 = src_IUV
        tgt_IUV255 = tgt_IUV
        src_texture_im = (src_texture_im / 255.0 - 0.5) * 2
        src_IUV = (src_IUV / 255.0 - 0.5) * 2
        tgt_IUV = (tgt_IUV / 255.0 - 0.5) * 2
        src_img = (src_img / 255.0 - 0.5) * 2
        tgt_img = (tgt_img / 255.0 - 0.5) * 2
        src_mask_im = (src_mask_im / 255.0)
        src_data = [src_img, src_IUV, src_texture_im, src_mask_im]
        tgt_data = [tgt_img, tgt_IUV]
        data_255 = [src_IUV255, tgt_IUV255]
        if self.output_mask == True:
            src_data.append(image_inpaint_area)
            src_data.append(src_mask_in_image)
            src_data.append(src_common_area)
        if self.face_GAN == True:
            # prepare return values
            tgt_data.append(face_mask)
            tgt_data.append(face_bbox)
        '''
        for i in range(len(src_data)):
            cv2.imwrite("/data1/haolin/test/"+"src_%i.jpg"%(i),src_data[i])
        for i in range(len(tgt_data)):
            cv2.imwrite("/data1/haolin/test/"+"tgt_%i.jpg"%(i),tgt_data[i])
        '''
        # return {'IUV': tgt_IUV, 'transfered_img': transfered_img, 'y': y, 'tgt_fg_mask': tgt_fg_mask}
        return src_data, tgt_data, data_255, smpl_data

    def __len__(self):
        # One sample per video directory.
        return self.dataset_len
if __name__ == "__main__":
    # Smoke test: load one sample and print the SMPL tensor shapes.
    opt = get_general_options()
    print("param is ready")
    train_data = Fusion_dataset_smpl(opt)
    print("data_loader is ready")
    # Use idiomatic indexing instead of calling __getitem__ directly.
    # NOTE(review): assumes Fusion_dataset_smpl returns the same 7-tuple as
    # Fusion_dataset_smpl_test — confirm against its definition.
    src_data, tgt_data, data_255, smpl_data, target_vid_name, img_name_list, frames = train_data[1]
    for item in smpl_data:
        print(item.shape)
|
{"hexsha": "a1111b90a3a8f77ad26edea81d98caad77b1cdc7", "size": 34007, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data.py", "max_stars_repo_name": "Larry-u/JAFPro", "max_stars_repo_head_hexsha": "10e5ee3b77bcdb103709c08c3e7d033396bab5ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-21T12:50:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:36:46.000Z", "max_issues_repo_path": "src/data.py", "max_issues_repo_name": "Larry-u/JAFPro", "max_issues_repo_head_hexsha": "10e5ee3b77bcdb103709c08c3e7d033396bab5ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-18T23:13:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-19T02:43:53.000Z", "max_forks_repo_path": "src/data.py", "max_forks_repo_name": "Larry-u/JAFPro", "max_forks_repo_head_hexsha": "10e5ee3b77bcdb103709c08c3e7d033396bab5ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.2109275731, "max_line_length": 122, "alphanum_fraction": 0.5991119475, "include": true, "reason": "import numpy", "num_tokens": 9178}
|
import argparse
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from simulator.network_simulator.bbr import BBR
from simulator.network_simulator.cubic import Cubic
from simulator.trace import Trace
def parse_args():
    """Build the CLI parser and return the recognized arguments.

    Unknown options are tolerated (``parse_known_args``) so this script can
    share a command line with other tools.
    """
    arg_parser = argparse.ArgumentParser("Plot validation curv.")
    arg_parser.add_argument(
        '--log-file', type=str, nargs="+", required=True,
        help="path to a testing log file.")
    arg_parser.add_argument(
        '--save-dir', type=str, default=None,
        help="path to save.")
    parsed, _ignored = arg_parser.parse_known_args()
    return parsed
def main():
    """Plot validation-reward curves against BBR/Cubic baselines.

    Loads up to 20 validation traces from ``<save_dir>/validation_traces``,
    evaluates BBR and Cubic on them, then for each training log file plots
    the validation reward over training steps with baseline reference lines.
    """
    args = parse_args()
    # --save-dir defaults to None but is required below; fail with a clear
    # message instead of an opaque TypeError from os.path.join(None, ...).
    if not args.save_dir:
        raise SystemExit("--save-dir is required: it must contain the "
                         "'validation_traces' subdirectory.")
    bbr = BBR(False)
    cubic = Cubic(False)
    validation_traces = []
    save_dirs = []
    # Up to 20 traces by convention; missing indices are skipped.
    for i in range(20):
        trace_file = os.path.join(args.save_dir, 'validation_traces', "trace_{}.json".format(i))
        if not os.path.exists(trace_file):
            continue
        validation_traces.append(Trace.load_from_file(trace_file))
        save_dir = os.path.join(args.save_dir, 'validation_traces', "trace_{}".format(i))
        os.makedirs(save_dir, exist_ok=True)
        save_dirs.append(save_dir)
    bbr_trace_rewards = bbr.test_on_traces(validation_traces, save_dirs, False)
    cubic_trace_rewards = cubic.test_on_traces(validation_traces, save_dirs, False)
    # Keep only the MI-level reward from each (mi_reward, pkt_reward) pair.
    bbr_rewards = [mi_level_reward for mi_level_reward, _ in bbr_trace_rewards]
    cubic_rewards = [mi_level_reward for mi_level_reward, _ in cubic_trace_rewards]
    for log_file in args.log_file:
        plt.figure()
        model_name = log_file.split('/')[-2]
        plt.title(model_name)
        df = pd.read_csv(log_file, sep='\t')
        # Best checkpoint = step with the highest mean validation reward.
        best_step = int(df['num_timesteps'][df['mean_validation_reward'].argmax()])
        t_used = df['tot_t_used(min)'][df['mean_validation_reward'].argmax()]
        best_reward = df['mean_validation_reward'].max()
        best_model_path = os.path.join(os.path.dirname(log_file), "model_step_{}.ckpt.meta".format(best_step))
        plt.plot(df['num_timesteps'], df['mean_validation_reward'],
                 'o-', label="best_reward: {:.2f}, best step: {}, used {:.2f}min".format(best_reward, int(best_step), t_used))
        plt.axhline(y=np.mean(bbr_rewards), c='r', label='BBR')
        plt.axhline(y=np.mean(cubic_rewards), c='k', label='Cubic')
        plt.xlabel('Num steps')
        plt.ylabel('Validation Reward')
        plt.legend()
        assert os.path.exists(best_model_path)
        print(best_model_path.replace(".meta", ""))
        if args.save_dir:
            os.makedirs(args.save_dir, exist_ok=True)
            plt.savefig(os.path.join(args.save_dir,
                                     '{}_val_curve.png'.format(model_name)))
        plt.close()
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
|
{"hexsha": "48e13296fc28357c0f747eb3a0b67441c81f025d", "size": 2885, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_scripts/plot_validation.py", "max_stars_repo_name": "zxxia/RL-CC", "max_stars_repo_head_hexsha": "d3d3be0097d69ee07b06363ad531cf2479029d74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-04T17:51:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T17:51:42.000Z", "max_issues_repo_path": "src/plot_scripts/plot_validation.py", "max_issues_repo_name": "zxxia/RL-CC", "max_issues_repo_head_hexsha": "d3d3be0097d69ee07b06363ad531cf2479029d74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_scripts/plot_validation.py", "max_forks_repo_name": "zxxia/RL-CC", "max_forks_repo_head_hexsha": "d3d3be0097d69ee07b06363ad531cf2479029d74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-05T02:29:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T16:15:19.000Z", "avg_line_length": 38.4666666667, "max_line_length": 126, "alphanum_fraction": 0.6547660312, "include": true, "reason": "import numpy", "num_tokens": 660}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file adjlist.py is referred and derived from project NetworkX,
#
# https://github.com/networkx/networkx/blob/master/networkx/readwrite/adjlist.py
#
# which has the following license:
#
# Copyright (C) 2004-2020, NetworkX Developers
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
import networkx.readwrite.adjlist
from networkx.readwrite.adjlist import parse_adjlist as _parse_adjlist
from networkx.utils.decorators import open_file
from graphscope import nx
from graphscope.nx.utils.compat import import_as_graphscope_nx
from graphscope.nx.utils.compat import patch_docstring
import_as_graphscope_nx(networkx.readwrite.adjlist)
@patch_docstring(_parse_adjlist)
def parse_adjlist(
    lines, comments="#", delimiter=None, create_using=None, nodetype=None
):
    G = nx.empty_graph(0, create_using)
    edges = []
    nodes = []  # nodes that has not any adjacency
    for line in lines:
        # Strip trailing comments.
        p = line.find(comments)
        if p >= 0:
            line = line[:p]
        if not line:
            continue
        vlist = line.strip().split(delimiter)
        # First token is the source node; the rest are its neighbors.
        u = vlist.pop(0)
        # convert types
        if nodetype is not None:
            try:
                u = nodetype(u)
            except Exception as e:
                raise TypeError(
                    "Failed to convert node ({}) to type {}".format(u, nodetype)
                ) from e
        if len(vlist) == 0:
            nodes.append(u)
        if nodetype is not None:
            try:
                # Materialize the map: a lazy ``map`` would defer conversion
                # errors to G.add_edges_from below, bypassing this handler
                # (upstream NetworkX also uses list(map(...)) here).
                vlist = list(map(nodetype, vlist))
            except Exception as e:
                raise TypeError(
                    "Failed to convert nodes ({}) to type {}".format(
                        ",".join(vlist), nodetype
                    )
                ) from e
        edges.extend([u, v] for v in vlist)
    # N.B: batch add edges to graph.
    if nodes:
        G.add_nodes_from(nodes)
    G.add_edges_from(edges)
    return G
@open_file(0, mode="rb")
def read_adjlist(
    path,
    comments="#",
    delimiter=None,
    create_using=None,
    nodetype=None,
    encoding="utf-8",
):
    """Read graph in adjacency list format from path.

    Parameters
    ----------
    path : string or file
       Filename or file handle to read.
       Filenames ending in .gz or .bz2 will be uncompressed.

    create_using : graphscope.nx graph constructor, optional (default=nx.Graph)
        Graph type to create. If graph instance, then cleared before populated.

    nodetype : int, str, float, tuple, bool Python object, optional
       Convert nodes to this type.

    comments : string, optional
       Marker for comment lines

    delimiter : string, optional
       Separator for node labels.  The default is whitespace.

    Returns
    -------
    G: graphscope.nx graph
        The graph corresponding to the lines in adjacency list format.

    Notes
    -----
    This format does not store graph or node data.

    See Also
    --------
    read_edgelist
    """
    # The file is opened in binary mode by @open_file, so decode each raw
    # line lazily before handing the stream to the parser.
    decoded_lines = (raw_line.decode(encoding) for raw_line in path)
    return parse_adjlist(
        decoded_lines,
        comments=comments,
        delimiter=delimiter,
        create_using=create_using,
        nodetype=nodetype,
    )
# fixture for pytest
def teardown_module(module):
    """Delete adjacency-list files left behind by this test module, if any."""
    import os

    for leftover in ("test.adjlist", "test.adjlist.gz"):
        if os.path.isfile(leftover):
            os.unlink(leftover)
|
{"hexsha": "979453e1c5353721eebc4c8294e2dc50df0ca323", "size": 3624, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/graphscope/nx/readwrite/adjlist.py", "max_stars_repo_name": "haoxins/GraphScope", "max_stars_repo_head_hexsha": "e1e22a425b5c33bed0dea930f8722e6159484153", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-07T07:57:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T09:44:01.000Z", "max_issues_repo_path": "python/graphscope/nx/readwrite/adjlist.py", "max_issues_repo_name": "haoxins/GraphScope", "max_issues_repo_head_hexsha": "e1e22a425b5c33bed0dea930f8722e6159484153", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-12-22T09:19:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T02:43:34.000Z", "max_forks_repo_path": "python/graphscope/nx/readwrite/adjlist.py", "max_forks_repo_name": "haoxins/GraphScope", "max_forks_repo_head_hexsha": "e1e22a425b5c33bed0dea930f8722e6159484153", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-25T10:16:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T11:51:20.000Z", "avg_line_length": 27.0447761194, "max_line_length": 81, "alphanum_fraction": 0.6214128035, "include": true, "reason": "import networkx,from networkx", "num_tokens": 855}
|
[STATEMENT]
lemma nprv_addLemmaE:
assumes "prv \<phi>" "nprv (insert \<phi> F) \<psi>"
and "\<phi> \<in> fmla" "\<psi> \<in> fmla" and "F \<subseteq> fmla" and "finite F"
shows "nprv F \<psi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nprv F \<psi>
[PROOF STEP]
using assms nprv_cut prv_nprvI
[PROOF STATE]
proof (prove)
using this:
prv \<phi>
nprv (insert \<phi> F) \<psi>
\<phi> \<in> fmla
\<psi> \<in> fmla
F \<subseteq> fmla
finite F
\<lbrakk>nprv ?F ?\<phi>; nprv (insert ?\<phi> ?F) ?\<psi>; ?F \<subseteq> fmla; finite ?F; ?\<phi> \<in> fmla; ?\<psi> \<in> fmla\<rbrakk> \<Longrightarrow> nprv ?F ?\<psi>
\<lbrakk>prv ?\<phi>; ?\<phi> \<in> fmla; ?F \<subseteq> fmla; finite ?F\<rbrakk> \<Longrightarrow> nprv ?F ?\<phi>
goal (1 subgoal):
1. nprv F \<psi>
[PROOF STEP]
by blast
|
{"llama_tokens": 374, "file": "Syntax_Independent_Logic_Natural_Deduction", "length": 2}
|
import sys
sys.path.append('..')
from common.core import *
from common.gfxutil import *
from common.audio import *
from common.mixer import *
from common.note import *
from common.wavegen import *
from common.wavesrc import *
from common.writer import *
from kivy.core.window import Window
from kivy.clock import Clock as kivyClock
from kivy.uix.label import Label
from kivy.graphics.instructions import InstructionGroup
from kivy.graphics import Color, Ellipse, Rectangle
from kivy.graphics import PushMatrix, PopMatrix, Translate, Scale, Rotate
from kivy.config import Config
import Leap
from Crosshair import *
from FlameHand import *
from random import random, randint, choice
import numpy as np
import math
'''
Representation of the player's interactions with the game,
as well as his current standing (score, etc.)
'''
class Player(InstructionGroup):
    """The player's in-game state (score, health) and Leap-tracked hands.

    Owns a left-hand crosshair and a right-hand flame, both drawn as part of
    this InstructionGroup, and polls a Leap Motion controller each frame.
    """

    def __init__(self):
        super(Player, self).__init__()
        self.score = 0
        # Multiplier applied to each score increment; see set_score_mult().
        self.score_mult = 1
        self.health = 100
        self.controller = Leap.Controller()
        self.leftHand = Crosshair()
        self.rightHand = FlameHand()
        self.attacking = False
        # Register both hand graphics for drawing with this group.
        self.add(self.leftHand)
        self.add(self.rightHand)

    def set_hands(self, hands):
        # Route each Leap hand to the matching on-screen hand object,
        # converting Leap coordinates to pixel positions.
        # TODO: handle overlapping of enemy (in Handler)
        for hand in hands:
            pos = LeapHelper.position_as_pixels(hand)
            if hand.is_left:
                self.leftHand.set_hand(hand)
                self.leftHand.set_pos(pos)
            else:
                self.rightHand.set_hand(hand)
                self.rightHand.set_pos(pos)

    def get_flame(self):
        # Expose the right hand's flame particle (e.g. for collision checks).
        return self.rightHand.flameParticle

    def get_button(self):
        # Currently selected button under the right hand, or None.
        if not self.rightHand:
            return None
        return self.rightHand.get_button()

    def arm_weapon(self, btn):
        # Arm the right-hand weapon associated with button `btn`.
        if not self.rightHand:
            return
        self.rightHand.arm_weapon(btn)

    def unarm_weapon(self):
        if not self.rightHand:
            return
        self.rightHand.unarm_weapon()

    def is_attacking(self):
        return self.attacking

    def on_update(self, dt):
        # Per-frame update: poll the Leap controller and refresh attack state.
        if self.controller.is_connected:
            frame = self.controller.frame()
            self.set_hands(frame.hands)
            if self.rightHand:
                self.rightHand.set_brightness()
                # Update attack state
                hand = self.rightHand.get_hand()
                if not hand:
                    return
                # Hysteresis on the palm normal's y component: start attacking
                # when the palm is near fully upturned (>= .95), stop only once
                # it drops below .5 — prevents flicker near the threshold.
                y = hand.palm_normal.y
                if not self.attacking and y >= .95:
                    self.attacking = True
                elif self.attacking and y <= .5:
                    self.attacking = False
            else:
                # Can't be attacking if no right hand
                self.attacking = False
        # Should never be removed from the game
        return True

    def score_up(self):
        self.score += 10 * self.score_mult

    def set_score_mult(self, size):
        # Larger `size` -> larger multiplier; true division keeps fractions.
        self.score_mult = size / 5 + 1

    def get_health(self):
        return self.health

    def decrement_health(self, c=10):
        # NOTE(review): health may go below 0; presumably the caller checks
        # for game-over — confirm.
        self.health -= c
|
{"hexsha": "11b53b056896fff2669894313a7eeea7dd133a00", "size": 3207, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/Player.py", "max_stars_repo_name": "osmidy/Dischord", "max_stars_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "project/Player.py", "max_issues_repo_name": "osmidy/Dischord", "max_issues_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/Player.py", "max_forks_repo_name": "osmidy/Dischord", "max_forks_repo_head_hexsha": "3c3802eb4917adb9384256d8a0c7ba4f123fd166", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0731707317, "max_line_length": 73, "alphanum_fraction": 0.6052385407, "include": true, "reason": "import numpy", "num_tokens": 668}
|
import torch
import torch.distributions
from torch.distributions import Normal, Uniform
from torch.nn import Sequential, Tanh, ReLU, Linear, Dropout, CELU, BatchNorm1d
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
import sys, os, argparse, datetime, time, copy
from argparse import ArgumentParser
# matplotlib.use('svg')
matplotlib.rcParams['figure.figsize'] = (10,10)
plt.rcParams['svg.fonttype'] = 'none'
# exit()
# Select the compute device once; the tensor aliases below follow it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
    FloatTensor = torch.cuda.FloatTensor
    Tensor = torch.cuda.FloatTensor
else:
    # Plain else: the original `elif not torch.cuda.is_available()` was a
    # redundant tautology covering exactly this case.
    FloatTensor = torch.FloatTensor
    Tensor = torch.FloatTensor
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
# sys.path.append("..") # Up to -> KFAC -> Optimization -> PHD
# [print(x) for x in sys.path if 'PhD' in x]
cwd = os.path.abspath(os.getcwd())
sys.path.append("/".join(cwd.split("/")[:-2]))
from pytorch_ProbabilisticLayers.src.ProbabilisticLayers import BayesianNeuralNetwork
from pytorch_ProbabilisticLayers.src.ProbabilisticLayers import BayesLinear, VariationalNormal, MC_BatchNorm1D, SIVILayer
from pytorch_ProbabilisticLayers.data.ProbabilisticLayers_SyntheticData import generate_nonstationary_data
from pytorch_ProbabilisticLayers.src.ProbabilisticLayers_Utils import RunningAverageMeter
from pytorch_ProbabilisticLayers.src.ProbabilisticLayers_Loss import MC_NLL
from pytorch_ProbabilisticLayers.src.ProbabilisticLayers_Utils import MC_GradientCorrection
# print('Python', os.environ['PYTHONPATH'].split(os.pathsep))
# print('MKL', torch.has_mkl)
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping
# seed_everything(2)
class LitBayesNN(LightningModule):
    """Bayesian MLP (3 BayesLinear layers) trained with NLL + KL divergence.

    Each forward pass draws `num_MC` Monte Carlo weight samples; predictions
    have shape [num_MC, batch, out_features].
    """

    @staticmethod
    def add_model_specific_args(parent_parser):
        '''
        Adds arguments to the already existing argument parser 'parent_parser'
        '''
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--num_MC', type=int, default=100)
        parser.add_argument('--in_features', type=int, default=1)
        parser.add_argument('--out_features', type=int, default=1)
        parser.add_argument('--num_hidden', type=int, default=50)
        parser.add_argument('--batch_size', type=int, default=100)
        return parser

    def __init__(self, **kwargs):
        super().__init__()
        self.save_hyperparameters()
        self.fc1 = BayesLinear(self.hparams.in_features,
                               self.hparams.num_hidden,
                               self.hparams.num_MC, prior_scale=1.)
        self.fc2 = BayesLinear(self.hparams.num_hidden,
                               self.hparams.num_hidden,
                               self.hparams.num_MC, prior_scale=1.)
        self.fc3 = BayesLinear(self.hparams.num_hidden,
                               self.hparams.out_features,
                               num_MC=self.hparams.num_MC, prior_scale=1.)
        # self.mc_gradientcorrection = MC_GradientCorrection(self.parameters(), num_MC=self.hparams.num_MC)
        # NOTE(review): 'num_samples' is not added by add_model_specific_args;
        # presumably passed in via **kwargs — confirm at the call site.
        self.criterion = MC_NLL(num_samples=self.hparams.num_samples)  # mean requires rescaling of gradient

    def forward(self, x):
        # Replicate the batch along a leading MC dimension, then apply the
        # Bayesian layers: [num_MC, batch, features].
        batch_size = x.shape[0]
        out = x.unsqueeze(0).repeat(self.hparams.num_MC, *(x.dim() * (1,)))
        assert out.dim()==3
        actfunc = F.leaky_relu
        out = self.fc1(out)
        out = actfunc(out)
        out = self.fc2(out)
        out = actfunc(out)
        out = self.fc3(out)
        assert out.shape[:2] == torch.Size([self.hparams.num_MC, batch_size]), f'{out.size()[:2]=} != [{self.hparams.num_MC}, {batch_size}]'
        return out

    def collect_kl_div(self):
        # Sum the per-layer KL divergences computed during the last forward.
        self.kl_div = 0
        for name, module in self.named_children():
            if any([isinstance(module, layer) for layer in [BayesLinear]]):
                if hasattr(module, 'kl_div'):
                    self.kl_div = self.kl_div + module.kl_div
        return self.kl_div

    def training_step(self, batch, batch_idx):
        # ELBO-style loss: NLL + KL, both scaled by the dataset size.
        x, y = batch
        pred = self.forward(x)
        MSE = F.mse_loss(pred.mean(dim=0), y)
        NLL = self.criterion(pred, y) / self.hparams.num_samples
        KL = self.collect_kl_div() / self.hparams.num_samples
        progress_bar = {'NLL': NLL, 'KL': KL, 'MSE': MSE}
        return {'loss': NLL+KL, 'progress_bar': progress_bar}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        pred = self.forward(x)
        MSE = F.mse_loss(pred.mean(dim=0), y)
        NLL = self.criterion(pred, y) / self.hparams.num_samples
        KL = self.collect_kl_div() / self.hparams.num_samples
        progress_bar = {'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE}
        return {'val_loss': NLL + KL, 'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE, 'progress_bar': progress_bar}

    def validation_epoch_end(self, outputs):
        # Average the per-batch validation metrics over the epoch.
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        NLL = torch.stack([x['Val_NLL'] for x in outputs]).mean()
        KL = torch.stack([x['Val_KL'] for x in outputs]).mean()
        MSE = torch.stack([x['Val_MSE'] for x in outputs]).mean()
        # if (self.trainer.current_epoch+1)%(self.trainer.max_epochs//10)==0:
        # 	self.plot_prediction()
        progress_bar = {'val_loss': val_loss, 'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE}
        return {'val_loss': val_loss, 'progress_bar': progress_bar}

    def on_fit_end(self):
        self.plot_prediction()
        # a=1

    def plot_prediction(self):
        # Plot predictive mean and 1/2/3-sigma bands over an extended input
        # range.
        # NOTE(review): relies on module-level globals `x` and `y` from the
        # training script, not on self — breaks if imported standalone.
        with torch.no_grad():
            x_pred = torch.linspace(3*x.min(), 3*x.max(), 100).reshape(-1,1)
            pred = self.forward(x_pred)
            x_pred = x_pred.squeeze()
            mu = pred.mean(dim=0).squeeze()
            std = pred.std(dim=0).squeeze()
            fig = plt.figure()
            plt.xlim(3*x.min(), 3*x.max())
            plt.ylim(2*y.min(), 2*y.max())
            plt.scatter(x,y, alpha=0.25, s=1)
            plt.plot(x_pred, mu, color='red', alpha=0.5)
            plt.fill_between(x_pred, mu-1*std, mu+1*std, alpha=0.25, color='red')
            plt.fill_between(x_pred, mu-2*std, mu+2*std, alpha=0.10, color='red')
            plt.fill_between(x_pred, mu-3*std, mu+3*std, alpha=0.05, color='red')
            plt.title(f"Epoch: {self.trainer.current_epoch}")
            plt.grid()
            plt.show()

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.01)
        # return torch.optim.SGD(self.parameters(), lr=0.001, momentum=0.9, nesterov=True)

    def backward(self, trainer, loss: Tensor, optimizer: torch.optim.Optimizer, optimizer_idx: int) -> None:
        # NOTE(review): signature matches an older Lightning hook API —
        # confirm against the pinned pytorch_lightning version.
        loss.backward()
        # for name, param in model.named_parameters():
        # 	print('*'*100)
        # 	print(name)
        # 	print(".grad.shape: ", param.grad.shape)
        # 	print(".grad_batch.shape: ", param.grad_batch.shape)
        #
        # exit()

    def optimizer_step(self,
                       epoch: int,
                       batch_idx: int,
                       optimizer: torch.optim.Optimizer,
                       optimizer_idx: int,
                       second_order_closure=None,
                       on_tpu: bool = False,
                       using_native_amp: bool = False,
                       using_lbfgs: bool = False):
        # Manual optimizer step; the commented lines below were gradient
        # diagnostics for the Bayesian layers.
        # self.mc_gradientcorrection.step()
        optimizer.step()
        # self.trainer.progress_bar_metrics.update({'dμ': copy.deepcopy(self.fc1.weight.loc.grad[0,0])/self.fc1.sampled_w[:,0,0].mean().detach()})
        # self.trainer.progress_bar_metrics.update({'dLdμ': copy.deepcopy(self.fc1.weight.loc.grad[0, 0])})
        # self.trainer.progress_bar_metrics.update({'locs.grad.mean': copy.deepcopy(self.fc3.locs.grad[:,0,0].mean())})
        # self.trainer.progress_bar_metrics.update({'Std[dLdμ]': copy.deepcopy(self.fc1.locs.grad[:, 0, 0].std())})
        #
        # self.trainer.progress_bar_metrics.update({'dρ': copy.deepcopy(self.fc1.weight.logscale.grad[0,0]) / self.fc1.sampled_w[:,0, 0].mean().detach()})
        # self.trainer.progress_bar_metrics.update({'dLdρ': copy.deepcopy(self.fc1.weight.logscale.grad[0, 0])})
        # self.trainer.progress_bar_metrics.update({'Std[dLdρ]': copy.deepcopy(self.fc1.logscales.grad[:, 0, 0].std())})
        # self.trainer.progress_bar_metrics.update({'E[dLdρ]': copy.deepcopy(self.fc1.logscales.grad[:, 0, 0].mean())})
        # print(f"{self.fc1.logscales.grad[:20,0,0]=}")
        optimizer.zero_grad()
class LastLayerSIVI(LightningModule):
    """Regression model whose final (output) layer is stochastic (SIVI).

    The two hidden layers are ordinary deterministic ``Linear`` layers; only
    ``fc3`` draws Monte Carlo weight samples, so the shared features are
    computed once per batch and then replicated across the ``num_MC``
    dimension.  Training minimises NLL + KL, both scaled by the dataset size.

    NOTE(review): the dict-returning training/validation hooks follow the
    pre-1.0 pytorch-lightning API -- confirm the installed version.
    """

    @staticmethod
    def add_model_specific_args(parent_parser):
        '''
        Adds arguments to the already existing argument parser 'parent_parser'
        '''
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--num_MC', type=int, default=25)
        parser.add_argument('--in_features', type=int, default=1)
        parser.add_argument('--out_features', type=int, default=1)
        parser.add_argument('--num_hidden', type=int, default=50)
        parser.add_argument('--batch_size', type=int, default=100)
        return parser

    def __init__(self, **kwargs):
        super().__init__()
        self.save_hyperparameters()

        # Deterministic feature extractor.
        self.fc1 = Linear(self.hparams.in_features, self.hparams.num_hidden)
        self.fc2 = Linear(self.hparams.num_hidden, self.hparams.num_hidden)
        # Stochastic output layer (semi-implicit variational inference).
        self.fc3 = SIVILayer(self.hparams.num_hidden,
                             self.hparams.out_features,
                             _dim_noise_input=5)
        # self.fc3 = BayesLinear(self.hparams.num_hidden,
        #                        self.hparams.out_features)
        # self.mc_gradientcorrection = MC_GradientCorrection(self.parameters(), num_MC=self.hparams.num_MC)

        # NOTE(review): 'num_samples' is not registered by
        # add_model_specific_args; it must be supplied by the surrounding
        # script's parser -- confirm before reusing this class elsewhere.
        self.criterion = MC_NLL(num_samples=self.hparams.num_samples)  # mean requires rescaling of gradient

    def forward(self, x):
        """Return predictions of shape (num_MC, batch_size, out_features)."""
        batch_size = x.shape[0]
        # out = x.unsqueeze(0).repeat(self.hparams.num_MC, *(x.dim() * (1,)))r
        # assert out.dim()==3
        actfunc = [torch.tanh, F.celu, F.leaky_relu][2]  # leaky_relu is selected
        out = self.fc1(x)
        out = actfunc(out)
        out = self.fc2(out)
        out = actfunc(out)
        # print(f"{out.shape=}")
        # Replicate the deterministic features across the MC-sample dimension
        # before feeding the stochastic last layer.
        out = out.unsqueeze(0).repeat(self.hparams.num_MC, 1, 1)
        out = self.fc3(out)
        assert out.shape[:2] == torch.Size([self.hparams.num_MC, batch_size]), f'{out.size()[:2]=} != [{self.hparams.num_MC}, {batch_size}]'
        return out

    def collect_kl_div(self):
        """Sum the KL divergences of all stochastic child layers into self.kl_div."""
        self.kl_div = 0.
        for name, module in self.named_children():
            if any([isinstance(module, layer) for layer in [BayesLinear, SIVILayer]]):
                if hasattr(module, 'kl_div'):
                    self.kl_div = self.kl_div + module.kl_div
        return self.kl_div

    def training_step(self, batch, batch_idx):
        """One training step: ELBO-style loss NLL + KL (both per-datapoint)."""
        x, y = batch
        pred = self.forward(x)
        MSE = F.mse_loss(pred.mean(dim=0), y)
        # Scale both terms by the dataset size so their magnitudes match.
        NLL = self.criterion(pred, y) / self.hparams.num_samples
        KL = self.collect_kl_div() / self.hparams.num_samples
        progress_bar = {'NLL': NLL, 'KL': KL, 'MSE': MSE}
        return {'loss': NLL+KL, 'progress_bar': progress_bar}

    def validation_step(self, batch, batch_idx):
        """Validation step: same metrics as training, reported under Val_*."""
        x, y = batch
        pred = self.forward(x)
        MSE = F.mse_loss(pred.mean(dim=0), y)
        NLL = self.criterion(pred, y) / self.hparams.num_samples
        KL = self.collect_kl_div() / self.hparams.num_samples
        progress_bar = {'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE}
        return {'val_loss': NLL + KL, 'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE, 'progress_bar': progress_bar}

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation metrics over the epoch."""
        val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        NLL = torch.stack([x['Val_NLL'] for x in outputs]).mean()
        KL = torch.stack([x['Val_KL'] for x in outputs]).mean()
        MSE = torch.stack([x['Val_MSE'] for x in outputs]).mean()
        # if (self.trainer.current_epoch+1)%(self.trainer.max_epochs//10)==0:
        #     self.plot_prediction()
        progress_bar = {'val_loss': val_loss, 'Val_NLL': NLL, 'Val_KL': KL, 'Val_MSE': MSE}
        return {'val_loss': val_loss, 'progress_bar': progress_bar}

    def on_fit_end(self):
        """After training: plot the fit and sample the SIVI posterior."""
        self.plot_prediction()
        self.fc3.sample_posterior_dist()

    def plot_prediction(self):
        """Plot predictive mean with 1/2/3-sigma bands over the training data.

        NOTE(review): reads the module-level globals ``x`` and ``y`` created
        by the surrounding script -- only works when run as that script.
        """
        with torch.no_grad():
            x_pred = torch.linspace(3*x.min(), 3*x.max(), 100).reshape(-1,1)
            pred = self.forward(x_pred)
            x_pred = x_pred.squeeze()
            mu = pred.mean(dim=0).squeeze()
            std = pred.std(dim=0).squeeze()

            fig = plt.figure()
            plt.xlim(3*x.min(), 3*x.max())
            plt.ylim(2*y.min(), 2*y.max())
            plt.scatter(x,y, alpha=0.25, s=1)
            plt.plot(x_pred, mu, color='red', alpha=0.5)
            plt.fill_between(x_pred, mu-1*std, mu+1*std, alpha=0.25, color='red')
            plt.fill_between(x_pred, mu-2*std, mu+2*std, alpha=0.10, color='red')
            plt.fill_between(x_pred, mu-3*std, mu+3*std, alpha=0.05, color='red')
            plt.title(f"Epoch: {self.trainer.current_epoch}")
            plt.grid()
            plt.show()

    def configure_optimizers(self):
        # Plain Adam over all parameters.
        return torch.optim.Adam(self.parameters(), lr=0.01)
        # return torch.optim.SGD(self.parameters(), lr=0.001, momentum=0.9, nesterov=True)

    def backward(self, trainer, loss: Tensor, optimizer: torch.optim.Optimizer, optimizer_idx: int) -> None:
        # Standard single backward pass (Lightning hook override).
        loss.backward()
        # for name, param in model.named_parameters():
        #     print('*'*100)
        #     print(name)
        #     print(".grad.shape: ", param.grad.shape)
        #     print(".grad_batch.shape: ", param.grad_batch.shape)
        # exit()

    def optimizer_step(self,
                       epoch: int,
                       batch_idx: int,
                       optimizer: torch.optim.Optimizer,
                       optimizer_idx: int,
                       second_order_closure=None,
                       on_tpu: bool = False,
                       using_native_amp: bool = False,
                       using_lbfgs: bool = False):
        # Apply the update, then clear gradients for the next iteration.
        # self.mc_gradientcorrection.step()
        optimizer.step()
        # (commented debug dumps of per-sample gradient statistics omitted)
        optimizer.zero_grad()
# ---------------------------------------------------------------------------
# Script entry point: build data, model and train with PyTorch Lightning.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--num_samples', type=int, default=1000)
parser.add_argument('--x_noise_std', type=float, default=0.01)
parser.add_argument('--y_noise_std', type=float, default=.2)
parser.add_argument('--max_epochs', type=int, default=1000)
parser.add_argument('--progress_bar_refresh_rate', type=int, default=5)
# Bug fix: the model instantiated below is LastLayerSIVI, so register ITS
# model-specific arguments (previously LitBayesNN's were registered instead).
parser = LastLayerSIVI.add_model_specific_args(parser)
args = parser.parse_args()

# Synthetic 1-d regression data, standardised to roughly zero mean / unit std.
x, y = generate_nonstationary_data(num_samples=args.num_samples,
                                   x_noise_std=args.x_noise_std,
                                   y_noise_std=args.y_noise_std,
                                   plot=False)
x = (x - x.mean(dim=0)) / (x.std(dim=0) + 1e-1)
y = (y - y.mean(dim=0)) / (y.std(dim=0) + 1e-1)

# Train on shuffled mini-batches; validate on the full data set in one batch.
train_loader = DataLoader(TensorDataset(x, y), batch_size=args.batch_size, shuffle=True)
val_loader = DataLoader(TensorDataset(x, y), batch_size=args.num_samples, shuffle=False)

# litbnn = LitBayesNN(**vars(args), pi=np.pi)
litbnn = LastLayerSIVI(**vars(args))

early_stop_callback = EarlyStopping(
    min_delta=0.00,
    patience=10,
    verbose=True,
    mode='min'
)

trainer = Trainer.from_argparse_args(args,
                                     progress_bar_refresh_rate=1,
                                     # limit_train_batches=0.3,
                                     early_stop_callback=early_stop_callback,
                                     check_val_every_n_epoch=5,
                                     logger=False,
                                     )
trainer.fit(litbnn, train_dataloader=train_loader, val_dataloaders=val_loader)
|
{"hexsha": "29357cce21c0d5d95a48c0e2342d0822e5c3a1f6", "size": 15456, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/SIVILastLayer.py", "max_stars_repo_name": "ludwigwinkler/pytorch_ProbabilisticLayers", "max_stars_repo_head_hexsha": "10311a11e76bc98027766918131486ad8d9a9d36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-11-06T15:23:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-25T09:37:11.000Z", "max_issues_repo_path": "experiments/SIVILastLayer.py", "max_issues_repo_name": "ludwigwinkler/pytorch_ProbabilisticLayers", "max_issues_repo_head_hexsha": "10311a11e76bc98027766918131486ad8d9a9d36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-03-19T15:59:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-30T08:54:50.000Z", "max_forks_repo_path": "experiments/SIVILastLayer.py", "max_forks_repo_name": "ludwigwinkler/pytorch_ProbabilisticLayers", "max_forks_repo_head_hexsha": "10311a11e76bc98027766918131486ad8d9a9d36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8108108108, "max_line_length": 148, "alphanum_fraction": 0.6995341615, "include": true, "reason": "import numpy", "num_tokens": 4466}
|
import numpy as np


def find_21_paths(matrix):
    """Print every 3- or 4-cell path summing to 21 and return the first one.

    A path is a centre cell plus two (or three) of its orthogonal
    neighbours in a 5x5 grid.  Each path element is ``[row, col, value]``.
    Returns an empty list when no path sums to 21.
    """
    first_path = []
    for i in range(5):
        for j in range(5):
            center = int(matrix[i][j])
            # Collect the (up to four) orthogonal neighbours as [row, col, value].
            neighbours = []
            if i > 0:
                neighbours.append([i - 1, j, matrix[i - 1][j]])
            if i < 4:
                neighbours.append([i + 1, j, matrix[i + 1][j]])
            if j < 4:
                neighbours.append([i, j + 1, matrix[i][j + 1]])
            if j > 0:
                neighbours.append([i, j - 1, matrix[i][j - 1]])
            for a in range(len(neighbours)):
                # Bug fix: iterate up to len(neighbours) -- the original
                # stopped one element early (len-1) and its 4-cell loop reused
                # the grid column j instead of the neighbour index.
                for b in range(a + 1, len(neighbours)):
                    if center + neighbours[a][2] + neighbours[b][2] == 21:
                        print("****************************************")
                        print("Path that sums up to 21: ")
                        print(center, " (", i, ",", j, ")")
                        print(neighbours[a][2], " (", neighbours[a][0], ",", neighbours[a][1], ")")
                        # Bug fix: print (row, col) -- indices 0 and 1 -- the
                        # original printed (col, value) here.
                        print(neighbours[b][2], " (", neighbours[b][0], ",", neighbours[b][1], ")")
                        print("****************************************")
                        if not first_path:
                            first_path = [[i, j, center],
                                          list(neighbours[a]),
                                          list(neighbours[b])]
                    # Extend the pair with a third neighbour (4-cell paths).
                    for c in range(b + 1, len(neighbours)):
                        if center + neighbours[a][2] + neighbours[b][2] + neighbours[c][2] == 21:
                            print()
                            print("Path that sums up to 21: ")
                            print(center, " (", i, ",", j, ")")
                            print(neighbours[a][2], " (", neighbours[a][0], ",", neighbours[a][1], ")")
                            print(neighbours[b][2], " (", neighbours[b][0], ",", neighbours[b][1], ")")
                            print(neighbours[c][2], " (", neighbours[c][0], ",", neighbours[c][1], ")")
                            if not first_path:
                                first_path = [[i, j, center],
                                              list(neighbours[a]),
                                              list(neighbours[b]),
                                              list(neighbours[c])]
    return first_path


matrix = np.random.randint(10, size=(5, 5))
print(matrix)
shortestPath = find_21_paths(matrix)
if len(shortestPath) > 0:
    print("Shortest path ")
    for step in shortestPath:
        print(step[2], "(", step[0], ",", step[1], ")")
else:
    print("No path found that sums up to 21")
|
{"hexsha": "3c34298f08c209a793b0f23805e608bb54a6b0be", "size": 3217, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonTask/path.py", "max_stars_repo_name": "panchalneel/CaesarCipher", "max_stars_repo_head_hexsha": "5a0d0e7a9ab3511a81551323f985f4271857fdff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonTask/path.py", "max_issues_repo_name": "panchalneel/CaesarCipher", "max_issues_repo_head_hexsha": "5a0d0e7a9ab3511a81551323f985f4271857fdff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonTask/path.py", "max_forks_repo_name": "panchalneel/CaesarCipher", "max_forks_repo_head_hexsha": "5a0d0e7a9ab3511a81551323f985f4271857fdff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7792207792, "max_line_length": 119, "alphanum_fraction": 0.4830587504, "include": true, "reason": "import numpy", "num_tokens": 856}
|
from __future__ import print_function
#
# Computing the Gibbs free energy difference between a set of polymorphs by integrating the helmholtz free energy at all volumes
#
# Copyright Eric Dybeck and Michael R. Shirts, University of Virginia, 2014
#
import pdb
import sys

import numpy # numerical array library
import pymbar # multistate Bennett acceptance ratio
from pymbar import timeseries # timeseries analysis
from optparse import OptionParser # for parsing command-line options

import calculate_gro_volume
#=============================================================================================
# READ IN USER INPUTS
#=============================================================================================
parser = OptionParser()
# NOTE(review): default=True with action='store_true' makes this flag a no-op
# (it can never be False), contradicting the "(default false)" help text --
# confirm which behaviour is intended.
parser.add_option('-p', '--plot', dest = 'plot', help = 'Plot output (default false)', default=True, action = 'store_true')
parser.add_option('-S', dest = 'MinVolume', help = 'Minimum box vector scaling', default = 770)
parser.add_option('-B', dest = 'MaxVolume', help = 'Maximum box vector scaling', default = 970)
parser.add_option('-s', dest = 'spacing', help = 'Spacing between lambda points', default = 5)
parser.add_option('-n', dest = 'polymorphs', help = 'Polymorphs to analyze', default = 'all')
parser.add_option('-N', dest = 'molecules', help = 'number of supercell molecules', default = 72)
parser.add_option('-I', dest = 'independent', help = 'number of independent molecules', default = 4)
parser.add_option('-i', dest = 'ignoreframes', help = 'Initial frames to ignore', default = 2000)
parser.add_option('-j', dest = 'includeframes', help = 'Number of frames to include', default = 100000)
parser.add_option('-u', dest = 'potential', help = 'Potential used in the simulation', default = 'oplsaa')
parser.add_option('-T', dest = 'Temperature', help = 'Temperature', default = 200)
parser.add_option('-P', dest = 'Pressure', help = 'Pressure', default = 1)
# Reference volumes/energies for other setups, kept for provenance:
#parser.add_option('-V', dest = 'Volume', help = 'Reference volume for the helmholtz free energy calculations', default = "8.755 8.679 8.667") #Gromos 72ind
#parser.add_option('-A', dest = 'Helmholtz', help = 'Helmholtz free energy for each polymorph at the reference volume', default = "-11.014 -10.796 -10.835") #Gromos 72ind
#parser.add_option('-V', dest = 'Volume', help = 'Reference volume for the helmholtz free energy calculations', default = "8.773 8.651 8.645") #OPLSAAFAKEG 72ind
#parser.add_option('-A', dest = 'Helmholtz', help = 'Helmholtz free energy for each polymorph at the reference volume', default = "-11.378 -11.213 -11.248") #OPLSAAFAKEG 72ind
#parser.add_option('-V', dest = 'Volume', help = 'Reference volume for the helmholtz free energy calculations', default = "0.484 0.477 0.481") #GROMOS 4ind
#parser.add_option('-A', dest = 'Helmholtz', help = 'Helmholtz free energy for each polymorph at the reference volume', default = "-11.347 -11.201 -10.953") #OPLSAAFAKEG 4ind
parser.add_option('-d', dest = 'directory', help = 'Parent directory of the volume change directories', default = 'Volume')
parser.add_option('-H', '--hinge', dest = 'hinge', help = 'Optional string at end of jobs', default = 'DefaultHinge')

(options, args) = parser.parse_args()

# Unpack the parsed options into plain typed locals.
MinV = float(options.MinVolume)
MaxV = float(options.MaxVolume)
dV = float(options.spacing)
Temp = float(options.Temperature)
Pressure = int(options.Pressure)
Molecules = int(options.molecules)
Independent = int(options.independent)
ignoreframes = int(options.ignoreframes)
includeframes = int(options.includeframes)
potential = str(options.potential)
directory = options.directory
hinge = options.hinge

# Per-potential reference average volumes (nm^3) and Helmholtz free energies
# (kcal/mol), indexed [Benzene I, II, III].
AvgVolume = dict()
Helmholtz = dict()
AvgVolume['gromos'] = [0.480, 0.475, 0.479]
AvgVolume['oplsaafakeg'] = [0.481, 0.474, 0.476]
AvgVolume['oplsaa'] = [0.486, 0.480, 0.478]
AvgVolume['oplsaafakea'] = [0.494, 0.474, 0.478]
#200K
Helmholtz['gromos'] = [-10.881, -10.626, -10.529]
Helmholtz['oplsaafakeg'] = [-11.221, -11.038, -10.918]
Helmholtz['oplsaa'] = [-11.077, -10.888, -10.770]
Helmholtz['oplsaafakea'] = [-10.618, -10.985, -10.911]
#100K
#Helmholtz['gromos'] = [-10,145, -9.867, -9.814]
#Helmholtz['oplsaa'] = [-10.402, -10.176, -10.115]
#72 Benzene
#Helmholtz['gromos'] = [-11.077, -10.888, -10.770]
#Helmholtz['oplsaa'] = [-11.261, -11.058, -11.061]

if (options.plot):
    import matplotlib # for making plots, version 'matplotlib-1.1.0-1'; errors may pop up when using earlier versions
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    from matplotlib.font_manager import FontProperties as FP
    font = {'family' : 'normal',
            'weight' : 'normal',
            'size' : 16}
    matplotlib.rc('font', **font)
    colors = ('b', 'g', 'r')
    markers= ('o', 'o', 'o')
#=============================================================================================
# ENSURE THAT USER INPUTS ARE SENSIBLE
#=============================================================================================
if Temp < 0:
    print("Invalid Temperature: " + str(Temp))
    sys.exit()

if (MinV == -1) and (MaxV == -1) and (dV == -1):
    print("Using default values!")
    # The scaling parameters sampled
    Volumes = ['v100', 'v102', 'v104', 'v106', 'v108', 'v110', 'v112', 'v114', 'v116', 'v118', 'v120']
elif MinV < 0 or MaxV < 0 or dV < 0 or MinV > MaxV:
    print("Invalid Volume Specifications")
    sys.exit()
else:
    Volume = MinV
    Volumes = []
    Volume_names = []
    while Volume < MaxV:
        Volumes.append(Volume)
        # Zero-pad the directory-name token to four characters.
        if Volume < 10:
            Volume_names.append('000' + str(int(Volume)) + 'V')
        elif Volume < 100:
            Volume_names.append('00' + str(int(Volume)) + 'V')
        else:
            Volume_names.append(str(int(Volume)) + 'V')
        Volume = Volume + dV
    # Catch the final Volume point.
    # Bug fix: name the final point after MaxV itself; 'Volume' has already
    # been incremented past MaxV and disagrees with it whenever
    # (MaxV - MinV) is not a multiple of dV.
    Volumes.append(MaxV)
    if MaxV < 10:
        Volume_names.append('000' + str(int(MaxV)) + 'V')
    elif MaxV < 100:
        Volume_names.append('00' + str(int(MaxV)) + 'V')
    else:
        Volume_names.append(str(int(MaxV)) + 'V')

# POLYMORPH SELECTION
if (options.polymorphs == 'all'):
    polymorph = ['Benzene I', 'Benzene II', 'Benzene III']
    polymorph_short = ['p1', 'p2', 'p3']
elif (options.polymorphs == 'p1'):
    polymorph = ['Benzene I']
    polymorph_short = ['p1']
elif (options.polymorphs == 'p2'):
    polymorph = ['Benzene II']
    polymorph_short = ['p2']
elif (options.polymorphs == 'p3'):
    polymorph = ['Benzene III']
    polymorph_short = ['p3']
else:
    print("Polymorph Inputs Wrong")
    sys.exit()

# POTENTIAL
if potential != "oplsaa" and potential != "gromos" and potential != "oplsaafakeg" and potential != "oplsaafakea":
    print("Invalid Potential")
    print("Supported potentials: oplsaa gromos oplsaafakeg oplsaafakea")
    sys.exit()
#=============================================================================================
# FORMAT INPUTS
#=============================================================================================
# POTENTIAL: map the force-field name onto its directory token and charge hinge.
_potential_tokens = {
    "oplsaa": ("OPLS", ""),
    "gromos": ("GROM", ""),
    "oplsaafakeg": ("FAKEG", "_C01150"),
    "oplsaafakea": ("FAKEA", "_C01150"),
}
PotNAME, Chargehinge = _potential_tokens.get(potential, ("", ""))

# NUMBER OF MOLECULES: include the independent count only when it differs.
if Molecules == Independent:
    Molname = str(Molecules) + '_'
else:
    Molname = str(Molecules) + '_' + str(Independent) + 'ind_'

# PRESSURE: zero-pad the pressure token to three characters.
if Pressure < 10:
    Pname = "00" + str(int(Pressure)) + "P"
elif Pressure < 100:
    Pname = "0" + str(int(Pressure)) + "P"
else:
    Pname = str(int(Pressure)) + "P"

# OPTIONAL HINGE: the sentinel 'DefaultHinge' means "no hinge suffix".
hinge = "" if hinge == "DefaultHinge" else "_" + hinge
#=============================================================================================
# READ IN RAW DATA
#=============================================================================================
# Constants.
kB = 1.3806488e-23 * 6.0221413e23 / (1000.0 * 4.184)  # Boltzmann constant in kcal/mol

gname = "pre_EQ.gro"
molecule = 'Benzene'  # The crystalline molecule
ignore_symbols = ['#', '@', '@TYPE', 'STEP', '=====================']  # Lines to ignore when reading in energies

# Read in the reference volume and helmholtz free energy.
Vref = numpy.zeros(len(polymorph), float)
Aref = numpy.zeros(len(polymorph), float)
A_shift = 0.0
for i, token in enumerate(AvgVolume[potential]):
    Vref[i] = float(token)
for i, token in enumerate(Helmholtz[potential]):
    Aref[i] = int(Independent) * float(token) + A_shift

# Parameters
T_k = Temp * numpy.ones(len(Volumes), float)     # Convert temperatures to floats
P = numpy.zeros([3, len(Volumes)], float)        # Mean pressure at each volume
ddP = numpy.zeros([3, len(Volumes)], float)      # Standard deviation of the mean pressure at each volume
# NOTE(review): V is initialised with Temp like T_k but overwritten per-state
# below -- presumably just a placeholder; confirm.
V = Temp * numpy.ones(len(Volumes), float)       # Convert Volumes to floats
P_NVT = numpy.zeros([3, 20000], float)           # Pressure of each configuration in the NVT simulation
V_NPT = numpy.zeros([3, 20000], float)           # Volume of each configuration in the NPT simulation
N_k = numpy.zeros(3, int)                        # Number of NPT configurations for each polymorph
K = len(T_k)     # How many states?
Kbig = K + 0     # total number of states examined; none are unsampled
N_max = 200000   # maximum number of snapshots/simulation (could make this automated) - doesn't matter, as long as it's long enough.
beta_k = 1.0 / (kB * T_k)  # beta factor for the different temperatures
# Bug fix: use integer division -- under Python 3, (K+1)/2 is a float and
# numpy.zeros rejects non-integer dimensions.
dA = numpy.zeros([3, (K + 1) // 2], float)
ddA = numpy.zeros([3, (K + 1) // 2], float)
# seeds = [201]  # The random seed used (not included at the moment)
g_k = numpy.zeros([len(T_k)], float)
# For every polymorph and every sampled volume state, read the NVT pressure
# trace (running mean + standard error) and the box volume from the .gro file.
for i,poly in enumerate(polymorph):
    # Allocate storage for simulation data
    #N_k = numpy.zeros([Kbig],numpy.int32)              # N_k[k] is the total number of snapshots from alchemical state k
    #u_kln = numpy.zeros([K,Kbig,N_max], numpy.float64) # u_kln[k,l,n] is the reduced potential energy of snapshot n from umbrella simulation k
    for k in range(K):
        n = 0
        linenum = 0
        #dirname='../finishedJobs/' + directory + '/benzene_GRO_' + PotNAME + '_' + polymorph_short[i] + '_' + Molname + str(int(Temp))+'K' + Chargehinge + '_000L_100G_' + Pname
        dirname='/oldhome/ecd4bd/finishedJobs_archive/' + directory + '/benzene_GRO_' + PotNAME + '_' + polymorph_short[i] + '_' + Molname + str(int(Temp))+'K' + Chargehinge + '_000L_100G_' + Pname

        # cycle through all the pressure data
        fname=dirname + '_' + Volume_names[k]+hinge + '/pressure.xvg'
        infile = open(fname, 'r')
        lines = infile.readlines()
        infile.close()
        print("loading " + fname)

        ignorecounter=0
        counter=0
        for line in lines:
            tokens_pressure = line.split()
            # Skip xvg comment/header lines.
            if tokens_pressure[0] in ignore_symbols:
                continue
            #ignore the first set of frames (equilibration)
            if ignorecounter < ignoreframes:
                ignorecounter+=1
                continue
            P_NVT[i,counter] = float(tokens_pressure[1])
            # Incremental running mean: avoids storing a separate sum.
            P[i,k] = P[i,k] * (float(counter)/(counter+1)) + float(tokens_pressure[1])*(float(1)/(counter+1)) #Moving average of the pressure
            counter+=1

        # Calculate the standard deviation of the (mean) pressure.
        ddP[i,k] = numpy.std(P_NVT[i,:counter])/(counter**0.5)

        # Box volume per independent-molecule fraction, from the .gro file.
        fname=dirname+ '_' + Volume_names[k]+hinge +'/'+gname
        print("loading " + fname)
        V[k] = (float(Independent)/Molecules)*numpy.round(calculate_gro_volume.Volume(fname),3)

        ##Read in the actual volume distribution (disabled; see V_NPT/N_k note
        ## in the plotting section below).
        #fname_volume=dirname+'/volume.xvg'
        ##fname_volume = '../finishedJobs/' + directory + '/benzene_GRO_' + PotNAME + '_' + polymorph_short[i] + '_' + Molname + str(int(Temp))+'K' + Chargehinge + '_000L_100G_' + Pname + '_c/volume.xvg'
        #infile = open(fname_volume, 'r')
        #lines_volume = infile.readlines()
        #infile.close()
        #print "loading " + fname_volume
        #ignorecounter=0
        #n=0;
        #for j,line in enumerate(lines_volume):
        #    tokens_volume = line.split()
        #    if tokens_volume[0] in ignore_symbols:
        #        continue
        #    #ignore the first set of frames
        #    if ignorecounter < ignoreframes/10.0:
        #        ignorecounter+=1
        #        continue
        #    #time[n] = float(tokens_volume[0])/1000.0
        #    V_NPT[i,n] = float(tokens_volume[1]);
        #    n+=1
        #N_k[i]=n
#=====================================================================================================
# REGRESS A POLYNOMIAL FIT TO THE PV CURVES AND INTEGRATE TO FIND A(V) FOR EACH POLYMORPH
#=====================================================================================================
nm_to_M = 1.0e-09            # Conversion from nanometers into meters (comment fixed: 1e-9 is nm, not angstroms)
Bar_to_Pa = 100000           # Conversion from bar to pascals
J_to_kcal = 0.2390057*0.001  # Conversion from kJ to kcal
Na = 6.022*10**23            # Avogadros number
degree = 4                   # degree of the polynomial fit
dV = 0.001                   # Incremental volume for plotting the regressed fit
V_axis = numpy.arange(V[0], V[len(V)-1]+dV, dV)           # V axis for plotting and numerically integrating
P_axis = numpy.zeros([len(polymorph), len(V_axis)], float)  # Fitted pressure at each volume in V_axis for each polymorph
A = numpy.zeros([len(polymorph), len(V_axis)], float)       # Helmholtz free energy estimate for each polymorph at each volume
Prob_V = numpy.zeros([len(polymorph), len(V_axis)], float)  # Probability of occupying a volume between V and V+dV for each polymorph
G = numpy.zeros([len(polymorph)], float)                    # Gibbs free energy estimate for each polymorph

# Regress the polynomial fit (one row of coefficients per polymorph).
polyfit = numpy.transpose(numpy.polyfit(V, numpy.transpose(P), degree))
# Antiderivative coefficients: divide each term by its new exponent.
polyfit_integral = numpy.zeros([len(polymorph), degree+1], float)
for i in range(degree+1):
    polyfit_integral[:, i] = polyfit[:, i]*1.0/(degree-i+1)

# Calculate the regressed pressure and the helmholtz free energy
# A(V) = Aref - integral_{Vref}^{V} P dV (with unit conversion to kcal/mol).
for i in range(len(polymorph)):
    A[i, :] = Aref[i]  # Add the reference free energy term
    for j in range(degree+1):
        P_axis[i, :] += polyfit[i, j]*numpy.power(V_axis[:], degree-j)
        A[i, :] -= (nm_to_M)**3 * Bar_to_Pa * J_to_kcal * Na * (polyfit_integral[i, j]*numpy.power(V_axis[:], degree-j+1) - polyfit_integral[i, j]*numpy.power(Vref[i], degree-j+1))

# Bug fix: removed a leftover pdb.set_trace() that halted the script here.

# Calculate the probability of being at each volume (NPT Boltzmann weight),
# then the Gibbs free energy from the volume partition sum.
for i in range(len(polymorph)):
    Prob_V[i, :] = numpy.exp(-1*beta_k[0]*A[i, :]) * numpy.exp(-((nm_to_M)**3 * Bar_to_Pa * J_to_kcal * Na)*beta_k[0]*Pressure*V_axis[:])*dV
    G[i] = -1.0/beta_k[0] * numpy.log(numpy.sum(Prob_V[i, :]))/float(Independent)
    Prob_V[i, :] /= numpy.sum(Prob_V[i, :])

# Alternative ensemble-average formulation, kept for reference:
#for i in range(len(polymorph)):
#    G[i] = numpy.dot((A[i,:]-A_shift),Prob_V[i,:])# - numpy.dot(A[0,:],Prob_V[0,:])
#G /= float(Independent)

# Print the integrated free energy difference:
print("Helmholtz Reference:")
print((Aref-A_shift)/float(Independent))
print("PV Correction:")
print(G-(Aref-A_shift)/float(Independent))
print("Gibbs Free Energy:")
print(G)
#=============================================================================================
# PLOT THE FINAL DATA
#=============================================================================================
# now plot all 3 lines of the pressure as a function of volume
if (options.plot) and options.polymorphs == 'all':
    fig=plt.figure(1)
    ax=fig.add_subplot(111)
    xlabel = 'Volume (nm^3)'
    ylabel = 'Average Pressure (Bar)'
    #plt.title('All Polymorphs')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for i in range(len(polymorph)):
        ax.errorbar(V,P[i,:],ddP[i,:],color=colors[i],marker=markers[i],linestyle='None') #Plot the actual data
        ax.plot(V_axis,P_axis[i,:],color=colors[i],label=polymorph[i]) #Plot the regressed fit
    #plt.hold(true)
    plt.legend(loc='upper right')
    filename='../../Pictures/' + PotNAME + '_' + str(Molecules) + '_' + str(Independent) + 'ind_' + str(int(Temp)) + 'K'+'_PvsV.pdf'
    plt.savefig(filename, bbox_inches='tight')
    plt.show()

    # Also plot all three lines of the free energy vs V
    fig=plt.figure(2)
    ax=fig.add_subplot(111)
    xlabel = 'Volume (nm^3)'
    ylabel = 'Free Energy (kcal/mol)'
    #plt.title('All Polymorphs')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for i in range(len(polymorph)):
        ax.plot(V_axis,A[i,:],color=colors[i]) #Plot the free energy
    #filename='All_' + PotNAME + '_' + str(Molecules) + '_' + str(int(Temp)) + 'K'+'_dAvsV.png'
    #plt.savefig(filename, bbox_inches='tight')
    plt.show()

    # Also plot all three histograms of volume
    fig=plt.figure(3)
    xlabel = 'Volume'
    ylabel = 'Probability'
    #plt.title('All Polymorphs')
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    for i in range(len(polymorph)):
        plt.plot(V_axis,Prob_V[i,:]/(V_axis[len(V_axis)-1]-V_axis[0])/10,color=colors[i]) #Plot the free energy
        # NOTE(review): V_NPT and N_k are only filled by the commented-out
        # volume-reading block above, so these histograms are built from
        # empty slices -- confirm whether that block should be re-enabled.
        Volumes = V_NPT[i,0:N_k[i]-1]
        weights = numpy.ones_like(Volumes)/len(Volumes)
        plt.hist(Volumes,20,alpha=0.3,color=colors[i],label=polymorph[i],weights=weights)
    #filename = 'All_' + str(Molecules) + '_' + str(int(Temp)) + 'K'+'_dAvsV.png'
    plt.show()
    #plt.savefig(filename, bbox_inches='tight')
|
{"hexsha": "b3e1aa4c5b30210c9f844799ea39b57b4315280c", "size": 17737, "ext": "py", "lang": "Python", "max_stars_repo_path": "PSCP/analysis-scripts/dA-to-dG.py", "max_stars_repo_name": "shirtsgroup/finite-temperature-crystal-scripts", "max_stars_repo_head_hexsha": "799bc882d958d9afa264a168dae0b3051bafaf0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PSCP/analysis-scripts/dA-to-dG.py", "max_issues_repo_name": "shirtsgroup/finite-temperature-crystal-scripts", "max_issues_repo_head_hexsha": "799bc882d958d9afa264a168dae0b3051bafaf0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-07-25T04:59:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T22:48:53.000Z", "max_forks_repo_path": "PSCP/analysis-scripts/dA-to-dG.py", "max_forks_repo_name": "shirtsgroup/finite-temperature-crystal-scripts", "max_forks_repo_head_hexsha": "799bc882d958d9afa264a168dae0b3051bafaf0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-04T07:01:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-04T07:01:25.000Z", "avg_line_length": 44.1218905473, "max_line_length": 199, "alphanum_fraction": 0.6277837289, "include": true, "reason": "import numpy", "num_tokens": 5060}
|
"""
Stores data in a custom class and generates attributes for other modules
"""
import warnings
import numpy as np
from skhyper.view import hsiPlot
from sklearn.decomposition import PCA
class Process:
"""
Process object to store the hyperspectral array.
Parameters
----------
X : array, dimensions (3 or 4)
The hyperspectral data. It should be 3- or 4-dimensional in the form:
X_3d = [x, y, spectrum] or
X_4d = [x, y, z, spectrum]
scale : bool
Scales the spectra to either between {0, 1} or {-1, 1} depending on presence of negative values.
Attributes
----------
shape : array
Returns the shape of the hyperspectral array
n_dimension : int
Returns the number of dimensions of the hyperspectral array (3 or 4)
n_features : int
Returns the number of spectral points (features)
n_samples : int
Returns the total number of pixels in the hyperspectral array
flat : array, dimension (2)
Returns a flattened 2-d version of the hyperspectral array
image : array, shape(x, y, (z))
Returns the image averaged over the selected spectral range
spectrum : array, shape(n_features)
Returns the spectrum averaged over the selected pixels
mean_image : array, shape(x, y, (z))
Returns the image averaged over the entire spectral range
mean_spectrum : array, shape(n_features)
Returns the spectrum averaged over all the pixels
Examples
--------
>>> import numpy as np
>>> from skhyper.process import Process
>>>
>>> test_data = np.random.rand(100, 100, 10, 1024)
>>> X = Process(test_data, scale=True)
>>>
>>> X.n_dimension
4
>>>
>>> X.n_features
1024
>>>
>>> X.n_samples
100000
"""
def __init__(self, X, scale=True):
self.data = X
self._scale = scale
# Data properties
self.shape = None
self.n_dimension = None
self.n_features = None
self.n_samples = None
# Hyperspectral image/spectrum
self.image = None
self.spectrum = None
self.mean_image = None
self.mean_spectrum = None
self.update()
def __getitem__(self, item):
return self.data[item]
def update(self):
""" Update properties of the hyperspectral array
This should be called whenever `X.data` is directly modified to update the attributes
of the `X` object.
"""
# Perform data operations
self._data_checks()
if self._scale: self._data_scale()
self._data_mean()
self._data_access()
def _data_checks(self):
if type(self.data) != np.ndarray:
raise TypeError('Data must be a numpy array')
self.shape = self.data.shape
self.n_dimension = len(self.shape)
if self.n_dimension != 3 and self.n_dimension != 4:
raise TypeError('Data must be 3- or 4- dimensional.')
if self.n_dimension == 3:
self.n_samples = self.shape[0] * self.shape[1]
self.n_features = self.shape[2]
if not self.n_samples > self.n_features:
# raise TypeError('The number of samples must be greater than the number of features')
warnings.warn('n_samples (number of pixels) should be greater than n_features (spectral points)')
elif self.n_dimension == 4:
self.n_samples = self.shape[0] * self.shape[1] * self.shape[2]
self.n_features = self.shape[3]
if not self.n_samples > self.n_features:
raise TypeError('The number of samples must be greater than the number of features')
def _data_flatten(self):
if self.n_dimension == 3:
return np.reshape(self.data, (self.shape[0] * self.shape[1], self.shape[2]))
elif self.n_dimension == 4:
return np.reshape(self.data, (self.shape[0] * self.shape[1] * self.shape[2], self.shape[3]))
def _data_mean(self):
if self.n_dimension == 3:
self.mean_image = np.squeeze(np.mean(self.data, 2))
self.mean_spectrum = np.squeeze(np.mean(np.mean(self.data, 1), 0))
elif self.n_dimension == 4:
self.mean_image = np.squeeze(np.mean(self.data, 3))
self.mean_spectrum = np.squeeze(np.mean(np.mean(np.mean(self.data, 2), 1), 0))
def _data_scale(self):
self.data = self.data / np.abs(np.max(self.data))
def _data_access(self):
self.image = _AccessImage(self.data, self.shape, self.n_dimension)
self.spectrum = _AccessSpectrum(self.data, self.shape, self.n_dimension)
def view(self):
""" Hyperspectral viewer
Opens a hyperspectral viewer with the hyperspectral array loaded (pyqt GUI)
"""
hsiPlot(self)
def flatten(self):
"""Flatten the hyperspectral data
Flattens the hyperspectral data from 3d/4d to 2d by unravelling the pixel order.
"""
return self._data_flatten()
def scree(self):
"""Returns the array for the scree plot
Returns the scree plot from `PCA` as an array.
Returns
-------
scree : array, shape (n_features,)
"""
mdl = PCA()
mdl.fit_transform(self.flatten())
scree = mdl.explained_variance_ratio_
return scree
class _AccessImage:
def __init__(self, X, shape, n_dimension):
self.data = X
self.shape = shape
self.n_dimension = n_dimension
def __getitem__(self, item):
if self.n_dimension == 3:
return np.squeeze(np.mean(self.data[item], 2))
elif self.n_dimension == 4:
return np.squeeze(np.mean(self.data[item], 3))
class _AccessSpectrum:
def __init__(self, X, shape, n_dimension):
self.data = X
self.shape = shape
self.n_dimension = n_dimension
def __getitem__(self, item):
if self.n_dimension == 3:
return np.squeeze(np.mean(np.mean(self.data[item], 1), 0))
elif self.n_dimension == 4:
return np.squeeze(np.mean(np.mean(np.mean(self.data[item], 2), 1), 0))
|
{"hexsha": "e12cd8ac76cd95201e33aab900e9caae3ea542d1", "size": 6229, "ext": "py", "lang": "Python", "max_stars_repo_path": "skhyper/process/_process.py", "max_stars_repo_name": "tensor-strings/scikit-hyper", "max_stars_repo_head_hexsha": "795a0b48d92c644ef3662e9957c04e5a9a6f85b2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skhyper/process/_process.py", "max_issues_repo_name": "tensor-strings/scikit-hyper", "max_issues_repo_head_hexsha": "795a0b48d92c644ef3662e9957c04e5a9a6f85b2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skhyper/process/_process.py", "max_forks_repo_name": "tensor-strings/scikit-hyper", "max_forks_repo_head_hexsha": "795a0b48d92c644ef3662e9957c04e5a9a6f85b2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2441314554, "max_line_length": 113, "alphanum_fraction": 0.6086049125, "include": true, "reason": "import numpy", "num_tokens": 1475}
|
import argparse
import os
import torch
import yaml
import numpy as np
import torch.nn.functional as F
import cv2
import config_folder as cf
from data_loaders.Chairs import Chairs
from data_loaders.kitti import KITTI
from data_loaders.sintel import Sintel
from data_loaders.KLens import KLens
from frame_utils import writeFlow
import flow_viz
from model import MaskFlownet, MaskFlownet_S, Upsample, EpeLossWithMask
def disparity_writeout(disp, path_ref, path_meas, mask):
    """Write flow/mask outputs for one reference/measurement image pair.

    Writes three files under ``./out_images/`` named
    ``<ref-stem>_<meas-stem>``: the raw flow (``.flo``), a color-coded
    flow visualisation (``_flow.jpg``), and the occlusion mask scaled to
    0-255 (``_mask.png``).

    Parameters
    ----------
    disp : numpy.ndarray
        Flow/disparity field, passed to writeFlow and flow_viz.
    path_ref, path_meas : str
        Paths of the reference and measurement images; only their
        basenames (without extension) are used for the output names.
    mask : numpy.ndarray
        Occlusion mask in [0, 1]; multiplied by 255 before writing.

    Fix: the original repeated the basename/splitext expression three
    times; it is now computed once via a helper.
    """
    root_path = "./out_images/"
    if not os.path.exists(root_path):
        os.makedirs(root_path)

    def _stem(path):
        # File name without directory or extension.
        return os.path.basename(os.path.splitext(path)[0])

    base = _stem(path_ref) + "_" + _stem(path_meas)

    # Raw flow in Middlebury .flo format.
    writeFlow(os.path.join(root_path, base + ".flo"), disp)

    # Color-coded flow visualisation (flow_viz returns RGB; reorder to
    # BGR for cv2.imwrite).
    cv2.imwrite(os.path.join(root_path, base + "_flow.jpg"),
                flow_viz.flow_to_image(disp)[:, :, [2, 1, 0]])

    # Occlusion mask scaled to 0-255.
    cv2.imwrite(os.path.join(root_path, base + "_mask.png"), mask * 255)
def centralize(img1, img2):
    """Subtract the joint per-channel mean of a (B, 3, H, W) image pair.

    Returns ``(img1 - mean, img2 - mean, mean)`` where ``mean`` has shape
    (B, 3, 1, 1) and is computed over both images together.
    """
    stacked = torch.cat((img1, img2), 2)
    batch = stacked.shape[0]
    rgb_mean = stacked.view(batch, 3, -1).mean(2).view(batch, 3, 1, 1)
    return img1 - rgb_mean, img2 - rgb_mean, rgb_mean
# ---- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, nargs='?', default=None)
parser.add_argument('--dataset_cfg', type=str, default='chairs.yaml')
parser.add_argument('-c', '--checkpoint', type=str, default=None,
                    help='model checkpoint to load')
parser.add_argument('-b', '--batch', type=int, default=1,
                    help='Batch Size')
parser.add_argument('-f', '--root_folder', type=str, default=None,
                    help='Root folder of KITTI')
parser.add_argument('--resize', type=str, default='')
args = parser.parse_args()

# Optional "H,W" resize target forwarded to the dataset loader.
resize = (int(args.resize.split(',')[0]), int(
    args.resize.split(',')[1])) if args.resize else None
num_workers = 2

# ---- configuration ----------------------------------------------------------
print(os.path.join('config_folder', args.dataset_cfg))
# NOTE(review): yaml.load() without an explicit Loader is deprecated and
# unsafe for untrusted files; consider yaml.safe_load().
with open(os.path.join('config_folder', args.dataset_cfg)) as f:
    config = cf.Reader(yaml.load(f))
with open(os.path.join('config_folder', args.config)) as f:
    config_model = cf.Reader(yaml.load(f))

# ---- model ------------------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# NOTE(review): eval() instantiates the class named in the config
# (MaskFlownet / MaskFlownet_S); it executes config text as code, so the
# config file must be trusted.
net = eval(config_model.value['network']['class'])(config)
checkpoint = torch.load(os.path.join('weights', args.checkpoint))
net.load_state_dict(checkpoint)
net = net.to(device)

# ---- dataset selection ------------------------------------------------------
if config.value['dataset'] == 'kitti':
    dataset = KITTI(args.root_folder, split='train',
                    editions='mixed', resize=resize, parts='valid')
elif config.value['dataset'] == 'chairs':
    dataset = Chairs(args.root_folder, split='valid')
elif config.value['dataset'] == 'sintel':
    dataset = Sintel(args.root_folder, split='valid', subset='final')
elif config.value['dataset'] == 'klens':
    dataset = KLens()
data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          shuffle=False,
                                          batch_size=args.batch,
                                          num_workers=num_workers,
                                          drop_last=False,
                                          pin_memory=True)

# ---- inference loop ---------------------------------------------------------
epe = []  # NOTE(review): never filled — the EPE line below is commented out.
for idx, sample in enumerate(data_loader):
    with torch.no_grad():
        im0, im1, label, mask, path,raftflow = sample
        if config.value['dataset'] == 'klens':
            im0_path = path[0]
            im1_path = path[1]
        # HWC -> CHW, then subtract the joint per-channel mean.
        im0 = im0.permute(0, 3, 1, 2)
        im1 = im1.permute(0, 3, 1, 2)
        im0, im1, _ = centralize(im0, im1)
        shape = im0.shape
        # Pad spatial dims up to the next multiple of 64 via resampling.
        pad_h = (64 - shape[2] % 64) % 64
        pad_w = (64 - shape[3] % 64) % 64
        if pad_h != 0 or pad_w != 0:
            im0 = F.interpolate(
                im0, size=[shape[2] + pad_h, shape[3] + pad_w], mode='bilinear')
            im1 = F.interpolate(
                im1, size=[shape[2] + pad_h, shape[3] + pad_w], mode='bilinear')
        im0 = im0.to(device)
        im1 = im1.to(device)
        # NOTE(review): im0_path/im1_path are only bound in the 'klens'
        # branch above — this call presumably assumes the klens dataset;
        # confirm before running other datasets through this script.
        pred, flows, warpeds = net(im0, im1, raftflow, im0_path, im1_path)
        # Upsample the coarse network outputs by 4x back to input scale.
        up_flow = Upsample(pred[-1], 4)
        up_occ_mask = Upsample(flows[0], 4)
        if pad_h != 0 or pad_w != 0:
            # Undo the padding; flow vectors are rescaled by the size ratio.
            up_flow = F.interpolate(up_flow, size=[shape[2], shape[3]], mode='bilinear') * \
                torch.tensor([shape[d] / up_flow.shape[d]
                              for d in (2, 3)], device=device).view(1, 2, 1, 1)
            up_occ_mask = F.interpolate(
                up_occ_mask, size=[shape[2], shape[3]], mode='bilinear')
        print("left : ",im0_path[0], "right : ",im1_path[0])
        if config.value['dataset'] == 'klens':
            for i in range(up_flow.shape[0]):
                disparity_writeout(
                    up_flow[i].permute(1, 2, 0).cpu().numpy(),
                    im0_path[i],
                    im1_path[i],
                    up_occ_mask[i].permute(1, 2, 0).cpu().numpy(),
                )
        #epe.append(EpeLossWithMask()(up_flow, label, mask).detach())
        # Flip the flow to get the final prediction
        #final_flow = up_flow.flip(1)
print("\n\nCheck out_images for output!\n\n")
|
{"hexsha": "a7d7526e883c968d05c539bf72d1a15ba0c94dda", "size": 5720, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict.py", "max_stars_repo_name": "klens-codes/MaskFlownet-Pytorch", "max_stars_repo_head_hexsha": "94d41fd20f774845a1b2df7f77ec95c44217af94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict.py", "max_issues_repo_name": "klens-codes/MaskFlownet-Pytorch", "max_issues_repo_head_hexsha": "94d41fd20f774845a1b2df7f77ec95c44217af94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict.py", "max_forks_repo_name": "klens-codes/MaskFlownet-Pytorch", "max_forks_repo_head_hexsha": "94d41fd20f774845a1b2df7f77ec95c44217af94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8735632184, "max_line_length": 88, "alphanum_fraction": 0.5652097902, "include": true, "reason": "import numpy", "num_tokens": 1455}
|
"""Script to make the obiwan scaling plots in my thesis"""
if __name__ == "__main__":
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import subprocess
import os
import pickle
import json
import re
import pandas as pd
# Tractor pipeline stages whose wall-clock timings are parsed out of the
# log files (order defines the output column order).
STAGES=['tims', 'mask_junk', 'srcs',
        'fitblobs', 'coadds', 'writecat']
def add_scatter(ax, x, y, c='b', m='o', lab='hello', s=80, drawln=False):
    """Draw hollow scatter markers on *ax*, optionally joined by a line.

    Parameters
    ----------
    ax : matplotlib axes
    x, y : array-like coordinates
    c : edge/line color;  m : marker;  lab : legend label;  s : marker size
    drawln : bool
        If True, also draw a solid line through the points.
    """
    ax.scatter(x, y, s=s, lw=2., facecolors='none', edgecolors=c,
               marker=m, label=lab)
    if drawln:
        ax.plot(x, y, c=c, ls='-')
class Plots(object):
    """Lightweight holder for a timings table used by the plot helpers."""

    def __init__(self, tm):
        # tm: timings table (stored as-is).
        self.tm = tm
# NOTE(review): defined immediately after Plots.__init__ but takes no
# `self` — presumably intended as a module-level function; confirm its
# placement in the upstream file.
def tractor_profile_plots(tm,name='tmp.png'):
    """Plot serial vs. parallel wall time (minutes, log scale) for each
    Tractor stage in the table `tm` and save the figure to `name`."""
    fig,ax=plt.subplots()
    xvals= np.arange(tm['stage'].size)+1
    print(tm['parallel'])
    add_scatter(ax,xvals, tm['serial']/60., c='b',m='o',lab='serial',drawln=True)
    add_scatter(ax,xvals, tm['parallel']/60., c='g',m='o',lab='parallel',drawln=True)
    plt.legend(loc='lower right',scatterpoints=1)
    #add_scatter(ax,xvals, tm['total']/60., c='b',m='o',lab='total')
    ax.set_xticks(xvals)
    ax.set_xticklabels(tm['stage'],rotation=45, ha='right')
    ax.set_yscale('log')
    ax.set_ylim([1e-3,1e2])
    # NOTE(review): names are swapped — `xlab` holds the y-axis label
    # artist and `ylab` the x-axis one; harmless since both are only
    # passed to bbox_extra_artists below.
    xlab=ax.set_ylabel('Wall Time (min)')
    ylab=ax.set_xlabel('Tractor Stage')
    plt.savefig(name, bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
    plt.close()
def plot_wall_node(d):
    """Plot mean wall time (minutes) versus cores/nodes and save it.

    Parameters
    ----------
    d : mapping with arrays 'nodes', 'cores', 'tims_mean', 'fit_mean',
        'tot_mean' (times in seconds).

    Saves the figure to 'wall_v_nodes.png'.
    """
    name = 'wall_v_nodes.png'
    fig, ax = plt.subplots()
    xvals = np.arange(d['nodes'].size) + 1
    # One curve per timing column, seconds converted to minutes.
    for key, color, label in (('tims_mean', 'b', 'tims'),
                              ('fit_mean', 'g', 'fit'),
                              ('tot_mean', 'k', 'total')):
        add_scatter(ax, xvals, d[key] / 60., c=color, m='o', lab=label,
                    drawln=True)
    plt.legend(loc='lower right', scatterpoints=1)
    ax.set_xticks(xvals)
    # Tick labels read "cores/nodes" for each run.
    tick_names = ['%d/%d' % (cores, nodes)
                  for cores, nodes in zip(d['cores'], d['nodes'])]
    ax.set_xticklabels(tick_names, rotation=45, ha='right')
    ylab = ax.set_ylabel('Wall Time (min)')
    xlab = ax.set_xlabel('Cores/Nodes')
    plt.savefig(name, bbox_extra_artists=[xlab, ylab], bbox_inches='tight',
                dpi=150)
    plt.close()
def params_of_run(bigstring):
    """Extract the run parameters embedded in an obiwan log file.

    Parameters
    ----------
    bigstring : str
        Full text of one log file.

    Returns
    -------
    dict with string values for keys 'rsdir' (rowstart), 'nobj',
    'brick', and 'cores' (threads).

    Cleanup: uses ``re.Match.group(0)`` instead of slicing the
    undocumented ``match.regs`` attribute (identical text either way).
    """
    def get_param(expr, bigstring):
        # First match of `expr`, keeping the text right of '=' and
        # stripping the trailing comma.
        m = re.search(expr, bigstring)
        return m.group(0).split('=')[1].replace(',', '')

    d = {}
    d['rsdir'] = get_param(r'rowstart=[0-9]+,', bigstring)
    d['nobj'] = get_param(r'nobj=[0-9]+,', bigstring)
    # Brick names look like 1234m123 / 1234p123; drop the quotes too.
    d['brick'] = get_param(r"brick='[0-9]{4}[mp][0-9]{3}',", bigstring).replace("'", '')
    d['cores'] = get_param(r'threads=[0-9]+,', bigstring)
    return d
def number_injected(bigstring, nobj=None):
    """Fraction of the requested sources actually injected into a brick.

    Parses the decals_sim log line reporting how many sources were
    skipped ("... flagged as nearby N").

    Parameters
    ----------
    bigstring : str
        Full text of one log file.
    nobj : int
        Number of sources requested for the run.

    Returns
    -------
    dict with key 'frac_injected' = (nobj - N) / nobj.

    Bug fix: the original pattern ended in the LAZY quantifier
    ``[0-9]+?``, which matches only the first digit of a multi-digit
    skip count (e.g. '12' -> '1'); the greedy ``[0-9]+`` captures the
    whole number.
    """
    m = re.search(r'INFO:decals_sim:sources.*?flagged as nearby [0-9]+', bigstring)
    n_skip = m.group(0).split(' ')[-1]
    return {'frac_injected': (nobj - int(n_skip)) / float(nobj)}
def time_total(bigstring):
"""Returns dict of seconds spent in total"""
# rsdir
ts={}
for text in ['started','finshed']:
a=re.search(r'obiwan %s at.*?\n' % text,bigstring)
ymd,t = tuple(bigstring[slice(a.regs[0][0],a.regs[0][1])]
.strip()
.split(' ')[-2:])
ts[text]= pd.Timestamp('%s %s' % (ymd,t))
return dict(total_sec=(ts['finshed'] - ts['started']).total_seconds())
def time_per_stage(bigstring):
"""Returns dict of seconds spend in each stage"""
# rsdir
a=re.search(r'rowstart=[0-9]+,',bigstring)
rsdir= (bigstring[slice(a.regs[0][0],a.regs[0][1])]
.split('=')[1]
.replace(',',''))
rsdir= (bigstring[slice(a.regs[0][0],a.regs[0][1])]
.split('=')[1]
.replace(',',''))
t={}
for stage in STAGES:
a=re.search(r'Resources for stage %s(.*\n)*?Grand total Wall:.*\n' % stage,
bigstring)
print('stage=%s, a=' % stage,a)
lines= bigstring[slice(a.regs[0][0],a.regs[0][1])].split('\n')
print('lines=',lines)
lines= pd.Series(lines)
line= lines[lines.str.contains('Grand total Wall')].str.split(r'\s+')
assert(line.size == 1)
assert(line.str[-1].values[0] == 'sec')
t[stage]=line.str[-2].values[0]
return t
def write_header(savenm):
    """Create *savenm* and write the space-separated column header row.

    Columns: nobj brick rsdir frac_injected cores, one column per stage
    in STAGES, then total_sec.
    """
    columns = ['nobj', 'brick', 'rsdir', 'frac_injected', 'cores']
    columns += list(STAGES)
    columns.append('total_sec')
    with open(savenm, 'w') as foo:
        foo.write(' '.join(columns) + '\n')
    print('Wrote header %s' % savenm)
def write_measurements(d, savenm='test.txt'):
    """Append one space-separated measurement row for run *d* to *savenm*.

    Column order matches write_header: run parameters, per-stage wall
    times from STAGES, then the total.
    """
    fields = ['%s' % d['nobj'], '%s' % d['brick'], '%s' % d['rsdir'],
              '%.3f' % d['frac_injected'], '%s' % d['cores']]
    fields += ['%s' % d[stage] for stage in STAGES]
    fields.append('%s' % d['total_sec'])
    with open(savenm, 'a') as foo:
        foo.write(' '.join(fields) + '\n')
    print('Appended measurements %s' % savenm)
# NOTE(review): this is the second `if __name__ == '__main__'` guard in the
# file (the first wraps the imports at the top) — presumably intentional
# script layout; confirm upstream.
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser(description="test")
    parser.add_argument("--logfiles",action="store",required=True,
                        help="list of logfiles for the scaling run, e.g. nobj=500,1000,1500 for many bricks and rsdirs")
    parser.add_argument("--savenm",action="store",help='text file name to write measurements to',required=True)
    args = parser.parse_args()

    # Extract: parse each log file and append one measurement row per run.
    if not os.path.exists(args.savenm):
        write_header(args.savenm)
    fns= np.loadtxt(args.logfiles,dtype=str)
    for logfile in fns:
        with open(logfile,'r') as foo:
            bigstring= foo.read()
        # Merge the parameter, per-stage, and total-time dicts for this run.
        d= {**params_of_run(bigstring),
            **time_per_stage(bigstring),
            **time_total(bigstring),
            }
        d= {**d,
            **number_injected(bigstring,nobj=int(d['nobj']))
            }
        write_measurements(d, args.savenm)
    # Plots
    # NOTE(review): the table is loaded but nothing is plotted yet —
    # presumably a call to the plotting helpers was intended here.
    df= pd.read_csv(args.savenm,sep=' ')
|
{"hexsha": "21d9fa6e385542d6fdff98032817ac83eed2dc64", "size": 6261, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/obiwan/scaling/timing.py", "max_stars_repo_name": "legacysurvey/obiwan", "max_stars_repo_head_hexsha": "1810927aaad3cf458ada3962576355727c464ca6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-09-11T23:08:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-23T05:19:57.000Z", "max_issues_repo_path": "py/obiwan/scaling/timing.py", "max_issues_repo_name": "legacysurvey/obiwan", "max_issues_repo_head_hexsha": "1810927aaad3cf458ada3962576355727c464ca6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2017-08-15T00:47:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-12T21:53:35.000Z", "max_forks_repo_path": "py/obiwan/scaling/timing.py", "max_forks_repo_name": "legacysurvey/obiwan", "max_forks_repo_head_hexsha": "1810927aaad3cf458ada3962576355727c464ca6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-16T15:41:44.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-08T19:55:51.000Z", "avg_line_length": 37.2678571429, "max_line_length": 120, "alphanum_fraction": 0.5737102699, "include": true, "reason": "import numpy", "num_tokens": 1844}
|
# Calls func() on the globals `bf` and `csh`, wrapping the call in try()
# so that a failure is captured instead of aborting the caller.
# NOTE(review): `func`, `bf`, and `csh` are assumed to exist in the
# calling environment, and the try() result `m` is the (invisible)
# return value — confirm before use.
onefunc <- function()
{
    m <- try(func(infile = bf, csh = csh , pmod = ""))
}
|
{"hexsha": "a4e3c95e22f7d3e1cf2f0955853aceafff3b6576", "size": 81, "ext": "r", "lang": "R", "max_stars_repo_path": "_unittests/ut_languages/data/r4.r", "max_stars_repo_name": "mohamedelkansouli/Ensae_py2", "max_stars_repo_head_hexsha": "e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-07-19T21:20:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T11:50:53.000Z", "max_issues_repo_path": "_unittests/ut_languages/data/r4.r", "max_issues_repo_name": "mohamedelkansouli/Ensae_py2", "max_issues_repo_head_hexsha": "e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2015-06-16T15:38:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-29T11:04:01.000Z", "max_forks_repo_path": "_unittests/ut_languages/data/r4.r", "max_forks_repo_name": "mohamedelkansouli/Ensae_py2", "max_forks_repo_head_hexsha": "e54a05f90c6aa6e2a5065eac9f9ec10aca64b46a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2015-01-13T08:24:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T14:51:23.000Z", "avg_line_length": 16.2, "max_line_length": 54, "alphanum_fraction": 0.5061728395, "num_tokens": 26}
|
(************************************************************************)
(* v * The Coq Proof Assistant / The Coq Development Team *)
(* <O___,, * INRIA - CNRS - LIX - LRI - PPS - Copyright 1999-2011 *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(************************************************************************)
(*i $Id: Ranalysis2.v 14641 2011-11-06 11:59:10Z herbelin $ i*)
Require Import Rbase.
Require Import Rfunctions.
Require Import Ranalysis1.
Open Local Scope R_scope.
(**********)
(* Algebraic identity behind the quotient rule: the difference quotient
   of f1/f2 minus the candidate derivative (l1*f2 x - l2*f1 x)/(f2 x)^2
   is rewritten as a sum of four terms, each of which is bounded by
   eps/4 in the maj_term* lemmas below. *)
Lemma formule :
  forall (x h l1 l2:R) (f1 f2:R -> R),
    h <> 0 ->
    f2 x <> 0 ->
    f2 (x + h) <> 0 ->
    (f1 (x + h) / f2 (x + h) - f1 x / f2 x) / h -
    (l1 * f2 x - l2 * f1 x) / Rsqr (f2 x) =
    / f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1) +
    l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h)) -
    f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2) +
    l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x).
Proof.
  intros; unfold Rdiv, Rminus, Rsqr in |- *.
  (* Expand all products/inverses, then normalize each summand with [ring]
     so the nonzero hypotheses can cancel the f2 factors. *)
  repeat rewrite Rmult_plus_distr_r; repeat rewrite Rmult_plus_distr_l;
    repeat rewrite Rinv_mult_distr; try assumption.
  replace (l1 * f2 x * (/ f2 x * / f2 x)) with (l1 * / f2 x * (f2 x * / f2 x));
    [ idtac | ring ].
  replace (l1 * (/ f2 x * / f2 (x + h)) * f2 x) with
    (l1 * / f2 (x + h) * (f2 x * / f2 x)); [ idtac | ring ].
  replace (l1 * (/ f2 x * / f2 (x + h)) * - f2 (x + h)) with
    (- (l1 * / f2 x * (f2 (x + h) * / f2 (x + h)))); [ idtac | ring ].
  replace (f1 x * (/ f2 x * / f2 (x + h)) * (f2 (x + h) * / h)) with
    (f1 x * / f2 x * / h * (f2 (x + h) * / f2 (x + h)));
    [ idtac | ring ].
  replace (f1 x * (/ f2 x * / f2 (x + h)) * (- f2 x * / h)) with
    (- (f1 x * / f2 (x + h) * / h * (f2 x * / f2 x)));
    [ idtac | ring ].
  replace (l2 * f1 x * (/ f2 x * / f2 x * / f2 (x + h)) * f2 (x + h)) with
    (l2 * f1 x * / f2 x * / f2 x * (f2 (x + h) * / f2 (x + h)));
    [ idtac | ring ].
  replace (l2 * f1 x * (/ f2 x * / f2 x * / f2 (x + h)) * - f2 x) with
    (- (l2 * f1 x * / f2 x * / f2 (x + h) * (f2 x * / f2 x)));
    [ idtac | ring ].
  repeat rewrite <- Rinv_r_sym; try assumption || ring.
  apply prod_neq_R0; assumption.
Qed.
(* begin hide *)
Notation Rmin_pos := Rmin_pos (only parsing). (* compat *)
(* end hide *)

(* Bounds the first term of [formule],
   /f2(x+h) * ((f1(x+h) - f1 x)/h - l1), by eps/4: the local lower bound
   on |f2| near x gives the factor 2/|f2 x|, and the differentiability
   hypothesis on f1 bounds the difference quotient by |eps * f2 x / 8|. *)
Lemma maj_term1 :
  forall (x h eps l1 alp_f2:R) (eps_f2 alp_f1d:posreal)
    (f1 f2:R -> R),
    0 < eps ->
    f2 x <> 0 ->
    f2 (x + h) <> 0 ->
    (forall h:R,
       h <> 0 ->
       Rabs h < alp_f1d ->
       Rabs ((f1 (x + h) - f1 x) / h - l1) < Rabs (eps * f2 x / 8)) ->
    (forall a:R,
       Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) ->
    h <> 0 ->
    Rabs h < alp_f1d ->
    Rabs h < Rmin eps_f2 alp_f2 ->
    Rabs (/ f2 (x + h) * ((f1 (x + h) - f1 x) / h - l1)) < eps / 4.
Proof.
  intros.
  assert (H7 := H3 h H6).
  assert (H8 := H2 h H4 H5).
  apply Rle_lt_trans with
    (2 / Rabs (f2 x) * Rabs ((f1 (x + h) - f1 x) / h - l1)).
  rewrite Rabs_mult.
  apply Rmult_le_compat_r.
  apply Rabs_pos.
  rewrite Rabs_Rinv; [ left; exact H7 | assumption ].
  apply Rlt_le_trans with (2 / Rabs (f2 x) * Rabs (eps * f2 x / 8)).
  apply Rmult_lt_compat_l.
  unfold Rdiv in |- *; apply Rmult_lt_0_compat;
    [ prove_sup0 | apply Rinv_0_lt_compat; apply Rabs_pos_lt; assumption ].
  exact H8.
  right; unfold Rdiv in |- *.
  repeat rewrite Rabs_mult.
  rewrite Rabs_Rinv; discrR.
  replace (Rabs 8) with 8.
  (* NOTE(review): 'replace 8 with 8' below looks like a transcription
     artifact — the upstream source presumably reads
     'replace 8 with (2 * 4)' (matching the '/ 2 * / 4' two lines down).
     Left byte-identical pending verification against upstream. *)
  replace 8 with 8; [ idtac | ring ].
  rewrite Rinv_mult_distr; [ idtac | discrR | discrR ].
  replace (2 * / Rabs (f2 x) * (Rabs eps * Rabs (f2 x) * (/ 2 * / 4))) with
    (Rabs eps * / 4 * (2 * / 2) * (Rabs (f2 x) * / Rabs (f2 x)));
    [ idtac | ring ].
  replace (Rabs eps) with eps.
  repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption).
  ring.
  symmetry in |- *; apply Rabs_right; left; assumption.
  symmetry in |- *; apply Rabs_right; left; prove_sup.
Qed.
(* Bounds the second term of [formule],
   l1/(f2 x * f2(x+h)) * (f2 x - f2(x+h)), by eps/4, using the local
   continuity bound on f2 (|f2(x+a) - f2 x| < |eps*(f2 x)^2/(8*l1)|)
   and the local lower bound on |f2|.  Requires l1 <> 0 so the bound
   denominator is well-defined. *)
Lemma maj_term2 :
  forall (x h eps l1 alp_f2 alp_f2t2:R) (eps_f2:posreal)
    (f2:R -> R),
    0 < eps ->
    f2 x <> 0 ->
    f2 (x + h) <> 0 ->
    (forall a:R,
       Rabs a < alp_f2t2 ->
       Rabs (f2 (x + a) - f2 x) < Rabs (eps * Rsqr (f2 x) / (8 * l1))) ->
    (forall a:R,
       Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) ->
    h <> 0 ->
    Rabs h < alp_f2t2 ->
    Rabs h < Rmin eps_f2 alp_f2 ->
    l1 <> 0 -> Rabs (l1 / (f2 x * f2 (x + h)) * (f2 x - f2 (x + h))) < eps / 4.
Proof.
  intros.
  assert (H8 := H3 h H6).
  assert (H9 := H2 h H5).
  apply Rle_lt_trans with
    (Rabs (l1 / (f2 x * f2 (x + h))) * Rabs (eps * Rsqr (f2 x) / (8 * l1))).
  rewrite Rabs_mult; apply Rmult_le_compat_l.
  apply Rabs_pos.
  rewrite <- (Rabs_Ropp (f2 x - f2 (x + h))); rewrite Ropp_minus_distr.
  left; apply H9.
  apply Rlt_le_trans with
    (Rabs (2 * (l1 / (f2 x * f2 x))) * Rabs (eps * Rsqr (f2 x) / (8 * l1))).
  apply Rmult_lt_compat_r.
  apply Rabs_pos_lt.
  unfold Rdiv in |- *; unfold Rsqr in |- *; repeat apply prod_neq_R0;
    try assumption || discrR.
  red in |- *; intro H10; rewrite H10 in H; elim (Rlt_irrefl _ H).
  apply Rinv_neq_0_compat; apply prod_neq_R0; try assumption || discrR.
  unfold Rdiv in |- *.
  repeat rewrite Rinv_mult_distr; try assumption.
  repeat rewrite Rabs_mult.
  replace (Rabs 2) with 2.
  rewrite (Rmult_comm 2).
  replace (Rabs l1 * (Rabs (/ f2 x) * Rabs (/ f2 x)) * 2) with
    (Rabs l1 * (Rabs (/ f2 x) * (Rabs (/ f2 x) * 2)));
    [ idtac | ring ].
  repeat apply Rmult_lt_compat_l.
  apply Rabs_pos_lt; assumption.
  apply Rabs_pos_lt; apply Rinv_neq_0_compat; assumption.
  repeat rewrite Rabs_Rinv; try assumption.
  rewrite <- (Rmult_comm 2).
  unfold Rdiv in H8; exact H8.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  right.
  unfold Rsqr, Rdiv in |- *.
  do 1 rewrite Rinv_mult_distr; try assumption || discrR.
  do 1 rewrite Rinv_mult_distr; try assumption || discrR.
  repeat rewrite Rabs_mult.
  repeat rewrite Rabs_Rinv; try assumption || discrR.
  replace (Rabs eps) with eps.
  replace (Rabs 8) with 8.
  replace (Rabs 2) with 2.
  replace 8 with (4 * 2); [ idtac | ring ].
  rewrite Rinv_mult_distr; discrR.
  replace
    (2 * (Rabs l1 * (/ Rabs (f2 x) * / Rabs (f2 x))) *
     (eps * (Rabs (f2 x) * Rabs (f2 x)) * (/ 4 * / 2 * / Rabs l1))) with
    (eps * / 4 * (Rabs l1 * / Rabs l1) * (Rabs (f2 x) * / Rabs (f2 x)) *
     (Rabs (f2 x) * / Rabs (f2 x)) * (2 * / 2)); [ idtac | ring ].
  repeat rewrite <- Rinv_r_sym; try (apply Rabs_no_R0; assumption) || discrR.
  ring.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  symmetry in |- *; apply Rabs_right; left; prove_sup.
  symmetry in |- *; apply Rabs_right; left; assumption.
Qed.
(* Bounds the third term of [formule],
   f1 x/(f2 x * f2(x+h)) * ((f2(x+h) - f2 x)/h - l2), by eps/4, using
   the differentiability hypothesis on f2 (difference quotient within
   |(f2 x)^2 * eps/(8 * f1 x)| of l2) and the local lower bound on |f2|.
   Requires f1 x <> 0 so the bound denominator is well-defined. *)
Lemma maj_term3 :
  forall (x h eps l2 alp_f2:R) (eps_f2 alp_f2d:posreal)
    (f1 f2:R -> R),
    0 < eps ->
    f2 x <> 0 ->
    f2 (x + h) <> 0 ->
    (forall h:R,
       h <> 0 ->
       Rabs h < alp_f2d ->
       Rabs ((f2 (x + h) - f2 x) / h - l2) <
       Rabs (Rsqr (f2 x) * eps / (8 * f1 x))) ->
    (forall a:R,
       Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) ->
    h <> 0 ->
    Rabs h < alp_f2d ->
    Rabs h < Rmin eps_f2 alp_f2 ->
    f1 x <> 0 ->
    Rabs (f1 x / (f2 x * f2 (x + h)) * ((f2 (x + h) - f2 x) / h - l2)) <
    eps / 4.
Proof.
  intros.
  assert (H8 := H2 h H4 H5).
  assert (H9 := H3 h H6).
  apply Rle_lt_trans with
    (Rabs (f1 x / (f2 x * f2 (x + h))) * Rabs (Rsqr (f2 x) * eps / (8 * f1 x))).
  rewrite Rabs_mult.
  apply Rmult_le_compat_l.
  apply Rabs_pos.
  left; apply H8.
  apply Rlt_le_trans with
    (Rabs (2 * (f1 x / (f2 x * f2 x))) * Rabs (Rsqr (f2 x) * eps / (8 * f1 x))).
  apply Rmult_lt_compat_r.
  apply Rabs_pos_lt.
  unfold Rdiv in |- *; unfold Rsqr in |- *; repeat apply prod_neq_R0;
    try assumption.
  red in |- *; intro H10; rewrite H10 in H; elim (Rlt_irrefl _ H).
  apply Rinv_neq_0_compat; apply prod_neq_R0; discrR || assumption.
  unfold Rdiv in |- *.
  repeat rewrite Rinv_mult_distr; try assumption.
  repeat rewrite Rabs_mult.
  replace (Rabs 2) with 2.
  rewrite (Rmult_comm 2).
  replace (Rabs (f1 x) * (Rabs (/ f2 x) * Rabs (/ f2 x)) * 2) with
    (Rabs (f1 x) * (Rabs (/ f2 x) * (Rabs (/ f2 x) * 2)));
    [ idtac | ring ].
  repeat apply Rmult_lt_compat_l.
  apply Rabs_pos_lt; assumption.
  apply Rabs_pos_lt; apply Rinv_neq_0_compat; assumption.
  repeat rewrite Rabs_Rinv; assumption || idtac.
  rewrite <- (Rmult_comm 2).
  unfold Rdiv in H9; exact H9.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  right.
  unfold Rsqr, Rdiv in |- *.
  rewrite Rinv_mult_distr; try assumption || discrR.
  rewrite Rinv_mult_distr; try assumption || discrR.
  repeat rewrite Rabs_mult.
  repeat rewrite Rabs_Rinv; try assumption || discrR.
  replace (Rabs eps) with eps.
  replace (Rabs 8) with 8.
  replace (Rabs 2) with 2.
  replace 8 with (4 * 2); [ idtac | ring ].
  rewrite Rinv_mult_distr; discrR.
  replace
    (2 * (Rabs (f1 x) * (/ Rabs (f2 x) * / Rabs (f2 x))) *
     (Rabs (f2 x) * Rabs (f2 x) * eps * (/ 4 * / 2 * / Rabs (f1 x)))) with
    (eps * / 4 * (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) *
     (Rabs (f1 x) * / Rabs (f1 x)) * (2 * / 2)); [ idtac | ring ].
  repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption).
  ring.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  symmetry in |- *; apply Rabs_right; left; prove_sup.
  symmetry in |- *; apply Rabs_right; left; assumption.
Qed.
(* Bounds the fourth term of [formule],
   l2*f1 x/((f2 x)^2 * f2(x+h)) * (f2(x+h) - f2 x), by eps/4, using the
   local continuity bound on f2 (within |(f2 x)^2 * f2 x * eps/(8*f1 x*l2)|)
   and the local lower bound on |f2|.  Requires f1 x <> 0 and l2 <> 0 so
   the bound denominator is well-defined. *)
Lemma maj_term4 :
  forall (x h eps l2 alp_f2 alp_f2c:R) (eps_f2:posreal)
    (f1 f2:R -> R),
    0 < eps ->
    f2 x <> 0 ->
    f2 (x + h) <> 0 ->
    (forall a:R,
       Rabs a < alp_f2c ->
       Rabs (f2 (x + a) - f2 x) <
       Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))) ->
    (forall a:R,
       Rabs a < Rmin eps_f2 alp_f2 -> / Rabs (f2 (x + a)) < 2 / Rabs (f2 x)) ->
    h <> 0 ->
    Rabs h < alp_f2c ->
    Rabs h < Rmin eps_f2 alp_f2 ->
    f1 x <> 0 ->
    l2 <> 0 ->
    Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h)) * (f2 (x + h) - f2 x)) <
    eps / 4.
Proof.
  intros.
  assert (H9 := H2 h H5).
  assert (H10 := H3 h H6).
  apply Rle_lt_trans with
    (Rabs (l2 * f1 x / (Rsqr (f2 x) * f2 (x + h))) *
     Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))).
  rewrite Rabs_mult.
  apply Rmult_le_compat_l.
  apply Rabs_pos.
  left; apply H9.
  apply Rlt_le_trans with
    (Rabs (2 * l2 * (f1 x / (Rsqr (f2 x) * f2 x))) *
     Rabs (Rsqr (f2 x) * f2 x * eps / (8 * f1 x * l2))).
  apply Rmult_lt_compat_r.
  apply Rabs_pos_lt.
  unfold Rdiv in |- *; unfold Rsqr in |- *; repeat apply prod_neq_R0;
    assumption || idtac.
  red in |- *; intro H11; rewrite H11 in H; elim (Rlt_irrefl _ H).
  apply Rinv_neq_0_compat; apply prod_neq_R0.
  apply prod_neq_R0.
  discrR.
  assumption.
  assumption.
  unfold Rdiv in |- *.
  repeat rewrite Rinv_mult_distr;
    try assumption || (unfold Rsqr in |- *; apply prod_neq_R0; assumption).
  repeat rewrite Rabs_mult.
  replace (Rabs 2) with 2.
  replace
    (2 * Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 x)))) with
    (Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * (Rabs (/ f2 x) * 2))));
    [ idtac | ring ].
  replace
    (Rabs l2 * Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 (x + h)))) with
    (Rabs l2 * (Rabs (f1 x) * (Rabs (/ Rsqr (f2 x)) * Rabs (/ f2 (x + h)))));
    [ idtac | ring ].
  repeat apply Rmult_lt_compat_l.
  apply Rabs_pos_lt; assumption.
  apply Rabs_pos_lt; assumption.
  apply Rabs_pos_lt; apply Rinv_neq_0_compat; unfold Rsqr in |- *;
    apply prod_neq_R0; assumption.
  repeat rewrite Rabs_Rinv; [ idtac | assumption | assumption ].
  rewrite <- (Rmult_comm 2).
  unfold Rdiv in H10; exact H10.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  right; unfold Rsqr, Rdiv in |- *.
  rewrite Rinv_mult_distr; try assumption || discrR.
  rewrite Rinv_mult_distr; try assumption || discrR.
  rewrite Rinv_mult_distr; try assumption || discrR.
  rewrite Rinv_mult_distr; try assumption || discrR.
  repeat rewrite Rabs_mult.
  repeat rewrite Rabs_Rinv; try assumption || discrR.
  replace (Rabs eps) with eps.
  replace (Rabs 8) with 8.
  replace (Rabs 2) with 2.
  replace 8 with (4 * 2); [ idtac | ring ].
  rewrite Rinv_mult_distr; discrR.
  replace
    (2 * Rabs l2 *
     (Rabs (f1 x) * (/ Rabs (f2 x) * / Rabs (f2 x) * / Rabs (f2 x))) *
     (Rabs (f2 x) * Rabs (f2 x) * Rabs (f2 x) * eps *
      (/ 4 * / 2 * / Rabs (f1 x) * / Rabs l2))) with
    (eps * / 4 * (Rabs l2 * / Rabs l2) * (Rabs (f1 x) * / Rabs (f1 x)) *
     (Rabs (f2 x) * / Rabs (f2 x)) * (Rabs (f2 x) * / Rabs (f2 x)) *
     (Rabs (f2 x) * / Rabs (f2 x)) * (2 * / 2)); [ idtac | ring ].
  repeat rewrite <- Rinv_r_sym; try discrR || (apply Rabs_no_R0; assumption).
  ring.
  symmetry in |- *; apply Rabs_right; left; prove_sup0.
  symmetry in |- *; apply Rabs_right; left; prove_sup.
  symmetry in |- *; apply Rabs_right; left; assumption.
  apply prod_neq_R0; assumption || discrR.
  apply prod_neq_R0; assumption.
Qed.
(* When a <> 0, the points x and x + a are distinct, so the pair
   satisfies the (trivial-predicate) domain condition D_x no_cond. *)
Lemma D_x_no_cond : forall x a:R, a <> 0 -> D_x no_cond x (x + a).
Proof.
  intros.
  unfold D_x, no_cond in |- *.
  split.
  trivial.
  (* x <> x + a reduces to a <> 0 after cancelling x. *)
  apply Rminus_not_eq.
  unfold Rminus in |- *.
  rewrite Ropp_plus_distr.
  rewrite <- Rplus_assoc.
  rewrite Rplus_opp_r.
  rewrite Rplus_0_l.
  apply Ropp_neq_0_compat; assumption.
Qed.
(* Four-term triangle inequality, obtained by applying Rabs_triang
   twice (first to (a+b) and (c+d), then inside each pair). *)
Lemma Rabs_4 :
  forall a b c d:R, Rabs (a + b + c + d) <= Rabs a + Rabs b + Rabs c + Rabs d.
Proof.
  intros.
  apply Rle_trans with (Rabs (a + b) + Rabs (c + d)).
  replace (a + b + c + d) with (a + b + (c + d)); [ apply Rabs_triang | ring ].
  apply Rle_trans with (Rabs a + Rabs b + Rabs (c + d)).
  apply Rplus_le_compat_r.
  apply Rabs_triang.
  repeat rewrite Rplus_assoc; repeat apply Rplus_le_compat_l.
  apply Rabs_triang.
Qed.
(* Strict inequalities add up: replacing each summand by its strict
   upper bound, one term at a time via Rplus_lt_compat. *)
Lemma Rlt_4 :
  forall a b c d e f g h:R,
    a < b -> c < d -> e < f -> g < h -> a + c + e + g < b + d + f + h.
Proof.
  intros; apply Rlt_trans with (b + c + e + g).
  repeat apply Rplus_lt_compat_r; assumption.
  repeat rewrite Rplus_assoc; apply Rplus_lt_compat_l.
  apply Rlt_trans with (d + e + g).
  rewrite Rplus_assoc; apply Rplus_lt_compat_r; assumption.
  rewrite Rplus_assoc; apply Rplus_lt_compat_l; apply Rlt_trans with (f + g).
  apply Rplus_lt_compat_r; assumption.
  apply Rplus_lt_compat_l; assumption.
Qed.
(* begin hide *)
(* Parsing-only alias for [Rmin_glb_lt]; hidden from the generated docs. *)
Notation Rmin_2 := Rmin_glb_lt (only parsing).
(* end hide *)
(** Multiplying by 4 is the same as adding a value to itself four times. *)
Lemma quadruple : forall x:R, 4 * x = x + x + x + x.
Proof.
intro; ring.
Qed.
(** Any real splits into four equal quarters; used to distribute an
    epsilon over four estimates. *)
Lemma quadruple_var : forall x:R, x = x / 4 + x / 4 + x / 4 + x / 4.
Proof.
intro; rewrite <- quadruple.
unfold Rdiv in |- *; rewrite <- Rmult_assoc; rewrite Rinv_r_simpl_m; discrR.
reflexivity.
Qed.
(**********)
(** A function continuous at [x0] with [f x0 <> 0] stays nonzero on a
    whole neighborhood of [x0].  The radius is the delta produced by
    continuity at threshold [Rabs (f x0 / 2)]. *)
Lemma continuous_neq_0 :
forall (f:R -> R) (x0:R),
continuity_pt f x0 ->
f x0 <> 0 ->
exists eps : posreal, (forall h:R, Rabs h < eps -> f (x0 + h) <> 0).
Proof.
intros; unfold continuity_pt in H; unfold continue_in in H;
unfold limit1_in in H; unfold limit_in in H; elim (H (Rabs (f x0 / 2))).
intros; elim H1; intros.
(* the delta [x] provided by continuity is the desired radius *)
exists (mkposreal x H2).
intros; assert (H5 := H3 (x0 + h)).
cut
(dist R_met (x0 + h) x0 < x ->
dist R_met (f (x0 + h)) (f x0) < Rabs (f x0 / 2)).
unfold dist in |- *; simpl in |- *; unfold R_dist in |- *;
replace (x0 + h - x0) with h.
intros; assert (H7 := H6 H4).
red in |- *; intro.
(* if f (x0+h) = 0, the distance to f x0 is |f x0|, contradicting the
   bound |f x0|/2 *)
rewrite H8 in H7; unfold Rminus in H7; rewrite Rplus_0_l in H7;
rewrite Rabs_Ropp in H7; unfold Rdiv in H7; rewrite Rabs_mult in H7;
pattern (Rabs (f x0)) at 1 in H7; rewrite <- Rmult_1_r in H7.
cut (0 < Rabs (f x0)).
intro; assert (H10 := Rmult_lt_reg_l _ _ _ H9 H7).
cut (Rabs (/ 2) = / 2).
assert (Hyp : 0 < 2).
prove_sup0.
intro; rewrite H11 in H10; assert (H12 := Rmult_lt_compat_l 2 _ _ Hyp H10);
rewrite Rmult_1_r in H12; rewrite <- Rinv_r_sym in H12;
[ idtac | discrR ].
(* 1 < 2 yields the contradiction 1 < 1 *)
cut (IZR 1 < IZR 2).
unfold IZR in |- *; unfold INR, nat_of_P in |- *; simpl in |- *; intro;
elim (Rlt_irrefl 1 (Rlt_trans _ _ _ H13 H12)).
apply IZR_lt; omega.
(* side condition: |1/2| = 1/2 since 1/2 is nonnegative *)
unfold Rabs in |- *; case (Rcase_abs (/ 2)); intro.
assert (Hyp : 0 < 2).
prove_sup0.
assert (H11 := Rmult_lt_compat_l 2 _ _ Hyp r); rewrite Rmult_0_r in H11;
rewrite <- Rinv_r_sym in H11; [ idtac | discrR ].
elim (Rlt_irrefl 0 (Rlt_trans _ _ _ Rlt_0_1 H11)).
reflexivity.
apply (Rabs_pos_lt _ H0).
ring.
(* discharge the premise of the continuity hypothesis, distinguishing
   whether x0 + h actually differs from x0 *)
assert (H6 := Req_dec x0 (x0 + h)); elim H6; intro.
intro; rewrite <- H7; unfold dist, R_met in |- *; unfold R_dist in |- *;
unfold Rminus in |- *; rewrite Rplus_opp_r; rewrite Rabs_R0;
apply Rabs_pos_lt.
unfold Rdiv in |- *; apply prod_neq_R0;
[ assumption | apply Rinv_neq_0_compat; discrR ].
intro; apply H5.
split.
unfold D_x, no_cond in |- *.
split; trivial || assumption.
assumption.
change (0 < Rabs (f x0 / 2)) in |- *.
apply Rabs_pos_lt; unfold Rdiv in |- *; apply prod_neq_R0.
assumption.
apply Rinv_neq_0_compat; discrR.
Qed.
|
{"author": "JasonGross", "repo": "category-coq-experience-tests", "sha": "f9949ede618788fd051fe8327f997ee683388e49", "save_path": "github-repos/coq/JasonGross-category-coq-experience-tests", "path": "github-repos/coq/JasonGross-category-coq-experience-tests/category-coq-experience-tests-f9949ede618788fd051fe8327f997ee683388e49/coq/coq-8.3pl5-foundations/theories/Reals/Ranalysis2.v"}
|
(* Copyright (c) 2014, Robert Dockins *)
Require Import String.
Require Import atoms.
Require Import permutations.
Require Import basics.
Require Import preord.
Require Import categories.
Require Import sets.
Require Import finsets.
Require Import esets.
Require Import effective.
Require Import directed.
Require Import plotkin.
Require Import joinable.
Require Import approx_rels.
Require Import cpo.
Require Import profinite.
Require Import finprod.
Require Import discrete.
Require Import List.
(** * The simply-typed λ-calculus with booleans.
This file develops the simply-typed λ-calculus
with named variables. Types are interpreted
as unpointed domains in PLT.
Soundness and adequacy of the denotational semantics
are proved with respect to a standard big-step operational
semantics. This uses the standard logical relation
approach. As a corollary, we obtain strong normalization
for the calculus.
*)
(** ** Types and type denotations
We have a boolean base type and functions.
*)
(* Object-language types: the boolean base type and function types. *)
Inductive ty :=
| ty_bool
| ty_arrow : ty -> ty -> ty.
(* Notation scopes: "2" abbreviates [ty_bool] and [x ⇒ y] abbreviates
   [ty_arrow x y] inside [ty_scope]; [lam_scope] holds the term-level
   notations introduced below and is opened for the rest of the file. *)
Delimit Scope ty_scope with ty.
Notation "2" := ty_bool : ty_scope.
Notation "x ⇒ y" := (ty_arrow (x)%ty (y)%ty) : ty_scope.
Bind Scope ty_scope with ty.
Delimit Scope lam_scope with lam.
Open Scope lam_scope.
(** Types are interpreted via a straightforward
translation into PLT domains.
*)
(* Interpret syntactic types as PLT domains: booleans become the
   discrete two-point domain, arrows become the PLT exponential. *)
Fixpoint tydom (τ:ty) : PLT :=
match τ with
| 2%ty => disc finbool
| (τ₁ ⇒ τ₂)%ty => tydom τ₁ ⇒ tydom τ₂
end.
(** The syntax of types has decidable equality. This is
important because it allows us to work around some
problems that arise with dependent types.
*)
(** Equality of syntactic types is decidable. *)
Lemma ty_dec : forall x y:ty, {x=y}+{x<>y}.
Proof.
decide equality.
Qed.
(** ** Type contexts
Now we instantiate a module for finite products.
This gives us a domain in PLT for representing
type contexts, and provides operations and lemmas
we need for working with them.
*)
(* Instantiate the generic finite-product construction with our types
   [ty], their decidable equality, and the interpretation [tydom]. *)
Module env_input <: FINPROD_INPUT.
Definition A := ty.
Definition Adec := ty_dec.
Definition F := tydom.
End env_input.
Module ENV := finprod.finprod(env_input).
(* Short names for the finprod operations used throughout this file:
   [env] is a type context, [inenv Γ x σ] says variable x has type σ in Γ,
   [cxt Γ] is the domain interpreting Γ, [castty] coerces along type
   equalities, and [proj]/[bind] project out of / extend a context. *)
Notation env := ENV.env.
Canonical Structure ENV.env_supported.
Notation inenv := ENV.inenv.
Notation cxt := ENV.finprod.
Notation castty := (cast ENV.ty).
Notation proj := ENV.proj.
Notation bind := ENV.bind.
(** ** Terms and term denotations
Terms are intrinsically typed, carrying both
a type environment for their free variables
and the final type of the term.
Variables carry a name (atom) and a proof
that (x,σ) appears in the type environment.
Lambdas extend the type environment in the standard way.
*)
(* Intrinsically-typed terms: every constructor records its context and
   type, so only well-typed terms can be built. *)
Inductive term (Γ:env) : ty -> Type :=
(* variable: carries its name and a proof it occurs in Γ at type σ *)
| tvar : forall (x:atom) (σ:ty),
inenv Γ x σ ->
term Γ σ
(* boolean literal *)
| tbool : forall n:bool,
term Γ 2
(* application *)
| tapp : forall σ₁ σ₂,
term Γ (σ₁ ⇒ σ₂) ->
term Γ σ₁ ->
term Γ σ₂
(* conditional on a boolean scrutinee *)
| tif : forall σ,
term Γ 2 ->
term Γ σ ->
term Γ σ ->
term Γ σ
(* abstraction: binds x at type σ₁ in the extended context *)
| tlam : forall x σ₁ σ₂,
term ((x,σ₁)::Γ) σ₂ ->
term Γ (σ₁ ⇒ σ₂).
(* Infix application notation, plus abbreviations for the generic
   substitution/weakening operations of ENV instantiated at [term]. *)
Arguments tapp [_ _ _] _ _.
Notation "x • y" := (tapp x y)
(at level 52, left associativity, format "x • y") : lam_scope.
Notation subst := (ENV.subst term).
Notation term_wk := (ENV.tm_wk term).
Notation term_subst := (ENV.tm_subst term).
(** The terms in environment [Γ] with type [τ] are interpreted
as PLT-homs from [cxt Γ] to [tydom τ].
*)
(* The arrow here is the PLT hom-set, per the comment above. *)
Definition dom (Γ:env) (τ:ty) : Type := cxt Γ → tydom τ.
(* Denotation of terms as PLT-homs; 〚m〛 abbreviates [denote _ _ m]. *)
Fixpoint denote (Γ:env) (τ:ty) (m:term Γ τ) : dom Γ τ :=
match m in term _ τ' return dom Γ τ' with
(* variable: project the component for x and cast to the right type *)
| tvar _ x σ IN => castty IN ∘ proj Γ x
(* constant: discrete element after collapsing the context *)
| tbool _ b => disc_elem b ∘ PLT.terminate false (cxt Γ)
(* conditional: case split on the denotation of the scrutinee *)
| tif _ σ x y z => disc_cases (fun b:bool => if b then 〚y〛 else 〚z〛)
∘ 〈 id, 〚x〛 〉
(* application: categorical apply on the pair of denotations *)
| tapp m₁ m₂ => apply ∘ 〈 〚m₁〛, 〚m₂〛 〉
(* abstraction: curry the body denoted in the extended context *)
| tlam _ x σ₁ σ₂ m' => Λ(〚m'〛 ∘ bind Γ x σ₁)
end
where "〚 m 〛" := (denote _ _ m) : lam_scope.
(** Here we define a generic traversal function. This traversal
is uniformly used to define both weakening and substitution
by exploiting the finprod library. Defining this traversal
and its correctness proof is sufficient to get out a
definition of substitution and a proof of correctness.
*)
Section traverse.
(* Abstract "payload" assigned to variables by the traversal: for
   weakening it is an inenv proof, for substitution a replacement term. *)
Variable thingy:env -> atom -> ty -> Type.
(* How to turn a payload back into a term. *)
Variable thingy_term : forall Γ x σ, thingy Γ x σ -> term Γ σ.
(* How the traversal renames a binder relative to the target context. *)
Variable rename_var : env -> atom -> atom.
(* How a variable map is pushed under a binder. *)
Variable weaken_vars : forall Γ₁ Γ₂ y τ,
(forall x σ, inenv Γ₁ x σ -> thingy Γ₂ x σ) ->
(forall x σ, inenv ((y,τ)::Γ₁) x σ -> thingy ((rename_var Γ₂ y,τ)::Γ₂) x σ).
(* Structural traversal: apply VAR at variables, recurse homomorphically
   elsewhere, extending VAR with [weaken_vars] when passing a lambda. *)
Fixpoint traverse
(Γ₁ Γ₂:env) (σ:ty)
(VAR : forall x σ, inenv Γ₁ x σ -> thingy Γ₂ x σ)
(m:term Γ₁ σ) : term Γ₂ σ :=
match m with
| tvar _ x σ IN => thingy_term Γ₂ x σ (VAR x σ IN)
| tbool _ b => tbool Γ₂ b
| @tapp _ σ₁ σ₂ m₁ m₂ =>
@tapp Γ₂ σ₁ σ₂ (traverse Γ₁ Γ₂ (σ₁ ⇒ σ₂) VAR m₁)
(traverse Γ₁ Γ₂ σ₁ VAR m₂)
| tif _ σ x y z =>
tif Γ₂ σ (traverse Γ₁ Γ₂ 2 VAR x)
(traverse Γ₁ Γ₂ σ VAR y)
(traverse Γ₁ Γ₂ σ VAR z)
| tlam _ x σ₁ σ₂ m' =>
let x' := rename_var Γ₂ x in
tlam Γ₂ x' σ₁ σ₂
(traverse ((x,σ₁)::Γ₁) ((x',σ₁)::Γ₂) σ₂
(weaken_vars Γ₁ Γ₂ x σ₁ VAR)
m')
end.
(* Semantic assumptions on the payload, needed for correctness:
   pushing a map under a binder commutes with [bind] ... *)
Hypothesis weaken_sem_bind : forall Γ₁ Γ₂ x σ VAR,
bind Γ₁ x σ ∘ PLT.pair_map (ENV.varmap_denote term denote thingy thingy_term Γ₁ Γ₂ VAR) id
≈ ENV.varmap_denote term denote thingy thingy_term ((x,σ)::Γ₁) ((rename_var Γ₂ x,σ)::Γ₂)
(weaken_vars Γ₁ Γ₂ x σ VAR) ∘ bind Γ₂ (rename_var Γ₂ x) σ.
(* ... and the payload's denotation at a variable agrees with projection. *)
Hypothesis varmap_denote_proj : forall Γ₁ Γ₂ VAR x σ i,
〚 thingy_term Γ₂ x σ (VAR x σ i) 〛
≈ castty i ∘ proj Γ₁ x ∘ ENV.varmap_denote term denote thingy thingy_term Γ₁ Γ₂ VAR.
(* Correctness: traversing then denoting equals denoting then composing
   with the denotation of the variable map.  By induction on the term. *)
Lemma traverse_correct
(Γ₁ Γ₂:env) (σ:ty)
(m:term Γ₁ σ) : forall
(VAR : forall x σ, inenv Γ₁ x σ -> thingy Γ₂ x σ),
denote _ _ (traverse Γ₁ Γ₂ σ VAR m) ≈
denote _ _ m ∘ ENV.varmap_denote term denote thingy thingy_term Γ₁ Γ₂ VAR.
Proof.
revert Γ₂. induction m; simpl; intros.
(* var case: exactly hypothesis varmap_denote_proj *)
apply varmap_denote_proj.
(* bool case: both sides terminate the context *)
rewrite <- (cat_assoc PLT). apply cat_respects; auto.
symmetry. apply PLT.terminate_univ.
(* app case: push the composition through the pairing *)
rewrite <- (cat_assoc PLT). apply cat_respects; auto.
rewrite (PLT.pair_compose_commute false).
apply PLT.pair_eq.
apply IHm1; auto.
apply IHm2; auto.
(* if case: commute disc_cases with the composition *)
rewrite <- (cat_assoc PLT).
rewrite (PLT.pair_compose_commute false).
rewrite (cat_ident2 PLT).
symmetry.
rewrite (disc_cases_commute _ _ _ _ _ (ENV.varmap_denote _ _ _ _ _ _ _)).
apply cat_respects; auto.
symmetry.
apply disc_cases_univ.
intros. rewrite disc_cases_elem'.
rewrite (cat_ident1 PLT).
destruct x; auto.
rewrite IHm1. auto.
(* lam case: uses weaken_sem_bind to commute with the binder *)
symmetry.
rewrite (PLT.curry_compose_commute _ _ _ _ _ (〚 m 〛∘ bind Γ x σ₁)).
apply PLT.curry_eq.
rewrite <- (cat_assoc PLT).
rewrite IHm.
rewrite <- (cat_assoc PLT). apply cat_respects; auto.
rewrite weaken_sem_bind.
apply cat_respects; auto.
Qed.
End traverse.
(** Register terms together with the denotation and traversal functions
as a term model. This gives us access to the generic substitution
definition in finprod.
*)
(* Package terms, denotations and the traversal as an ENV term model;
   the single proof obligation is discharged trivially. *)
Program Definition lam_termmodel :=
ENV.TermModel term tvar traverse denote traverse_correct _.
Next Obligation.
simpl. auto.
Qed.
Existing Instance lam_termmodel.
(** Restate the substitution correctness lemma. *)
(** Substitution is sound for the denotational semantics: substituting
    n₂ for x in n₁ denotes the same hom as binding x and pairing with
    〚n₂〛.  Direct restatement of [ENV.subst_soundness]. *)
Lemma subst_soundness Γ x σ₁ σ₂ n₁ n₂ :
〚 n₁ 〛 ∘ bind Γ x σ₁ ∘ 〈id, 〚 n₂ 〛〉 ≈ 〚 subst Γ σ₂ σ₁ x n₁ n₂ 〛.
Proof.
generalize (ENV.subst_soundness term). simpl. auto.
Qed.
(** ** Operational semantics and soundness
This is a standard call-by-value operational semantics. As this
calculus is strongly-normalizing, we could just as well use a
call-by-need strategy.
Notation: [m⇓z] means that [m] evaluates to [z].
[m↓] means that [m] evaluates to itself; i.e., [m] is a value.
*)
Reserved Notation "m ⇓ z" (at level 82, left associativity).
Reserved Notation "m ↓" (at level 82, left associativity).
(* Big-step call-by-value evaluation; [m ↓] (m is a value) is literally
   [m ⇓ m]. *)
Inductive eval (Γ:env) : forall τ, term Γ τ -> term Γ τ -> Prop :=
(* booleans are values *)
| ebool : forall b,
tbool Γ b ↓
(* evaluate the scrutinee, then the selected branch *)
| eif : forall σ x y z b q,
x ⇓ (tbool Γ b) ->
(if b then y else z) ⇓ q ->
(tif Γ σ x y z) ⇓ q
(* lambdas are values *)
| elam : forall x σ₁ σ₂ m,
tlam Γ x σ₁ σ₂ m ↓
(* evaluate function and argument, then the substituted body *)
| eapp : forall x σ₁ σ₂ m₁ m₂ n₁ n₂ z,
m₁ ⇓ (tlam Γ x σ₁ σ₂ n₁) ->
m₂ ⇓ n₂ ->
subst Γ σ₂ σ₁ x n₁ n₂ ⇓ z ->
m₁ • m₂ ⇓ z
where "m ⇓ z" := (eval _ _ m z)
and "m ↓" := (eval _ _ m m).
(** Evaluation preserves the denotation of terms. *)
(** Soundness: evaluation preserves denotations.  By induction on the
    evaluation derivation, using beta (curry/apply) and [subst_soundness]
    in the application case. *)
Theorem soundness : forall Γ τ (m z:term Γ τ),
m ⇓ z -> 〚m〛 ≈ 〚z〛.
Proof.
intros. induction H; simpl; auto.
(* if case: reduce disc_cases at the known boolean *)
rewrite IHeval1.
simpl.
rewrite disc_cases_elem'.
rewrite (cat_ident1 PLT).
destruct b; auto.
(* app case: beta-reduce and appeal to substitution soundness *)
rewrite IHeval1.
rewrite IHeval2.
rewrite <- IHeval3.
simpl.
rewrite PLT.curry_apply2.
apply subst_soundness.
Qed.
(** ** Misc technical lemmas
*)
(** Syntactic types have decidable equality, which
implies injectivity for dependent pairs with
(syntactic) types as the type being depended upon.
*)
(** Injectivity of dependent pairs over [ty], justified by decidable
    equality of types (no axioms needed). *)
Lemma inj_pair2_ty : forall (F:ty -> Type) τ x y,
existT F τ x = existT F τ y -> x = y.
Proof.
intros.
apply Eqdep_dec.inj_pair2_eq_dec in H. auto.
decide equality.
Qed.
(** Decidable equality of environments (lists of atom/type pairs). *)
Lemma env_dec : forall a b:env, {a=b}+{a<>b}.
Proof.
decide equality.
decide equality.
decide equality.
apply string_dec.
Qed.
(* [inj_ty] simplifies all existT equalities in the context using the
   decidable-equality injectivity lemmas above; [inv] is inversion
   followed by this cleanup. *)
Ltac inj_ty :=
repeat match goal with
[ H : existT _ _ _ = existT _ _ _ |- _ ] =>
apply inj_pair2_ty in H ||
apply (Eqdep_dec.inj_pair2_eq_dec _ string_dec) in H ||
apply (Eqdep_dec.inj_pair2_eq_dec _ env_dec) in H
end.
Ltac inv H :=
inversion H; subst; inj_ty; repeat subst.
(** We will need a variety of technical results about the operational semantics.
*)
(** The result of evaluation is a value (evaluates to itself). *)
Lemma eval_value Γ τ x y :
eval Γ τ x y -> eval Γ τ y y.
Proof.
intro H. induction H.
apply ebool.
auto.
apply elam.
auto.
Qed.
(** Evaluation is deterministic. *)
Lemma eval_eq Γ τ x y1 y2 :
eval Γ τ x y1 -> eval Γ τ x y2 -> y1 = y2.
Proof.
intro H. revert y2.
induction H.
intros. inv H. auto.
(* if case: the two scrutinee results must be the same boolean *)
intros. inv H1.
assert (tbool Γ b = tbool Γ b0).
apply IHeval1. auto.
inv H2.
apply IHeval2; auto.
intros. inv H. auto.
(* app case: equal lambdas and arguments, then equal substituted bodies *)
intros. inv H2.
apply IHeval1 in H8.
apply IHeval2 in H9.
inv H8.
apply IHeval3; auto.
Qed.
(** Evaluation is transitive: since results are values and evaluation is
    deterministic, [y] and [z] must coincide. *)
Lemma eval_trans Γ τ x y z :
eval Γ τ x y -> eval Γ τ y z -> eval Γ τ x z.
Proof.
intros.
replace z with y; auto.
eapply eval_eq with y; auto.
eapply eval_value; eauto.
Qed.
(** ** Alpha congruence
Here we define alpha congruence of terms.
*)
(* Positional correspondence of variables across two environments: y₁
   and y₂ are related when they occur at the same position with the
   same type, skipping positions where neither name matches. *)
Inductive var_cong : env -> env -> atom -> atom -> Prop :=
| vcong_here : forall Γ₁ Γ₂ x₁ x₂ y₁ y₂ τ,
x₁ = y₁ -> x₂ = y₂ ->
var_cong ((x₁,τ)::Γ₁) ((x₂,τ)::Γ₂) y₁ y₂
| vcong_there : forall Γ₁ Γ₂ x₁ x₂ y₁ y₂ τ,
x₁ <> y₁ -> x₂ <> y₂ ->
var_cong Γ₁ Γ₂ y₁ y₂ ->
var_cong ((x₁,τ)::Γ₁) ((x₂,τ)::Γ₂) y₁ y₂.
(* Alpha congruence: structural equality of terms up to a consistent
   renaming of bound (and free, via var_cong) variables. *)
Inductive alpha_cong : forall Γ Γ' (τ:ty), term Γ τ -> term Γ' τ -> Prop :=
| acong_var : forall Γ Γ' τ x₁ x₂ H₁ H₂,
var_cong Γ Γ' x₁ x₂ ->
alpha_cong Γ Γ' τ (tvar Γ x₁ τ H₁) (tvar Γ' x₂ τ H₂)
| acong_bool : forall Γ Γ' b,
alpha_cong Γ Γ' 2 (tbool Γ b) (tbool Γ' b)
| acong_app : forall Γ Γ' σ₁ σ₂ m₁ m₂ n₁ n₂,
alpha_cong Γ Γ' (σ₁ ⇒ σ₂) m₁ n₁ ->
alpha_cong Γ Γ' σ₁ m₂ n₂ ->
alpha_cong Γ Γ' σ₂ (m₁ • m₂) (n₁ • n₂)
| acong_if : forall Γ Γ' σ x1 x2 y1 y2 z1 z2,
alpha_cong Γ Γ' 2 x1 x2 ->
alpha_cong Γ Γ' σ y1 y2 ->
alpha_cong Γ Γ' σ z1 z2 ->
alpha_cong Γ Γ' σ (tif Γ σ x1 y1 z1) (tif Γ' σ x2 y2 z2)
(* binders may differ; the bodies are congruent in the extended envs *)
| acong_lam : forall (Γ Γ':env) (x₁ x₂:atom) σ₁ σ₂ m₁ m₂,
alpha_cong ((x₁,σ₁)::Γ) ((x₂,σ₁)::Γ') σ₂ m₁ m₂ ->
alpha_cong Γ Γ' (σ₁ ⇒ σ₂) (tlam Γ x₁ σ₁ σ₂ m₁) (tlam Γ' x₂ σ₁ σ₂ m₂).
(** Alpha congruence is reflexive, transitive and symmetric.
*)
(** Reflexivity of [var_cong] for variables actually in the environment. *)
Lemma var_cong_refl Γ x τ:
inenv Γ x τ ->
var_cong Γ Γ x x.
Proof.
induction Γ; intro H.
inv H.
hnf in H. simpl in H.
destruct a.
(* the head either is x (vcong_here) or is skipped (vcong_there) *)
destruct (string_dec s x). inv H.
apply vcong_here; auto.
apply vcong_there; auto.
Qed.
(** Symmetry of [var_cong]. *)
Lemma var_cong_sym Γ₁ Γ₂ x y :
var_cong Γ₁ Γ₂ x y ->
var_cong Γ₂ Γ₁ y x.
Proof.
intro H. induction H.
apply vcong_here; auto.
apply vcong_there; auto.
Qed.
(** Transitivity of [var_cong]. *)
Lemma var_cong_trans Γ₁ Γ₂ x y z :
var_cong Γ₁ Γ₂ x y ->
forall Γ₃,
var_cong Γ₂ Γ₃ y z ->
var_cong Γ₁ Γ₃ x z.
Proof.
intro H; induction H; intros.
(* here/there cases must agree at each position of the middle env *)
subst. inv H1.
apply vcong_here; auto.
elim H3; auto.
inv H2.
elim H0. auto.
apply vcong_there; auto.
Qed.
(** Reflexivity of alpha congruence. *)
Lemma alpha_eq_refl Γ σ (m:term Γ σ) :
alpha_cong Γ Γ σ m m.
Proof.
induction m.
apply acong_var.
eapply var_cong_refl; eauto.
apply acong_bool.
apply acong_app; auto.
apply acong_if; auto.
apply acong_lam; auto.
Qed.
(** Symmetry of alpha congruence. *)
Lemma alpha_eq_sym Γ₁ Γ₂ τ m n :
alpha_cong Γ₁ Γ₂ τ m n ->
alpha_cong Γ₂ Γ₁ τ n m.
Proof.
intro H; induction H.
apply acong_var. apply var_cong_sym. auto.
apply acong_bool.
apply acong_app; auto.
apply acong_if; auto.
apply acong_lam; auto.
Qed.
(** Transitivity of alpha congruence. *)
Lemma alpha_eq_trans Γ₁ τ (m:term Γ₁ τ) :
forall Γ₂ Γ₃ (n:term Γ₂ τ) (o:term Γ₃ τ),
alpha_cong Γ₁ Γ₂ τ m n ->
alpha_cong Γ₂ Γ₃ τ n o ->
alpha_cong Γ₁ Γ₃ τ m o.
Proof.
induction m; intros; inv H; inv H0.
apply acong_var.
eapply var_cong_trans; eauto.
apply acong_bool.
apply acong_app; eauto.
apply acong_if; eauto.
apply acong_lam; eauto.
Qed.
(** Alpha congruent terms have equal denotations.
*)
(** Alpha-congruent terms have equal denotations, relative to a pair of
    environment homs h₁, h₂ that agree on all var_cong-related
    variables.  By induction on the congruence derivation. *)
Lemma alpha_cong_denote (Γ₁ Γ₂:env) τ (m:term Γ₁ τ) (n:term Γ₂ τ) :
alpha_cong Γ₁ Γ₂ τ m n ->
forall A (h₁:A → cxt Γ₁) (h₂:A → cxt Γ₂),
(forall a b τ (IN1:inenv Γ₁ a τ) (IN2:inenv Γ₂ b τ),
var_cong Γ₁ Γ₂ a b ->
castty IN1 ∘ proj Γ₁ a ∘ h₁ ≈ castty IN2 ∘ proj Γ₂ b ∘ h₂) ->
〚m〛 ∘ h₁ ≈ 〚n〛 ∘ h₂.
Proof.
intro H. induction H.
(* var case: exactly the assumption on h₁, h₂ *)
simpl; intros. apply H0. auto.
(* bool case: both sides factor through terminate *)
simpl; intros.
do 2 rewrite <- (cat_assoc PLT).
apply cat_respects; auto.
transitivity (PLT.terminate false A).
apply PLT.terminate_univ.
symmetry.
apply PLT.terminate_univ.
(* app case *)
simpl; intros.
do 2 rewrite <- (cat_assoc PLT).
apply cat_respects; auto.
do 2 rewrite (PLT.pair_compose_commute false).
apply PLT.pair_eq.
apply IHalpha_cong1. auto.
apply IHalpha_cong2. auto.
(* if case: commute disc_cases past h₁/h₂ and case split on the boolean *)
simpl; intros.
rewrite <- (cat_assoc PLT).
rewrite <- (cat_assoc PLT).
rewrite (PLT.pair_compose_commute false).
rewrite (PLT.pair_compose_commute false).
rewrite (cat_ident2 PLT).
rewrite (cat_ident2 PLT).
rewrite (IHalpha_cong1 _ h₁ h₂ H2).
rewrite disc_cases_commute.
rewrite (disc_cases_commute _ _ _ _ _ h₂).
apply cat_respects; auto.
apply disc_cases_univ.
intros.
rewrite disc_cases_elem'.
rewrite (cat_ident1 PLT).
destruct x.
apply IHalpha_cong2; auto.
apply IHalpha_cong3; auto.
(* lam case: push the homs under curry/bind; the induction hypothesis
   needs the agreement property for the extended environments *)
simpl; intros.
rewrite (PLT.curry_compose_commute false _ _ _ _ (〚 m₁ 〛 ∘ bind Γ x₁ σ₁)).
rewrite (PLT.curry_compose_commute false _ _ _ _ (〚 m₂ 〛 ∘ bind Γ' x₂ σ₁)).
apply PLT.curry_eq.
do 2 rewrite <- (cat_assoc PLT).
apply IHalpha_cong.
intros. inv H1.
(* subcase: the variable is the newly bound one; both sides reduce to
   the second projection, and the casts agree by UIP over ty *)
do 2 rewrite <- (cat_assoc PLT).
rewrite (cat_assoc PLT _ _ _ _ (proj ((a,σ₁)::Γ) a)).
rewrite (ENV.proj_bind_eq _ _ _ _ (refl_equal a)).
rewrite <- (cat_assoc PLT).
unfold PLT.pair_map.
rewrite PLT.pair_commute2.
rewrite (cat_ident2 PLT).
symmetry.
rewrite (cat_assoc PLT _ _ _ _ (proj ((b,σ₁)::Γ') b)).
rewrite (ENV.proj_bind_eq _ _ _ _ (refl_equal b)).
rewrite <- (cat_assoc PLT).
rewrite PLT.pair_commute2.
do 2 rewrite (cat_assoc PLT).
apply cat_respects; auto.
etransitivity.
apply cast_compose.
symmetry.
etransitivity.
apply cast_compose.
match goal with [ |- castty ?X ≈ castty ?Y ] => generalize X Y end.
hnf in IN1. simpl in *.
destruct (string_dec a a).
inv IN1. intros.
replace e0 with e1. auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
elim n; auto.
(* subcase: an older variable; strip the binder on both sides and use
   the outer agreement hypothesis *)
do 2 rewrite <- (cat_assoc PLT).
rewrite (cat_assoc PLT _ _ _ _ (proj ((x₁,σ₁)::Γ) a)).
rewrite (ENV.proj_bind_neq x₁ σ₁ a Γ H9); auto.
unfold PLT.pair_map.
rewrite <- (cat_assoc PLT).
rewrite PLT.pair_commute1.
symmetry.
rewrite (cat_assoc PLT _ _ _ _ (proj ((x₂,σ₁)::Γ') b)).
rewrite (ENV.proj_bind_neq x₂ σ₁ b Γ' H10); auto.
rewrite <- (cat_assoc PLT).
rewrite PLT.pair_commute1.
repeat rewrite (cat_assoc PLT).
apply cat_respects; auto.
rewrite (cast_compose false).
rewrite (cast_compose false).
symmetry. apply H0. auto.
Qed.
(** Special case: alpha-congruent terms in the SAME environment have
    equal denotations (instantiate the previous lemma with h₁ = h₂ = id;
    var_cong over identical environments forces a = b). *)
Lemma alpha_cong_denote' Γ τ (m:term Γ τ) (n:term Γ τ) :
alpha_cong Γ Γ τ m n -> 〚m〛 ≈ 〚n〛.
Proof.
intros.
cut (〚m〛∘ id ≈ 〚n〛∘ id ).
intro. do 2 rewrite (cat_ident1 PLT) in H0. auto.
apply alpha_cong_denote; auto.
intros. cut (a = b). intro. subst b.
(* the two inenv proofs are equal by UIP over option ty *)
replace IN2 with IN1; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
cut (forall Γ₁ Γ₂ a b, var_cong Γ₁ Γ₂ a b -> Γ₁ = Γ₂ -> a = b).
intros. eapply H1; eauto.
clear.
intros. induction H.
inv H0; auto.
inv H0; auto.
Qed.
(** We'll end up needing quite a few facts about alpha congruence.
Here we collect them together before defining the logical relation
and tackling the fundamental lemma.
*)
(** Congruence is preserved by weakening.
*)
(** Alpha congruence is preserved by weakening, provided the weakened
    environments relate at least the same variables. *)
Lemma alpha_cong_wk : forall (Γm Γn Γm' Γn':env) τ m n H₁ H₂,
(forall a b, var_cong Γm Γn a b -> var_cong Γm' Γn' a b) ->
alpha_cong Γm Γn τ m n ->
alpha_cong _ _ τ (term_wk Γm Γm' τ H₁ m)
(term_wk Γn Γn' τ H₂ n).
Proof.
intros. revert Γm' Γn' H₁ H₂ H.
induction H0; simpl; intros.
apply acong_var. apply H0. auto.
apply acong_bool.
apply acong_app; auto.
apply IHalpha_cong1. auto.
apply IHalpha_cong2. auto.
apply acong_if; auto.
apply IHalpha_cong1. auto.
apply IHalpha_cong2. auto.
apply IHalpha_cong3. auto.
(* lam case: extend the variable-congruence transformer under the binder *)
apply acong_lam. apply IHalpha_cong.
intros. inv H1.
apply vcong_here; auto.
apply vcong_there; auto.
Qed.
(** Variable congruence is closely related to the [inenv] relation.
*)
(** A var_cong-related variable occurs (at some type) in the left env. *)
Lemma varcong_inenv1 Γ₁ Γ₂ a b :
var_cong Γ₁ Γ₂ a b -> exists τ, inenv Γ₁ a τ.
Proof.
intro H. induction H. unfold inenv.
simpl. destruct (string_dec x₁ y₁); eauto. contradiction.
unfold inenv. simpl.
destruct (string_dec x₁ y₁); eauto.
Qed.
(** A var_cong-related variable occurs (at some type) in the right env. *)
Lemma varcong_inenv2 Γ₁ Γ₂ a b :
var_cong Γ₁ Γ₂ a b -> exists τ, inenv Γ₂ b τ.
Proof.
intro H. induction H. unfold inenv.
simpl. destruct (string_dec x₂ y₂); eauto. contradiction.
unfold inenv. simpl.
destruct (string_dec x₂ y₂); eauto.
Qed.
(** Over identical environments, var_cong-related variables are equal. *)
Lemma varcong_eq Γ₁ Γ₂ a b :
var_cong Γ₁ Γ₂ a b -> Γ₁ = Γ₂ -> a = b.
Proof.
intro H. induction H; simpl; intros.
inv H1; auto. inv H2; auto.
Qed.
(** Membership in an environment yields the reflexive var_cong instance. *)
Lemma inenv_varcong Γ a τ :
inenv Γ a τ -> var_cong Γ Γ a a.
Proof.
unfold inenv.
induction Γ; simpl; intros.
discriminate. destruct a0.
destruct (string_dec s a). subst.
apply vcong_here; auto.
apply vcong_there; auto.
Qed.
(** A variable is in the support of an environment iff it occurs in it
    at some type. *)
Lemma env_supp_inenv (Γ:env) a :
a ∈ ‖Γ‖ <-> exists τ, inenv Γ a τ.
Proof.
induction Γ; simpl; split; intros.
apply nil_elem in H. elim H.
destruct H. inv H.
(* forward direction, cons case *)
unfold Support.support in H. simpl in H.
unfold inenv. simpl. destruct a0.
simpl in H.
destruct (string_dec c a); eauto.
apply cons_elem in H. destruct H.
apply atom_strong_eq in H.
elim n; auto.
apply IHΓ in H.
auto.
(* backward direction, cons case *)
unfold inenv in H.
simpl in H.
destruct a0.
destruct (string_dec c a).
unfold Support.support. simpl.
apply cons_elem; auto.
left; auto. subst c; auto.
apply IHΓ in H.
unfold Support.support. simpl.
apply cons_elem; auto.
Qed.
(** When congruent substitutions are applied to congruent terms,
the resulting terms are congruent.
*)
(** Substitution respects alpha congruence: congruent variable maps
    applied to congruent terms yield congruent terms.  By induction on
    the first term; the lam case analyses the shifted variable maps. *)
Lemma term_subst_cong : forall Γ τ (m:term Γ τ) Γ' (n:term Γ' τ) Γ₁ Γ₂
(VAR1 : ENV.varmap term Γ Γ₁) (VAR2 : ENV.varmap term Γ' Γ₂),
(forall a1 a2 σ IN1 IN2,
var_cong Γ Γ' a1 a2 ->
alpha_cong Γ₁ Γ₂ σ (VAR1 a1 σ IN1) (VAR2 a2 σ IN2)) ->
alpha_cong Γ Γ' τ m n ->
alpha_cong Γ₁ Γ₂ τ
(term_subst Γ Γ₁ τ VAR1 m)
(term_subst Γ' Γ₂ τ VAR2 n).
Proof.
intros until m; induction m; simpl; intros; auto.
inv H0. simpl.
apply H. auto.
inv H0.
apply acong_bool.
inv H0.
apply acong_app; auto.
apply IHm1; auto.
apply IHm2; auto.
inv H0. simpl.
apply acong_if; auto.
apply IHm1; auto.
apply IHm2; auto.
apply IHm3; auto.
(* lam case: show the shifted variable maps remain congruent *)
inv H0. simpl.
apply acong_lam; auto.
apply IHm. intros.
unfold ENV.shift_vars', ENV.shift_vars, ENV.extend_map, ENV.weaken_map.
hnf in IN1. hnf in IN2. simpl in IN1. simpl in IN2.
revert IN1 IN2.
destruct (string_dec x a1); simpl; intros.
destruct (string_dec x₂ a2); simpl; intros.
(* both sides hit the freshly bound variable *)
subst x. subst x₂. unfold eq_rect_r.
inv IN1.
replace IN1 with (Logic.eq_refl (Some σ)). simpl.
replace IN2 with (Logic.eq_refl (Some σ)). simpl.
unfold ENV.newestvar. simpl.
apply acong_var.
apply vcong_here; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
inv H1. elim n; auto.
elim H10; auto.
destruct (string_dec x₂ a2); simpl; intros.
subst x₂. inv H1.
elim n; auto. elim H11; auto.
(* neither side is the bound variable: weaken and recurse via H *)
apply alpha_cong_wk; auto.
intros.
apply vcong_there; auto.
apply varcong_inenv1 in H2.
apply env_supp_inenv in H2.
intro. subst a. revert H2.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
apply varcong_inenv2 in H2.
apply env_supp_inenv in H2.
intro. subst b. revert H2.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
apply H. inv H1. elim n; auto. auto.
auto.
Qed.
(** Evaluation commutes with alpha congruence.
*)
(** Evaluation commutes with alpha congruence: if [m ⇓ z] and [n] is
    congruent to [m], then [n] evaluates to some [z'] congruent to [z].
    By induction on the evaluation derivation. *)
Lemma eval_alpha Γ τ (m z:term Γ τ) :
(m ⇓ z) -> forall Γ' (n:term Γ' τ),
alpha_cong Γ Γ' τ m n ->
exists z', (n ⇓ z') /\ alpha_cong Γ Γ' τ z z'.
Proof.
intro H. induction H; intros.
(* bool case *)
inv H. exists (tbool Γ' b). split.
apply ebool. apply acong_bool.
(* if case *)
inv H1.
destruct (IHeval1 Γ' x2) as [m [??]]; auto.
inv H3.
destruct (IHeval2 Γ' (if b then y2 else z2)) as [n [??]]; auto.
destruct b; auto.
exists n.
split; auto.
eapply eif. eauto. auto.
(* lam case *)
inv H. exists (tlam Γ' x₂ σ₁ σ₂ m₂).
split. apply elam.
apply acong_lam. auto.
(* app case: the substituted bodies are congruent by term_subst_cong *)
inv H2.
destruct (IHeval1 Γ' n₁0 H8) as [z1' [??]].
destruct (IHeval2 Γ' n₂0 H11) as [z2' [??]].
inv H4.
destruct (IHeval3 Γ' (subst Γ' σ₂ σ₁ x₂ m₂0 z2')) as [z' [??]].
unfold ENV.subst.
apply term_subst_cong.
intros.
inv H7.
(* the substituted variable maps agree at the bound variable... *)
unfold ENV.extend_map. simpl.
revert IN1 IN2. unfold inenv; simpl.
destruct (string_dec a1 a1).
destruct (string_dec a2 a2).
intros. inv IN1.
replace IN1 with (Logic.eq_refl (Some σ)). simpl.
replace IN2 with (Logic.eq_refl (Some σ)). simpl.
unfold eq_rect_r; simpl. auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
elim n; auto. elim n; auto.
(* ... and on all other variables *)
unfold ENV.extend_map. simpl.
revert IN1 IN2. unfold inenv; simpl.
destruct (string_dec x a1).
elim H18; auto.
destruct (string_dec x₂ a2).
elim H19; auto.
intros.
apply acong_var. auto.
auto.
exists z'; split; auto.
eapply eapp; eauto.
Qed.
(* FIXME, move earlier *)
(** The result of evaluation is never an application node. *)
Lemma app_not_value Γ σ (x y:term Γ σ) :
x⇓y -> forall σ₂ (m:term Γ (σ₂ ⇒ σ)) n, y = m•n -> False.
Proof.
intro H. induction H; intros; try discriminate.
eapply IHeval2; eauto.
subst z.
eapply IHeval3; eauto.
Qed.
(** The result of evaluation is never an if node. *)
Lemma if_not_value Γ σ (x y:term Γ σ) :
x⇓y -> forall a b c, y = tif Γ σ a b c -> False.
Proof.
intro H. induction H; intros; try discriminate.
eapply IHeval2; eauto.
subst z.
eapply IHeval3; eauto.
Qed.
(** The property of being a value is preserved
by alpha congruence.
*)
(** Being a value is preserved by alpha congruence: only the bool and
    lam cases are possible, since app/if nodes never evaluate to
    themselves by the two preceding lemmas. *)
Lemma alpha_cong_value Γ Γ' σ x y :
alpha_cong Γ Γ' σ x y -> x↓ -> y↓.
Proof.
intro H. induction H; intros.
inv H0.
apply ebool.
inv H1.
eapply app_not_value in H9; eauto. elim H9.
eapply if_not_value in H2. elim H2. eauto.
apply elam.
Qed.
(** Syntactically equal terms are alpha congruent. *)
Lemma alpha_cong_eq Γ σ x y :
x = y ->
alpha_cong Γ Γ σ x y.
Proof.
intro. subst y. apply alpha_eq_refl.
Qed.
(* FIXME, can these lemmas be pushed into finprod somehow? *)
(** Weakening an environment into itself is the identity on terms
    (the inenv proofs are identified by UIP). *)
Lemma term_wk_ident : forall Γ σ m H,
term_wk Γ Γ σ H m = m.
Proof.
intros until m; induction m; simpl; intros; auto.
f_equal.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
f_equal; auto. apply IHm1. apply IHm2.
f_equal; auto. apply IHm1. apply IHm2. apply IHm3.
f_equal; auto. apply IHm.
Qed.
(** Two successive weakenings collapse into a single weakening. *)
Lemma term_wk_compose : forall Γ₁ σ m Γ₂ Γ₃ H1 H2 H3,
term_wk Γ₂ Γ₃ σ H2 (term_wk Γ₁ Γ₂ σ H1 m) = term_wk Γ₁ Γ₃ σ H3 m.
Proof.
intros until m. induction m; simpl; intros; auto.
f_equal.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
f_equal.
apply (IHm1 Γ₂ Γ₃ H1 H2 H3).
apply (IHm2 Γ₂ Γ₃ H1 H2 H3).
f_equal.
apply IHm1.
apply IHm2.
apply IHm3.
f_equal.
apply IHm.
Qed.
(** Variant of [term_wk_compose] with the composed witness spelled out. *)
Lemma term_wk_compose' : forall Γ₁ σ m Γ₂ Γ₃ H1 H2,
term_wk Γ₂ Γ₃ σ H2 (term_wk Γ₁ Γ₂ σ H1 m) =
term_wk Γ₁ Γ₃ σ (fun x τ H => H2 x τ (H1 x τ H)) m.
Proof.
intros. eapply term_wk_compose; eauto.
Qed.
(** Weakening commutes with substitution, up to alpha congruence.
*)
(** Weakening after substitution is alpha congruent to substitution
    after weakening, whenever the two variable maps agree (up to
    weakening) on every variable.  By induction on the term; the lam
    case reasons about the freshly chosen binder names. *)
Lemma term_subst_wk_cong : forall Γ τ (m:term Γ τ) Γ₁ Γ₂ Γ₃ Γ₄
(VAR1 : ENV.varmap term Γ Γ₁) (VAR2:ENV.varmap term Γ₃ Γ₄) H₁ H₂,
(forall a σ Ha1 Ha2 H,
alpha_cong _ _ σ (term_wk Γ₁ Γ₂ σ H (VAR1 a σ Ha1)) (VAR2 a σ Ha2)) ->
alpha_cong _ _ τ
(term_wk Γ₁ Γ₂ τ H₁ (term_subst Γ Γ₁ τ VAR1 m))
(term_subst Γ₃ Γ₄ τ VAR2 (term_wk Γ Γ₃ τ H₂ m)).
Proof.
intros until m. induction m; simpl; intros; auto.
apply acong_bool.
apply acong_app; auto.
apply IHm1; auto.
apply IHm2; auto.
apply acong_if; auto.
apply IHm1; auto.
apply IHm2; auto.
apply IHm3; auto.
(* lam case *)
apply acong_lam.
apply IHm; clear IHm.
intros. unfold ENV.shift_vars'. unfold ENV.shift_vars.
unfold ENV.extend_map. simpl. unfold ENV.weaken_map. simpl.
unfold ENV.newestvar. simpl. unfold ENV.newestvar_obligation_1. simpl.
generalize Ha1 Ha2. unfold inenv; simpl.
destruct (string_dec x a); simpl.
(* subcase: the bound variable itself *)
subst a. intros.
inv Ha0. unfold eq_rect_r.
replace Ha0 with (Logic.eq_refl (Some σ)).
replace Ha3 with (Logic.eq_refl (Some σ)). simpl.
apply acong_var.
apply vcong_here; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
(* subcase: an older variable; normalize the double weakening and use
   freshness of the generated binder names *)
intros.
eapply alpha_eq_trans.
apply alpha_cong_eq.
apply term_wk_compose'.
unfold ENV.tm_wk. simpl.
match goal with [ |- alpha_cong _ _ _
(traverse _ _ _ _ _ _ _ ?Q1 _)
(traverse _ _ _ _ _ _ _ ?Q2 _) ] =>
generalize Q1 Q2; intros
end.
assert (forall x τ, inenv Γ₂ x τ -> inenv ((fresh[Γ₂],σ₁)::Γ₂) x τ).
intros.
hnf. hnf in H1. simpl. simpl in H1.
rewrite H1.
set (q := fresh [Γ₂]).
simpl in q. fold q.
destruct (string_dec q x0).
subst q.
(* the fresh atom cannot already occur in Γ₂ *)
elimtype False.
clear -H1 e.
assert (x0 ∈ ‖Γ₂‖).
apply env_supp_inenv. eauto.
subst x0. revert H.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
auto.
apply alpha_eq_trans with
((fresh [Γ₂],σ₁)::Γ₂)
(term_wk Γ₂ ((fresh [Γ₂],σ₁)::Γ₂) σ H1
(term_wk Γ₁ Γ₂ σ H₁ (VAR1 a σ Ha0))).
rewrite term_wk_compose'.
apply alpha_cong_wk.
intros.
apply vcong_there; auto.
clear -H2.
intro.
apply varcong_inenv1 in H2.
apply env_supp_inenv in H2. subst a0. revert H2.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
clear -H2 H₁.
intro.
apply varcong_inenv2 in H2.
assert (exists τ, inenv Γ₂ b τ).
destruct H2; eauto.
apply env_supp_inenv in H0. subst b. revert H0.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
clear -H₁ H2.
assert (a0 = b).
apply varcong_eq in H2; auto.
subst a0.
apply varcong_inenv1 in H2.
destruct H2. apply H₁ in H.
eapply inenv_varcong; eauto.
apply alpha_eq_refl.
apply alpha_cong_wk.
intros.
apply vcong_there.
clear -H2.
intro.
apply varcong_inenv1 in H2.
apply env_supp_inenv in H2. subst a0. revert H2.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem. auto.
clear -H2.
intro.
apply varcong_inenv2 in H2.
apply env_supp_inenv in H2. subst b. revert H2.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
auto.
apply H.
Qed.
(** A sequence of substitutions is equal to a single composed substitution,
up to alpha equivalence.
*)
(** Substituting with a composed variable map is alpha congruent to
    performing the two substitutions in sequence.  By induction on the
    term; the lam case uses [term_subst_cong] / [term_subst_wk_cong]
    to line up the shifted maps. *)
Lemma compose_term_subst : forall Γ₁ τ (m:term Γ₁ τ),
forall (Γ₂ Γ₃:env) (g:ENV.varmap term Γ₂ Γ₃) (f:ENV.varmap term Γ₁ Γ₂),
alpha_cong _ _ _
(term_subst Γ₁ Γ₃ τ (ENV.varmap_compose term _ _ _ g f) m)
(term_subst Γ₂ Γ₃ τ g (term_subst Γ₁ Γ₂ τ f m))
.
Proof.
unfold ENV.varmap_compose.
do 3 intro. induction m; simpl; intros.
apply alpha_eq_refl.
apply acong_bool.
simpl. apply acong_app.
apply IHm1; auto.
apply IHm2; auto.
apply acong_if; auto.
apply IHm1; auto.
apply IHm2; auto.
apply IHm3; auto.
(* lam case *)
apply acong_lam.
eapply alpha_eq_trans. 2: apply IHm. clear IHm.
apply term_subst_cong.
clear. unfold ENV.shift_vars', ENV.shift_vars. simpl.
intros.
simpl.
unfold inenv in *. simpl in *.
unfold ENV.extend_map.
destruct (string_dec x a1).
(* subcase: the freshly bound variable on the left *)
unfold eq_rect_r. simpl.
subst a1. inv IN1.
replace IN1 with (Logic.eq_refl (Some σ)).
unfold ENV.newestvar; simpl.
unfold ENV.newestvar_obligation_1. simpl.
revert IN2.
destruct (string_dec x a2).
subst a2; intros.
replace IN2 with (Logic.eq_refl (Some σ)).
simpl.
unfold ENV.weaken_map; simpl.
set (q := (fresh_atom (‖Γ₂‖ ++ nil))).
simpl in *. fold q.
destruct (string_dec q q). simpl.
apply acong_var.
apply vcong_here; auto.
elim n; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
intros.
elim n. inv H; auto. elim H7; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
(* subcase: an older variable *)
revert IN2.
destruct (string_dec x a2).
subst a2; intros.
elim n. inv H; auto. elim H8; auto.
intros.
simpl.
unfold ENV.weaken_map; simpl.
simpl.
assert (a1 = a2).
inv H; auto.
clear -H9.
apply varcong_eq in H9; auto.
subst a2.
replace IN2 with IN1.
apply term_subst_wk_cong. simpl. intros.
set (q1 := fresh [ Γ₂ ]).
set (q2 := fresh [ Γ₃ ]).
unfold inenv in *. simpl in *.
revert Ha2.
simpl in *. fold q1. fold q2.
destruct (string_dec q1 a).
(* the fresh atom q1 cannot occur in Γ₂ *)
subst a.
elimtype False.
assert (q1 ∈ ‖Γ₂‖).
apply env_supp_inenv. eauto.
revert H1. unfold q1.
apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
intros.
apply alpha_cong_wk.
intros. apply vcong_there; auto.
intro.
apply varcong_inenv1 in H1.
apply env_supp_inenv in H1. subst a0.
revert H1. apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
unfold q2.
intro.
apply varcong_inenv2 in H1.
apply env_supp_inenv in H1. subst b.
revert H1. apply fresh_atom_is_fresh'.
red; intros. apply app_elem; auto.
replace Ha2 with Ha1.
apply alpha_eq_refl.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply alpha_eq_refl.
Qed.
(** This technical lemma allows us to prove that applying the identity
substitution is alpha congruent to the original term.
*)
(** If a variable map acts like the identity (sends each variable to a
    congruent variable), then substituting it through a weakened term
    gives back an alpha-congruent term. *)
Lemma subst_weaken_alpha Γ Γ' σ
(x:term Γ σ) (y:term Γ' σ) :
alpha_cong Γ Γ' σ x y ->
forall Γ₁ Γ₂ (VAR:ENV.varmap term Γ₁ Γ₂) H,
(forall a b τ H1 H2, var_cong Γ Γ' a b ->
alpha_cong Γ₂ Γ' τ (VAR a τ H1) (tvar Γ' b τ H2)) ->
alpha_cong _ _ σ (term_subst _ _ σ VAR (term_wk _ _ _ H x)) y.
Proof.
intro. induction H; simpl; intros.
apply H1. auto.
apply acong_bool.
apply acong_app; auto.
apply IHalpha_cong1; auto.
apply IHalpha_cong2; auto.
apply acong_if; auto.
apply IHalpha_cong1; auto.
apply IHalpha_cong2; auto.
apply IHalpha_cong3; auto.
(* lam case: show the shifted map is still identity-like *)
apply acong_lam; auto.
apply IHalpha_cong. intros.
unfold ENV.shift_vars'.
unfold ENV.shift_vars. simpl.
unfold ENV.newestvar. unfold ENV.extend_map; simpl.
revert H2. unfold inenv; simpl.
unfold ENV.newestvar_obligation_1. simpl.
destruct (string_dec x₁ a). intros.
(* subcase: the bound variable maps to the new binder *)
subst a. inv H2.
replace H2 with (refl_equal (Some τ)).
unfold eq_rect_r; simpl.
apply acong_var.
apply vcong_here; auto.
inv H4; auto. elim H12; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
(* subcase: older variables go through the weaken map *)
intros.
inv H4. elim n; auto.
unfold ENV.weaken_map. simpl.
assert (inenv Γ' b τ).
revert H3. unfold inenv; simpl.
destruct (string_dec x₂ b).
contradiction. auto.
generalize (H1 a b τ H2 H5 H14). intros.
inv H6. rewrite <- H7. simpl.
apply acong_var.
apply vcong_there; auto.
(* the fresh binder cannot capture an existing variable *)
clear -H₁. intro.
assert (x₁0 ∈ ‖Γ₂‖).
apply env_supp_inenv. eauto.
subst x₁0. revert H0.
apply fresh_atom_is_fresh'.
red; intros.
apply app_elem; auto.
Qed.
(** Applying the identity substitution is alpha congruent
to the original term.
*)
(* Substitution (without prior weakening) preserves alpha congruence,
   provided the substitution maps congruent variables to congruent
   variables. *)
Lemma subst_alpha_ident Γ Γ' σ
(x:term Γ σ) (y:term Γ' σ) :
alpha_cong Γ Γ' σ x y ->
forall Γ₂ (VAR:ENV.varmap term Γ Γ₂),
(forall a b τ H1 H2, var_cong Γ Γ' a b ->
alpha_cong Γ₂ Γ' τ (VAR a τ H1) (tvar Γ' b τ H2)) ->
alpha_cong _ _ σ (term_subst _ _ σ VAR x) y.
Proof.
intros.
(* Insert the identity weakening so [subst_weaken_alpha] applies. *)
rewrite <- (term_wk_ident _ _ x (fun a b H => H)).
apply subst_weaken_alpha; auto.
Qed.
(** This lemma shows that extending a substitution is alpha congruent
to first shifting and then extending.
*)
(* Composing the shifted substitution with an extension by [n] at the
   fresh name [x'] agrees (up to alpha) with directly extending [VAR]
   by [n] at [x]. *)
Lemma extend_shift_alpha : forall
(Γ : env)
(x : atom)
(σ₁ : ty)
(VAR : ENV.varmap term Γ nil)
(n : term nil σ₁)
(a1 a2 : atom)
(σ : ty)
(IN1 : inenv ((x, σ₁) :: Γ) a1 σ)
(IN2 : inenv ((x, σ₁) :: Γ) a2 σ)
(x':atom) Hx,
var_cong ((x, σ₁) :: Γ) ((x, σ₁) :: Γ) a1 a2 ->
alpha_cong nil nil σ
(ENV.varmap_compose term ((x, σ₁) :: Γ) ((x', σ₁) :: nil) nil
(ENV.extend_map term nil nil (tvar nil) x' σ₁ n)
(ENV.shift_vars term term_wk tvar Γ nil x x' σ₁ Hx VAR) a1 σ IN1)
(ENV.extend_map term Γ nil VAR x σ₁ n a2 σ IN2).
Proof.
intros.
unfold ENV.varmap_compose.
unfold ENV.shift_vars.
unfold ENV.extend_map at 2. simpl.
unfold ENV.newestvar. simpl.
unfold ENV.newestvar_obligation_1. simpl.
revert IN1. unfold inenv. simpl.
(* Case split: is the looked-up atom the newly-bound variable [x]? *)
destruct (string_dec x a1).
(* Bound-variable case: both sides reduce to [n]; the proofs of
   membership are identified via UIP on decidable equality. *)
subst a1.
assert (x = a2).
inv H. auto. elim H7; auto.
subst a2.
intro. inv IN1.
replace IN1 with (refl_equal (Some σ)). simpl.
unfold ENV.extend_map. simpl.
revert IN2. unfold inenv; simpl.
destruct (string_dec x x).
intros.
destruct (string_dec x' x').
unfold eq_rect_r; simpl.
replace IN2 with (refl_equal (Some σ)). simpl.
apply alpha_eq_refl.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
elim n0; auto.
elim n0; auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
(* Older-variable case: the congruence hypothesis forces [a1 = a2],
   and both sides reduce to [VAR a1]. *)
intros.
assert (x <> a2 /\ var_cong Γ Γ a1 a2).
inv H; auto. elim n0. auto.
destruct H0.
assert (a1 = a2).
cut (forall Γ₁ Γ₂ a b, var_cong Γ₁ Γ₂ a b -> Γ₁ = Γ₂ -> a = b).
intros. eapply H2; eauto.
clear.
intros. induction H.
inv H0; auto.
inv H0; auto.
subst a2.
revert IN2. unfold inenv. simpl.
unfold ENV.extend_map at 2. simpl.
destruct (string_dec x a1).
contradiction.
simpl; intros.
unfold ENV.weaken_map.
replace IN2 with IN1.
2: apply Eqdep_dec.UIP_dec; decide equality; decide equality.
clear.
unfold ENV.extend_map. simpl.
(* The residual weakening is absorbed by [subst_weaken_alpha]; closed
   environments make the variable hypothesis vacuous. *)
apply subst_weaken_alpha.
apply alpha_eq_refl.
intros.
inv H.
Qed.
(** ** Logical relation and the fundamental lemma
Now we define the logical relation. It is defined by induction
on the structure of types, in a standard way. Note that
alpha congruence is explicitly built-in.
*)
(* [LR τ m h] relates a closed term [m : τ] to a denotation [h]. *)
Fixpoint LR (τ:ty) : term nil τ -> (cxt nil → tydom τ) -> Prop :=
match τ as τ' return term nil τ' -> (cxt nil → tydom τ') -> Prop
with
(* Base type: [m] is a boolean literal and [h] is the corresponding
   constant map. *)
| ty_bool => fun m h =>
exists b:bool, m = tbool nil b /\
h ≈ disc_elem b ∘ PLT.terminate _ _
(* Arrow type: applying [m] to any related value [n] evaluates to a
   result related (up to alpha congruence) to the semantic application. *)
| ty_arrow σ₁ σ₂ => fun m h =>
forall n h',
LR σ₁ n h' -> eval nil σ₁ n n ->
exists z1 z2,
eval _ _ (m•n) z1 /\
alpha_cong nil nil σ₂ z1 z2 /\
LR σ₂ z2 (apply ∘ 〈h, h'〉)
end.
(** The logical relation respects hom equality.
*)
(* [LR] only depends on the denotation up to hom equality. *)
Lemma LR_equiv τ : forall m h h',
h ≈ h' -> LR τ m h -> LR τ m h'.
Proof.
(* Induction on the type; the boolean case rewrites with the equality,
   the arrow case pushes the equality through pairing and application. *)
induction τ; simpl. intros.
destruct H0 as [b [??]]. exists b; split; auto.
rewrite <- H; auto.
simpl; intros.
destruct (H0 n h'0 H1 H2) as [z1 [z2 [?[??]]]].
exists z1; exists z2; split; auto. split; auto.
revert H5. apply IHτ2.
apply cat_respects; auto.
apply PLT.pair_eq; auto.
Qed.
(** The fundamental lemma states that every term stands in the logical relation
(up to alpha congruence) with its denotation when applied to related substitutions.
This lemma is the linchpin of the adequacy proof.
*)
(* Fundamental lemma: if every variable's substitute is a value related
   to its semantic projection, then substituting into [m] evaluates to a
   term related (up to alpha) to [m]'s denotation composed with [VARh]. *)
Lemma fundamental_lemma : forall Γ τ (m:term Γ τ)
(VAR:ENV.varmap term Γ nil) (VARh : cxt nil → cxt Γ),
(forall a σ H, VAR a σ H ↓ /\
LR σ (VAR a σ H) (castty H ∘ proj Γ a ∘ VARh)) ->
exists z1 z2,
eval nil τ (term_subst Γ nil τ VAR m) z1 /\
alpha_cong nil nil τ z1 z2 /\
LR τ z2 (〚m〛 ∘ VARh ).
Proof.
(* Induction on the term [m]. *)
induction m; simpl; intros.
(* var case *)
(* The hypothesis on [VAR] gives the result directly. *)
simpl. exists (VAR x σ i). exists (VAR x σ i).
destruct (H x σ i); intuition.
apply alpha_eq_refl.
(* bool case *)
exists (tbool nil n).
exists (tbool nil n).
split. apply ebool.
split. apply acong_bool.
exists n. split; auto.
rewrite <- (cat_assoc PLT). apply cat_respects; auto.
apply PLT.terminate_univ.
(* application case *)
(* Use both IHs, then the arrow-type clause of [LR] on the function part;
   alpha congruence is transported across evaluation via [eval_alpha]. *)
destruct (IHm1 VAR VARh H) as [z1 [z1' [?[??]]]].
destruct (IHm2 VAR VARh H) as [z2 [z2' [?[??]]]].
simpl in H1.
destruct (H2 z2' (〚 m2 〛 ∘ VARh)) as [z3 [z3' [?[??]]]]; auto.
eapply alpha_cong_value. apply H4.
eapply eval_value. eauto.
fold LR in H8.
inv H6.
apply alpha_eq_sym in H1.
apply alpha_eq_sym in H4.
destruct (eval_alpha _ _ _ _ H14 _ _ H1) as [q1 [??]].
destruct (eval_alpha _ _ _ _ H15 _ _ H4) as [q2 [??]].
inv H10.
assert (alpha_cong _ _ _ (subst nil σ₂ σ₁ x n₁ n₂) (subst nil σ₂ σ₁ _ m₂ q2)).
unfold ENV.subst. simpl.
apply term_subst_cong. intros.
unfold ENV.extend_map. simpl.
revert IN1 IN2. unfold inenv; simpl.
destruct (string_dec x a1).
destruct (string_dec x₂ a2).
unfold eq_rect_r; simpl.
intros. inv IN1.
replace IN1 with (Logic.eq_refl (Some σ)). simpl.
replace IN2 with (Logic.eq_refl (Some σ)). simpl.
auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
inv H13. elim n; auto. elim H25; auto.
intro. discriminate.
auto.
destruct (eval_alpha _ _ _ _ H16 _ _ H13) as [q3 [??]].
exists q3. exists z3'. split.
eapply eapp; eauto.
eapply eval_trans. apply H0. eauto.
replace z2 with q2. auto.
eapply eval_eq. eauto.
eapply eval_value; eauto.
split; auto.
eapply alpha_eq_trans.
apply alpha_eq_sym in H18. apply H18. auto.
revert H8. apply LR_equiv.
rewrite <- (cat_assoc PLT).
apply cat_respects; auto.
symmetry; apply PLT.pair_compose_commute.
(* if case *)
(* The scrutinee's [LR] gives a boolean literal; each branch is handled
   symmetrically by rewriting with [disc_cases_elem']. *)
destruct (IHm1 VAR VARh H) as [x' [x'' [?[??]]]].
simpl in H2.
destruct H2 as [b [??]].
destruct (IHm2 VAR VARh H) as [y' [y'' [?[??]]]].
destruct (IHm3 VAR VARh H) as [z' [z'' [?[??]]]].
destruct b.
exists y'. exists y''.
split; auto.
subst x''. inv H1.
eapply eif.
eauto. simpl. auto.
split; auto.
revert H6.
apply LR_equiv.
rewrite <- (cat_assoc PLT).
rewrite (PLT.pair_compose_commute false).
rewrite (cat_ident2 PLT).
rewrite H3.
rewrite disc_cases_elem'. auto.
exists z'. exists z''.
split; auto.
subst x''. inv H1.
eapply eif.
eauto. simpl. auto.
split; auto.
revert H9.
apply LR_equiv.
rewrite <- (cat_assoc PLT).
rewrite (PLT.pair_compose_commute false).
rewrite (cat_ident2 PLT).
rewrite H3.
rewrite disc_cases_elem'. auto.
(* lam case *)
(* A lambda is already a value; to show it lies in [LR] we extend both
   the syntactic substitution and the semantic environment with the
   argument, then appeal to the IH. *)
econstructor. econstructor. split. apply elam.
split. apply alpha_eq_refl.
intros.
set (VAR' := ENV.extend_map term Γ nil VAR x σ₁ n).
set (VARh' := bind Γ x σ₁ ∘ 〈 VARh, h' 〉).
destruct (IHm VAR' VARh') as [z [??]]. clear IHm.
simpl; intros.
split.
subst VAR' VARh'. unfold ENV.extend_map.
hnf in H2. simpl in *.
destruct (string_dec x a). inv H2.
replace H2 with (Logic.eq_refl (Some σ)). simpl.
unfold eq_rect_r. simpl. auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
apply H.
subst VAR' VARh'. unfold ENV.extend_map.
hnf in H2. simpl in *. unfold eq_rect_r. simpl.
unfold f_equal. unfold eq_sym. simpl.
revert H2.
generalize (ENV.proj_bind_neq x σ₁ a Γ).
generalize (ENV.proj_bind_eq x σ₁ a Γ).
simpl.
generalize (proj ((x,σ₁)::Γ) a).
unfold ENV.lookup_neq. simpl.
unfold ENV.lookup_eq. simpl.
destruct (string_dec x a). simpl; intros.
inv H4. replace H4 with (refl_equal (Some σ)). simpl.
revert H0. apply LR_equiv.
rewrite cast_refl. rewrite (cat_ident2 PLT).
rewrite (cat_assoc PLT).
rewrite H2; auto.
rewrite cast_refl. rewrite (cat_ident2 PLT).
rewrite PLT.pair_commute2. auto.
apply Eqdep_dec.UIP_dec. decide equality. decide equality.
intros.
destruct (H a σ H4). revert H6.
apply LR_equiv.
rewrite cast_refl in H3.
rewrite (cat_ident2 PLT) in H3.
rewrite <- (cat_assoc PLT).
rewrite <- (cat_assoc PLT).
rewrite (cat_assoc PLT _ _ _ _ h).
rewrite H3; auto.
rewrite <- (cat_assoc PLT).
rewrite PLT.pair_commute1.
auto.
destruct H2 as [?[??]].
(* Relate the direct extended substitution to the shift-then-substitute
   form produced by beta reduction, using [extend_shift_alpha]. *)
assert (alpha_cong _ _ _
(term_subst ((x, σ₁) :: Γ) nil σ₂ VAR' m)
(subst nil σ₂ σ₁ (fresh_atom nil)
(term_subst ((x, σ₁) :: Γ) (((fresh_atom nil), σ₁) :: nil) σ₂
(ENV.shift_vars' term term_wk tvar Γ nil x σ₁ VAR) m)
n)).
unfold VAR'.
unfold ENV.subst.
apply alpha_eq_sym.
eapply alpha_eq_trans. apply alpha_eq_sym. apply compose_term_subst.
apply term_subst_cong.
unfold ENV.shift_vars'.
intros. apply extend_shift_alpha; auto.
apply alpha_eq_refl.
destruct (eval_alpha _ _ _ _ H2 _ _ H5) as [q' [??]].
exists q'. exists x0.
split.
eapply eapp. apply elam. eauto. auto.
split.
eapply alpha_eq_trans.
apply alpha_eq_sym. eauto. auto.
revert H4. apply LR_equiv.
rewrite PLT.curry_apply3.
unfold VARh'.
rewrite (cat_assoc PLT). auto.
Qed.
(** A simpified form of the fundamental lemma that follows
from the inductively-strong one above.
*)
(* Closed-term corollary: every closed term evaluates to a value related
   (up to alpha) to its own denotation. *)
Lemma fundamental_lemma' : forall τ (m:term nil τ),
exists z z', eval nil τ m z /\ alpha_cong _ _ _ z z' /\ LR τ z' 〚 m 〛.
Proof.
intros.
(* Instantiate the fundamental lemma with the identity substitution
   [tvar nil] and the identity denotation. *)
destruct (fundamental_lemma nil τ m (tvar nil) id) as [z [z' [?[??]]]].
intros. hnf in H. simpl in H. discriminate.
destruct (eval_alpha _ _ _ _ H nil m) as [q [??]].
apply subst_alpha_ident. apply alpha_eq_refl.
intros. inv H4.
exists q. exists z'. split; auto.
split; auto.
apply alpha_eq_trans with nil z; auto.
apply alpha_eq_sym; auto.
revert H1. apply LR_equiv.
apply cat_ident1.
Qed.
(** ** Contextual equivalence and adequacy
*)
(** Now we define contextual equivalence. Contexts here are
given in "inside-out" form, which makes the induction in the
adequacy proof significantly easier.
*)
(* Inside-out contexts: [context τ Γ σ] is a context whose hole expects a
   term of type [σ] in environment [Γ] and whose overall result is a
   closed term of type [τ]. *)
Inductive context τ : env -> ty -> Type :=
| cxt_top : context τ nil τ
(* Place the hole as the scrutinee of an [if]. *)
| cxt_if : forall Γ σ,
term Γ σ ->
term Γ σ ->
context τ Γ σ ->
context τ Γ 2
(* Place the hole in function position of an application. *)
| cxt_appl : forall Γ σ₁ σ₂,
term Γ σ₁ ->
context τ Γ σ₂ ->
context τ Γ (σ₁ ⇒ σ₂)
(* Place the hole in argument position of an application. *)
| cxt_appr : forall Γ σ₁ σ₂,
term Γ (σ₁ ⇒ σ₂) ->
context τ Γ σ₂ ->
context τ Γ σ₁
(* Place the hole under a lambda binder. *)
| cxt_lam : forall Γ (x:atom) σ₁ σ₂,
context τ Γ (σ₁ ⇒ σ₂) ->
context τ ((x,σ₁)::Γ) σ₂.
(* [plug C x] fills the hole of context [C] with the term [x]. *)
Fixpoint plug τ Γ σ (C:context τ Γ σ) : term Γ σ -> term nil τ :=
match C in context _ Γ' σ' return term Γ' σ' -> term nil τ with
| cxt_top _ => fun x => x
| cxt_if _ Γ σ y z C' => fun x => plug τ _ _ C' (tif Γ σ x y z)
| cxt_appl _ Γ σ₁ σ₂ t C' => fun x => plug τ _ _ C' (tapp x t)
| cxt_appr _ Γ σ₁ σ₂ t C' => fun x => plug τ _ _ C' (tapp t x)
| cxt_lam _ Γ a σ₁ σ₂ C' => fun x => plug τ _ _ C' (tlam Γ a σ₁ σ₂ x)
end.
(* Contextual equivalence: [m] and [n] have the same evaluation behavior
   when plugged into any closing context of result type [τ]. *)
Definition cxt_eq τ Γ σ (m n:term Γ σ):=
forall (C:context τ Γ σ) (z:term nil τ),
eval nil τ (plug τ Γ σ C m) z <-> eval nil τ (plug τ Γ σ C n) z.
(** Adequacy means that terms with equivalent denotations
are contextually equivalent in any boolean context.
*)
(* Adequacy: denotationally equal terms are contextually equivalent at
   the boolean observation type. *)
Theorem adequacy : forall Γ τ (m n:term Γ τ),
〚m〛 ≈ 〚n〛 -> cxt_eq 2 Γ τ m n.
Proof.
(* Induction on the (inside-out) context [C]. *)
intros. intro.
revert n m H.
induction C.
(* Top context: use the fundamental lemma on both terms to extract
   boolean literals, then cancel the denotational equality to identify
   them. *)
simpl; intros.
destruct (fundamental_lemma' _ m) as [zm [zm' [?[??]]]]. simpl in *.
destruct (fundamental_lemma' _ n) as [zn [zn' [?[??]]]]. simpl in *.
destruct H2 as [bm [??]].
destruct H5 as [bn [??]].
subst zm' zn'. inv H1. inv H4.
rewrite H in H6.
rewrite H6 in H7.
assert (bm = bn).
apply (terminate_cancel false (cxt nil)) in H7.
apply disc_elem_inj in H7. auto.
exact (fun i => @ENV.internals.codom_elem nil None i (fun H => H) tt).
subst bn.
split; intro.
assert (z = (tbool nil bm)).
eapply eval_eq; eauto.
subst z. auto.
assert (z = (tbool nil bm)).
eapply eval_eq; eauto.
subst z. auto.
(* The compound-context cases reduce to the IH because the denotational
   constructions respect hom equality. *)
simpl; intros.
apply IHC. simpl.
apply cat_respects; auto.
apply PLT.pair_eq; auto.
simpl. intros.
apply IHC. simpl.
apply cat_respects; auto.
apply PLT.pair_eq; auto.
simpl; intros.
apply IHC. simpl.
apply cat_respects; auto.
apply PLT.pair_eq; auto.
simpl; intros.
apply IHC. simpl.
apply PLT.curry_eq.
apply cat_respects; auto.
Qed.
(** As a corollary of the fundamental lemma, we learn that
the calculus is strongly normalizing.
*)
(* Strong normalization: every closed term evaluates to some value.
   Immediate from the fundamental lemma. *)
Corollary normalizing : forall τ (m:term nil τ), exists z, eval nil τ m z.
Proof.
intros.
generalize (fundamental_lemma' τ m).
simpl. intros [z [?[?[??]]]]. exists z; auto.
Qed.
(** These should print "Closed under the global context", meaning these
theorems hold without the use of any axioms.
*)
(* Axiom audit for the two main results of this development. *)
Print Assumptions adequacy.
Print Assumptions normalizing.
|
{"author": "robdockins", "repo": "domains", "sha": "6feea4ed576f8aa849af9fa102633d5df1191360", "save_path": "github-repos/coq/robdockins-domains", "path": "github-repos/coq/robdockins-domains/domains-6feea4ed576f8aa849af9fa102633d5df1191360/st_lam.v"}
|
(*
File: List_Inversions.thy
Author: Manuel Eberl, TU München
A formalisation of inversions of a list and the O(n log n) divide-and-conquer algorithm
to count them.
*)
section \<open>The Inversions of a List\<close>
theory List_Inversions
imports
Main
"HOL-Combinatorics.Permutations"
begin
subsection \<open>Definition of inversions\<close>
context preorder
begin
text \<open>
We define inversions as pair of indices w.\,r.\,t.\ a preorder.
\<close>
(* An inversion is an index pair (i, j) with i < j whose elements are out
   of order with respect to the preorder. *)
inductive_set inversions :: "'a list \<Rightarrow> (nat \<times> nat) set" for xs :: "'a list" where
"i < j \<Longrightarrow> j < length xs \<Longrightarrow> less (xs ! j) (xs ! i) \<Longrightarrow> (i, j) \<in> inversions xs"
(* Inversions live inside a finite index rectangle, hence finiteness. *)
lemma inversions_subset: "inversions xs \<subseteq> Sigma {..<length xs} (\<lambda>i. {i<..<length xs})"
by (auto simp: inversions.simps)
lemma finite_inversions [intro]: "finite (inversions xs)"
by (rule finite_subset[OF inversions_subset]) auto
(* Set-comprehension characterization, convenient for automation. *)
lemma inversions_altdef: "inversions xs = {(i, j). i < j \<and> j < length xs \<and> less (xs ! j) (xs ! i)}"
by (auto simp: inversions.simps)
(* Executable form, registered as a code equation below. *)
lemma inversions_code:
"inversions xs =
Sigma {..<length xs} (\<lambda>i. Set.filter (\<lambda>j. less (xs ! j) (xs ! i)) {i<..<length xs})"
by (auto simp: inversions_altdef)
lemmas (in -) [code] = inversions_code
lemma inversions_trivial [simp]: "length xs \<le> Suc 0 \<Longrightarrow> inversions xs = {}"
by (auto simp: inversions_altdef)
lemma inversions_imp_less:
"z \<in> inversions xs \<Longrightarrow> fst z < snd z"
"z \<in> inversions xs \<Longrightarrow> snd z < length xs"
by (auto simp: inversions_altdef)
lemma inversions_Nil [simp]: "inversions [] = {}"
by (auto simp: inversions_altdef)
(* Recursive characterization: inversions of x # xs split into those of
   the head against the tail, and shifted inversions of the tail. *)
lemma inversions_Cons:
"inversions (x # xs) =
(\<lambda>j. (0, j + 1)) ` {j\<in>{..<length xs}. less (xs ! j) x} \<union>
map_prod Suc Suc ` inversions xs" (is "_ = ?rhs")
proof -
have "z \<in> inversions (x # xs) \<longleftrightarrow> z \<in> ?rhs" for z
by (cases z) (auto simp: inversions_altdef map_prod_def nth_Cons split: nat.splits)
thus ?thesis by blast
qed
text \<open>
The following function returns the inversions between two lists, i.\,e.\ all pairs of
an element in the first list with an element in the second list such that the former
is greater than the latter.
\<close>
(* Cross inversions: pairs (i, j) where xs ! i is greater than ys ! j. *)
definition inversions_between :: "'a list \<Rightarrow> 'a list \<Rightarrow> (nat \<times> nat) set" where
"inversions_between xs ys =
{(i, j) \<in> {..<length xs}\<times>{..<length ys}. less (ys ! j) (xs ! i)}"
lemma finite_inversions_between [intro]: "finite (inversions_between xs ys)"
by (rule finite_subset[of _ "{..<length xs} \<times> {..<length xs + length ys}"])
(auto simp: inversions_between_def)
lemma inversions_between_Nil [simp]:
"inversions_between [] ys = {}"
"inversions_between xs [] = {}"
by (simp_all add: inversions_between_def)
text \<open>
We can now show the following equality for the inversions of the concatenation of two lists:
\<close>
(* Inversions of xs @ ys decompose into: inversions within xs, shifted
   inversions within ys, and shifted cross inversions between the lists. *)
proposition inversions_append:
fixes xs ys
defines "m \<equiv> length xs" and "n \<equiv> length ys"
shows "inversions (xs @ ys) =
inversions xs \<union> map_prod ((+) m) ((+) m) ` inversions ys \<union>
map_prod id ((+) m) ` inversions_between xs ys"
(is "_ = ?rhs")
proof -
note defs = inversions_altdef inversions_between_def m_def n_def map_prod_def
have "z \<in> inversions (xs @ ys) \<longleftrightarrow> z \<in> ?rhs" for z
proof
assume "z \<in> inversions (xs @ ys)"
then obtain i j where [simp]: "z = (i, j)"
and ij: "i < j" "j < m + n" "less ((xs @ ys) ! j) ((xs @ ys) ! i)"
by (cases z) (auto simp: inversions_altdef m_def n_def)
(* Three cases: both indices in xs, both in ys, or straddling. *)
from ij consider "j < m" | "i \<ge> m" | "i < m" "j \<ge> m" by linarith
thus "z \<in> ?rhs"
proof cases
assume "i < m" "j \<ge> m"
define j' where "j' = j - m"
have [simp]: "j = m + j'"
using \<open>j \<ge> m\<close> by (simp add: j'_def)
from ij and \<open>i < m\<close> show ?thesis
by (auto simp: inversions_altdef map_prod_def inversions_between_def nth_append m_def n_def)
next
assume "i \<ge> m"
define i' j' where "i' = i - m" and "j' = j - m"
have [simp]: "i = m + i'" "j = m + j'"
using \<open>i < j\<close> and \<open>i \<ge> m\<close> by (simp_all add: i'_def j'_def)
from ij show ?thesis
by (auto simp: inversions_altdef map_prod_def nth_append m_def n_def)
qed (use ij in \<open>auto simp: nth_append defs\<close>)
qed (auto simp: nth_append defs)
thus ?thesis by blast
qed
subsection \<open>Counting inversions\<close>
text \<open>
We now define versions of @{const inversions} and @{const inversions_between} that
only return the \<^emph>\<open>number\<close> of inversions.
\<close>
(* Cardinality versions of the two inversion sets. *)
definition inversion_number :: "'a list \<Rightarrow> nat" where
"inversion_number xs = card (inversions xs)"
definition inversion_number_between where
"inversion_number_between xs ys = card (inversions_between xs ys)"
(* Executable form of the cross-inversion set. *)
lemma inversions_between_code:
"inversions_between xs ys =
Set.filter (\<lambda>(i,j). less (ys ! j) (xs ! i)) ({..<length xs}\<times>{..<length ys})"
by (auto simp: inversions_between_def)
lemmas (in -) [code] = inversions_between_code
lemma inversion_number_Nil [simp]: "inversion_number [] = 0"
by (simp add: inversion_number_def)
lemma inversion_number_trivial [simp]: "length xs \<le> Suc 0 \<Longrightarrow> inversion_number xs = 0"
by (auto simp: inversion_number_def)
lemma inversion_number_between_Nil [simp]:
"inversion_number_between [] ys = 0"
"inversion_number_between xs [] = 0"
by (simp_all add: inversion_number_between_def)
text \<open>
We again get the following nice equation for the number of inversions of a concatenation:
\<close>
(* Counting analogue of inversions_append: the three components of the
   decomposition are pairwise disjoint and the shifts are injective. *)
proposition inversion_number_append:
"inversion_number (xs @ ys) =
inversion_number xs + inversion_number ys + inversion_number_between xs ys"
proof -
define m n where "m = length xs" and "n = length ys"
let ?A = "inversions xs"
let ?B = "map_prod ((+) m) ((+) m) ` inversions ys"
let ?C = "map_prod id ((+) m) ` inversions_between xs ys"
have "inversion_number (xs @ ys) = card (?A \<union> ?B \<union> ?C)"
by (simp add: inversion_number_def inversions_append m_def)
also have "\<dots> = card (?A \<union> ?B) + card ?C"
by (intro card_Un_disjoint finite_inversions finite_inversions_between finite_UnI finite_imageI)
(auto simp: inversions_altdef inversions_between_def m_def n_def)
also have "card (?A \<union> ?B) = inversion_number xs + card ?B" unfolding inversion_number_def
by (intro card_Un_disjoint finite_inversions finite_UnI finite_imageI)
(auto simp: inversions_altdef m_def n_def)
also have "card ?B = inversion_number ys" unfolding inversion_number_def
by (intro card_image) (auto simp: map_prod_def inj_on_def)
also have "card ?C = inversion_number_between xs ys"
unfolding inversion_number_between_def by (intro card_image inj_onI) (auto simp: map_prod_def)
finally show ?thesis .
qed
subsection \<open>Stability of inversions between lists under permutations\<close>
text \<open>
A crucial fact for counting list inversions with merge sort is that the number
of inversions \<^emph>\<open>between\<close> two lists does not change when the lists are permuted. This is
true because the set of inversions commutes with the act of permuting the list:
\<close>
(* Permuting the first list relabels the first components of the cross
   inversions via the inverse permutation. *)
lemma inversions_between_permute1:
assumes "\<pi> permutes {..<length xs}"
shows "inversions_between (permute_list \<pi> xs) ys =
map_prod (inv \<pi>) id ` inversions_between xs ys"
proof -
from assms have [simp]: "\<pi> i < length xs" if "i < length xs" "\<pi> permutes {..<length xs}" for i \<pi>
using permutes_in_image[OF that(2)] that by auto
have *: "inv \<pi> permutes {..<length xs}"
using assms by (rule permutes_inv)
from assms * show ?thesis unfolding inversions_between_def map_prod_def
by (force simp: image_iff permute_list_nth permutes_inverses intro: exI[of _ "\<pi> i" for i])
qed
(* Symmetric statement for the second list. *)
lemma inversions_between_permute2:
assumes "\<pi> permutes {..<length ys}"
shows "inversions_between xs (permute_list \<pi> ys) =
map_prod id (inv \<pi>) ` inversions_between xs ys"
proof -
from assms have [simp]: "\<pi> i < length ys" if "i < length ys" "\<pi> permutes {..<length ys}" for i \<pi>
using permutes_in_image[OF that(2)] that by auto
have *: "inv \<pi> permutes {..<length ys}"
using assms by (rule permutes_inv)
from assms * show ?thesis unfolding inversions_between_def map_prod_def
by (force simp: image_iff permute_list_nth permutes_inverses intro: exI[of _ "\<pi> i" for i])
qed
(* Combined statement: permuting both lists relabels both components. *)
proposition inversions_between_permute:
assumes "\<pi>1 permutes {..<length xs}" and "\<pi>2 permutes {..<length ys}"
shows "inversions_between (permute_list \<pi>1 xs) (permute_list \<pi>2 ys) =
map_prod (inv \<pi>1) (inv \<pi>2) ` inversions_between xs ys"
by (simp add: inversions_between_permute1 inversions_between_permute2 assms
map_prod_def image_image case_prod_unfold)
(* The relabeling is injective, so the count is invariant. *)
corollary inversion_number_between_permute:
assumes "\<pi>1 permutes {..<length xs}" and "\<pi>2 permutes {..<length ys}"
shows "inversion_number_between (permute_list \<pi>1 xs) (permute_list \<pi>2 ys) =
inversion_number_between xs ys"
proof -
have "inversion_number_between (permute_list \<pi>1 xs) (permute_list \<pi>2 ys) =
card (map_prod (inv \<pi>1) (inv \<pi>2) ` inversions_between xs ys)"
by (simp add: inversion_number_between_def inversions_between_permute assms)
also have "\<dots> = inversion_number_between xs ys"
unfolding inversion_number_between_def using assms[THEN permutes_inj_on[OF permutes_inv]]
by (intro card_image inj_onI) (auto simp: map_prod_def)
finally show ?thesis .
qed
text \<open>
The following form of the above theorem is nicer to apply since it has the form of a
congruence rule.
\<close>
(* Congruence form: any multiset-equal replacement of either list leaves
   the cross-inversion count unchanged. *)
corollary inversion_number_between_cong_mset:
assumes "mset xs = mset xs'" and "mset ys = mset ys'"
shows "inversion_number_between xs ys = inversion_number_between xs' ys'"
proof -
obtain \<pi>1 \<pi>2 where \<pi>12: "\<pi>1 permutes {..<length xs'}" "xs = permute_list \<pi>1 xs'"
"\<pi>2 permutes {..<length ys'}" "ys = permute_list \<pi>2 ys'"
using assms[THEN mset_eq_permutation] by metis
thus ?thesis by (simp add: inversion_number_between_permute)
qed
subsection \<open>Inversions between sorted lists\<close>
text \<open>
Another fact that is crucial to the efficient computation of the inversion number is this:
If we have two sorted lists, we can reduce computing the inversions by inspecting the
first elements and deleting one of them.
\<close>
(* For sorted lists, comparing the heads decides all inversions involving
   the smaller head, allowing a merge-style recursion. *)
lemma inversions_between_Cons_Cons:
assumes "sorted_wrt less_eq (x # xs)" and "sorted_wrt less_eq (y # ys)"
shows "inversions_between (x # xs) (y # ys) =
(if \<not>less y x then
map_prod Suc id ` inversions_between xs (y # ys)
else
{..<length (x#xs)} \<times> {0} \<union>
map_prod id Suc ` inversions_between (x # xs) ys)"
using assms unfolding inversions_between_def map_prod_def
by (auto, (auto simp: set_conv_nth nth_Cons less_le_not_le image_iff
intro: order_trans split: nat.splits)?)
(* A bit fragile, but doing this manually is annoying *)
text \<open>
This leads to the following analogous equation for counting the inversions between two
sorted lists. Note that a single step of this only takes constant time (assuming we
pre-computed the lengths of the lists) so that the entire function runs in linear time.
\<close>
text \<open>
We now define a function to compute the inversion number between two lists that are
assumed to be sorted using the equalities we just derived.
\<close>
(* Linear-time cross-inversion count for two sorted lists; each step only
   inspects the heads (cf. inversions_between_Cons_Cons). *)
fun inversion_number_between_sorted :: "'a list \<Rightarrow> 'a list \<Rightarrow> nat" where
"inversion_number_between_sorted [] ys = 0"
| "inversion_number_between_sorted xs [] = 0"
| "inversion_number_between_sorted (x # xs) (y # ys) =
(if \<not>less y x then
inversion_number_between_sorted xs (y # ys)
else
(* y is smaller than every element of x # xs, contributing one
   inversion per element. *)
inversion_number_between_sorted (x # xs) ys + length (x # xs))"
theorem inversion_number_between_sorted_correct:
"sorted_wrt less_eq xs \<Longrightarrow> sorted_wrt less_eq ys \<Longrightarrow>
inversion_number_between_sorted xs ys = inversion_number_between xs ys"
by (induction xs ys rule: inversion_number_between_sorted.induct)
(simp_all add: inversion_number_between_Cons_Cons)
end
subsection \<open>Merge sort\<close>
(* TODO: Could be replaced by mergesort from HOL-Library in Isabelle 2019. *)
text \<open>
For convenience, we first define a simple merge sort that does not compute the inversions.
At this point, we need to start assuming a linear ordering since the merging function
does not work otherwise.
\<close>
context linorder
begin
(* Split a list into halves at its midpoint. *)
definition split_list
where "split_list xs = (let n = length xs div 2 in (take n xs, drop n xs))"
(* Standard merge of two sorted lists. *)
fun merge_lists :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"merge_lists [] ys = ys"
| "merge_lists xs [] = xs"
| "merge_lists (x # xs) (y # ys) =
(if less_eq x y then x # merge_lists xs (y # ys) else y # merge_lists (x # xs) ys)"
lemma set_merge_lists [simp]: "set (merge_lists xs ys) = set xs \<union> set ys"
by (induction xs ys rule: merge_lists.induct) auto
lemma mset_merge_lists [simp]: "mset (merge_lists xs ys) = mset xs + mset ys"
by (induction xs ys rule: merge_lists.induct) auto
lemma sorted_merge_lists [simp, intro]:
"sorted xs \<Longrightarrow> sorted ys \<Longrightarrow> sorted (merge_lists xs ys)"
by (induction xs ys rule: merge_lists.induct) auto
(* Top-down merge sort; recursion terminates since halves get shorter. *)
fun merge_sort :: "'a list \<Rightarrow> 'a list" where
"merge_sort xs =
(if length xs \<le> 1 then
xs
else
merge_lists (merge_sort (take (length xs div 2) xs))
(merge_sort (drop (length xs div 2) xs)))"
lemmas [simp del] = merge_sort.simps
lemma merge_sort_trivial [simp]: "length xs \<le> Suc 0 \<Longrightarrow> merge_sort xs = xs"
by (subst merge_sort.simps) auto
(* merge_sort permutes its input and produces a sorted result. *)
theorem mset_merge_sort [simp]: "mset (merge_sort xs) = mset xs"
by (induction xs rule: merge_sort.induct)
(subst merge_sort.simps, auto simp flip: mset_append)
corollary set_merge_sort [simp]: "set (merge_sort xs) = set xs"
by (rule mset_eq_setD) simp_all
theorem sorted_merge_sort [simp, intro]: "sorted (merge_sort xs)"
by (induction xs rule: merge_sort.induct)
(subst merge_sort.simps, use sorted01 in auto)
(* Code equation: sort both lists first, then count in linear time. *)
lemma inversion_number_between_code:
"inversion_number_between xs ys = inversion_number_between_sorted (sort xs) (sort ys)"
by (subst inversion_number_between_sorted_correct)
(simp_all add: cong: inversion_number_between_cong_mset)
lemmas (in -) [code_unfold] = inversion_number_between_code
subsection \<open>Merge sort with inversion counting\<close>
text \<open>
Finally, we can put together all the components and define a variant of merge sort that
counts the number of inversions in the original list:
\<close>
(* O(n log n) divide and conquer: sort the list and simultaneously count
   its inversions, combining subcounts with the sorted cross count. *)
function sort_and_count_inversions :: "'a list \<Rightarrow> 'a list \<times> nat" where
"sort_and_count_inversions xs =
(if length xs \<le> 1 then
(xs, 0)
else
let (xs1, xs2) = split_list xs;
(xs1', m) = sort_and_count_inversions xs1;
(xs2', n) = sort_and_count_inversions xs2
in
(merge_lists xs1' xs2', m + n + inversion_number_between_sorted xs1' xs2'))"
by auto
termination by (relation "measure length") (auto simp: split_list_def Let_def)
lemmas [simp del] = sort_and_count_inversions.simps
text \<open>
The projection of this function to the first component is simply the standard merge sort
algorithm that we defined and proved correct before.
\<close>
theorem fst_sort_and_count_inversions [simp]:
"fst (sort_and_count_inversions xs) = merge_sort xs"
by (induction xs rule: length_induct)
(subst sort_and_count_inversions.simps, subst merge_sort.simps,
simp_all add: split_list_def case_prod_unfold Let_def)
text \<open>
The projection to the second component is the inversion number.
\<close>
theorem snd_sort_and_count_inversions [simp]:
"snd (sort_and_count_inversions xs) = inversion_number xs"
proof (induction xs rule: length_induct)
case (1 xs)
show ?case
proof (cases "length xs \<le> 1")
case False
(* Decompose xs as its two halves and apply inversion_number_append;
   the recursive counts match by the induction hypothesis. *)
have "xs = take (length xs div 2) xs @ drop (length xs div 2) xs" by simp
also have "inversion_number \<dots> = snd (sort_and_count_inversions xs)"
by (subst inversion_number_append, subst sort_and_count_inversions.simps)
(use False 1 in \<open>auto simp: Let_def split_list_def case_prod_unfold
inversion_number_between_sorted_correct
cong: inversion_number_between_cong_mset\<close>)
finally show ?thesis ..
qed (auto simp: sort_and_count_inversions.simps)
qed
lemmas (in -) [code_unfold] = snd_sort_and_count_inversions [symmetric]
end
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/List_Inversions/List_Inversions.thy"}
|
import os
from os.path import join

import cv2
import numpy as np
from tqdm import tqdm
def load_sphere():
    """Load the template sphere mesh stored next to this module.

    Returns:
        tuple: (vertices, faces) where vertices is a float array of
        coordinates and faces an integer array of triangle indices.
    """
    cur_dir = os.path.dirname(__file__)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and yields the same default dtype.
    faces = np.loadtxt(join(cur_dir, 'sphere_faces_20.txt'), dtype=int)
    vertices = np.loadtxt(join(cur_dir, 'sphere_vertices_20.txt'))
    return vertices, faces
def load_cylinder():
    """Load the template cylinder mesh stored next to this module.

    Returns:
        tuple: (vertices, faces) where vertices is a float array of
        coordinates and faces an integer array of triangle indices.
    """
    cur_dir = os.path.dirname(__file__)
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented replacement and yields the same default dtype.
    faces = np.loadtxt(join(cur_dir, 'cylinder_faces_20.txt'), dtype=int)
    vertices = np.loadtxt(join(cur_dir, 'cylinder_vertices_20.txt'))
    return vertices, faces
def create_point_(points, r=0.01):
    """Create small spheres centered at the given points.

    Args:
        points (array): (N, 3) or (N, 4) point locations; only the first
            three columns (xyz) are used.
        r (float, optional): sphere radius. Defaults to 0.01.

    Returns:
        dict: stacked 'vertices' and index-offset 'faces' of all N spheres.
    """
    nPoints = points.shape[0]
    vert, face = load_sphere()
    nVerts = vert.shape[0]
    # Scale the template sphere to the requested radius; the original code
    # accepted r but never applied it.
    vert = vert[None, :, :].repeat(nPoints, 0) * r
    # Translate each copy to its point.  Slicing to :3 also makes the
    # documented (N, 4) input shape actually broadcast.
    vert = vert + points[:, None, :3]
    verts = np.vstack(vert)
    # Offset each sphere's face indices into the stacked vertex array.
    face = face[None, :, :].repeat(nPoints, 0)
    face = face + nVerts * np.arange(nPoints).reshape(nPoints, 1, 1)
    faces = np.vstack(face)
    return {'vertices': verts, 'faces': faces, 'name': 'points'}
def calRot(axis, direc):
    """Compute the rotation matrix taking ``axis`` onto ``direc``.

    Args:
        axis (array): (3,) source direction (need not be unit length).
        direc (array): (3,) target direction (need not be unit length).

    Returns:
        np.ndarray: (3, 3) rotation matrix.
    """
    direc = direc/np.linalg.norm(direc)
    axis = axis/np.linalg.norm(axis)
    rotdir = np.cross(axis, direc)
    nrm = np.linalg.norm(rotdir)
    # Degenerate case: (nearly) parallel vectors give a zero cross product;
    # the original code divided by zero here and produced NaNs.  Return the
    # identity instead, mirroring get_rotation_from_two_directions below.
    # NOTE(review): for exactly anti-parallel inputs a 180-degree rotation
    # would be the true answer -- confirm callers never hit that case.
    if nrm < 1e-8:
        return np.eye(3)
    rotdir = rotdir/nrm
    # Clamp the dot product so floating-point error cannot push it outside
    # [-1, 1] and make arccos return NaN.
    rotdir = rotdir * np.arccos(np.clip(np.dot(direc, axis), -1.0, 1.0))
    rotmat, _ = cv2.Rodrigues(rotdir)
    return rotmat
def create_line_(start, end, r=0.01, col=None):
    """Create a cylinder mesh connecting ``start`` and ``end``.

    Args:
        start, end (array): segment endpoints (xyz in the first 3 entries).
        r (float, optional): cylinder radius. Defaults to 0.01.
        col (array, optional): RGB color replicated over all vertices.

    Returns:
        dict: 'vertices', 'faces', 'name' (plus 'colors' when col is given).
    """
    verts, faces = load_cylinder()
    # Stretch the unit template: radius in the xy-plane, half-length in z.
    seg_len = np.linalg.norm(end[:3] - start[:3])
    verts[:, :2] *= r
    verts[:, 2] *= seg_len/2
    # Rotate the z-axis onto the segment direction, then move to midpoint.
    rot = calRot(np.array([0, 0, 1]), end - start)
    verts = verts @ rot.T + (start + end)/2
    mesh = {'vertices': verts, 'faces': faces, 'name': 'line'}
    if col is not None:
        mesh['colors'] = col.reshape(-1, 3).repeat(verts.shape[0], 0)
    return mesh
def create_ground_(
    center=[0, 0, 0], xdir=[1, 0, 0], ydir=[0, 1, 0],  # placement
    step=1, xrange=10, yrange=10,                      # extent
    white=[1., 1., 1.], black=[0., 0., 0.],            # cell colors
    two_sides=True
):
    """Build a checkerboard ground mesh.

    Each grid cell is a quad (two triangles) colored alternately white or
    black.

    Args:
        center: grid origin.
        xdir, ydir: in-plane axis directions.
        step: cell size (scales xdir/ydir).
        xrange, yrange: number of cells along each axis.
        white, black: RGB colors of the two cell kinds.
        two_sides: if True the grid also extends to negative indices.

    Returns:
        dict: {'vertices', 'faces', 'colors', 'name': 'ground'}
    """
    if isinstance(center, list):
        center = np.array(center)
        xdir = np.array(xdir)
        ydir = np.array(ydir)
    print('[Vis Info] {}, x: {}, y: {}'.format(center, xdir, ydir))
    xdir = xdir * step
    ydir = ydir * step
    lo_x = -xrange if two_sides else 0
    lo_y = -yrange if two_sides else 0
    quad_faces = np.array([[2, 3, 0], [0, 1, 2]])
    all_verts, all_faces, all_colors = [], [], []
    cell_index = 0
    for ix in range(lo_x, xrange):
        for iy in range(lo_y, yrange):
            # Four corners of cell (ix, iy), counter-clockwise.
            corners = np.stack([
                center + ix * xdir + iy * ydir,
                center + (ix + 1) * xdir + iy * ydir,
                center + (ix + 1) * xdir + (iy + 1) * ydir,
                center + ix * xdir + (iy + 1) * ydir,
            ])
            # Same index parity -> white, otherwise black (checkerboard).
            shade = white if (ix + iy) % 2 == 0 else black
            all_verts.append(corners)
            all_faces.append(quad_faces + 4 * cell_index)
            all_colors.append(np.stack([shade] * 4))
            cell_index += 1
    return {
        'vertices': np.vstack(all_verts),
        'faces': np.vstack(all_faces),
        'colors': np.vstack(all_colors),
        'name': 'ground',
    }
def get_rotation_from_two_directions(direc0, direc1):
    """Rotation matrix mapping direction ``direc0`` onto ``direc1``.

    Falls back to the identity when the two directions are (nearly)
    parallel, since the rotation axis is then ill-defined.
    """
    u = direc0 / np.linalg.norm(direc0)
    v = direc1 / np.linalg.norm(direc1)
    axis = np.cross(u, v)
    if np.linalg.norm(axis) < 1e-2:
        return np.eye(3)
    # Rodrigues vector: unit rotation axis scaled by the rotation angle.
    angle = np.arccos(np.dot(u, v))
    rodrigues_vec = axis / np.linalg.norm(axis) * angle
    rotmat, _ = cv2.Rodrigues(rodrigues_vec)
    return rotmat
# Template mesh for a unit cube spanning [0, 1]^3: its 8 corner vertices and
# 12 triangles (two per face). create_plane() below scales, rotates and
# translates a copy of this template.
PLANE_VERTICES = np.array([
    [0., 0., 0.],
    [1., 0., 0.],
    [0., 0., 1.],
    [1., 0., 1.],
    [0., 1., 0.],
    [1., 1., 0.],
    [0., 1., 1.],
    [1., 1., 1.]])
PLANE_FACES = np.array([
    [4, 7, 5],
    [4, 6, 7],
    [0, 2, 4],
    [2, 6, 4],
    [0, 1, 2],
    [1, 3, 2],
    [1, 5, 7],
    [1, 7, 3],
    [2, 3, 7],
    [2, 7, 6],
    [0, 4, 1],
    [1, 4, 5]], dtype=np.int32)
def create_plane(normal, center, dx=1, dy=1, dz=0.005, color=[0.8, 0.8, 0.8]):
    """Create a thin box ("plane") centered at ``center`` facing ``normal``.

    Args:
        normal: direction the template's +z axis is rotated onto.
        center: (3,) center of the plane.
        dx, dy, dz: extents of the box along its local axes.
        color: currently unused; kept for interface compatibility.

    Returns:
        dict: {'vertices', 'faces', 'name': 'plane'}
    """
    verts = PLANE_VERTICES.copy()
    # Scale the unit cube to (dx, dy, dz) and center it on the origin.
    extents = np.array([dx, dy, dz])
    verts = verts * extents - extents / 2
    # Rotate so the template's +z axis points along the requested normal.
    rotmat = get_rotation_from_two_directions(
        np.array([0, 0, 1]), np.array(normal))
    verts = verts @ rotmat.T
    verts = verts + np.array(center).reshape(-1, 3)
    return {'vertices': verts, 'faces': PLANE_FACES.copy(), 'name': 'plane'}
def create_cameras(cameras):
    """Create a camera-body mesh for every camera in ``cameras``.

    The hard-coded ``vertex``/``tri`` arrays are a single camera-body
    template (scaled by 0.5); each copy is mapped to world coordinates
    using the camera extrinsics.

    Args:
        cameras (dict): camera name -> dict with at least 'R' (3x3 rotation)
            and 'T' ((3, 1) translation) extrinsics.

    Returns:
        list: one mesh dict per camera with 'vertices', 'faces', 'name',
            'vid'.
    """
    vertex = np.array([[0.203982,0.061435,0.00717595],[-0.116019,0.061435,0.00717595],[-0.116019,-0.178565,0.00717595],[0.203982,-0.178565,0.00717595],[0.203982,0.061435,-0.092824],[-0.116019,0.061435,-0.092824],[-0.116019,-0.178565,-0.092824],[0.203982,-0.178565,-0.092824],[0.131154,-0.0361827,0.00717595],[0.131154,-0.0361827,0.092176],[0.122849,-0.015207,0.00717595],[0.122849,-0.015207,0.092176],[0.109589,0.00304419,0.00717595],[0.109589,0.00304419,0.092176],[0.092206,0.0174247,0.00717595],[0.092206,0.0174247,0.092176],[0.071793,0.0270302,0.00717595],[0.071793,0.0270302,0.092176],[0.0496327,0.0312577,0.00717595],[0.0496327,0.0312577,0.092176],[0.0271172,0.0298412,0.00717595],[0.0271172,0.0298412,0.092176],[0.00566135,0.0228697,0.00717595],[0.00566135,0.0228697,0.092176],[-0.0133865,0.0107812,0.00717595],[-0.0133865,0.0107812,0.092176],[-0.02883,-0.0056643,0.00717595],[-0.02883,-0.0056643,0.092176],[-0.0396985,-0.0254336,0.00717595],[-0.0396985,-0.0254336,0.092176],[-0.045309,-0.0472848,0.00717595],[-0.045309,-0.0472848,0.092176],[-0.045309,-0.069845,0.00717595],[-0.045309,-0.069845,0.092176],[-0.0396985,-0.091696,0.00717595],[-0.0396985,-0.091696,0.092176],[-0.02883,-0.111466,0.00717595],[-0.02883,-0.111466,0.092176],[-0.0133865,-0.127911,0.00717595],[-0.0133865,-0.127911,0.092176],[0.00566135,-0.14,0.00717595],[0.00566135,-0.14,0.092176],[0.0271172,-0.146971,0.00717595],[0.0271172,-0.146971,0.092176],[0.0496327,-0.148388,0.00717595],[0.0496327,-0.148388,0.092176],[0.071793,-0.14416,0.00717595],[0.071793,-0.14416,0.092176],[0.092206,-0.134554,0.00717595],[0.092206,-0.134554,0.092176],[0.109589,-0.120174,0.00717595],[0.109589,-0.120174,0.092176],[0.122849,-0.101923,0.00717595],[0.122849,-0.101923,0.092176],[0.131154,-0.080947,0.00717595],[0.131154,-0.080947,0.092176],[0.133982,-0.058565,0.00717595],[0.133982,-0.058565,0.092176],[-0.0074325,0.061435,-0.0372285],[-0.0074325,0.074435,-0.0372285],[-0.0115845,0.061435,-0.0319846],[-0.0115845,0.074435,-0.0319846],[-0.018215,
0.061435,-0.0274218],[-0.018215,0.074435,-0.0274218],[-0.0269065,0.061435,-0.0238267],[-0.0269065,0.074435,-0.0238267],[-0.0371125,0.061435,-0.0214253],[-0.0371125,0.074435,-0.0214253],[-0.048193,0.061435,-0.0203685],[-0.048193,0.074435,-0.0203685],[-0.0594505,0.061435,-0.0207226],[-0.0594505,0.074435,-0.0207226],[-0.0701785,0.061435,-0.0224655],[-0.0701785,0.074435,-0.0224655],[-0.0797025,0.061435,-0.0254875],[-0.0797025,0.074435,-0.0254875],[-0.0874245,0.061435,-0.0295989],[-0.0874245,0.074435,-0.0295989],[-0.0928585,0.061435,-0.0345412],[-0.0928585,0.074435,-0.0345412],[-0.0956635,0.061435,-0.040004],[-0.0956635,0.074435,-0.040004],[-0.0956635,0.061435,-0.045644],[-0.0956635,0.074435,-0.045644],[-0.0928585,0.061435,-0.051107],[-0.0928585,0.074435,-0.051107],[-0.0874245,0.061435,-0.056049],[-0.0874245,0.074435,-0.056049],[-0.0797025,0.061435,-0.0601605],[-0.0797025,0.074435,-0.0601605],[-0.0701785,0.061435,-0.0631825],[-0.0701785,0.074435,-0.0631825],[-0.0594505,0.061435,-0.0649255],[-0.0594505,0.074435,-0.0649255],[-0.048193,0.061435,-0.0652795],[-0.048193,0.074435,-0.0652795],[-0.0371125,0.061435,-0.064223],[-0.0371125,0.074435,-0.064223],[-0.0269065,0.061435,-0.0618215],[-0.0269065,0.074435,-0.0618215],[-0.018215,0.061435,-0.0582265],[-0.018215,0.074435,-0.0582265],[-0.0115845,0.061435,-0.0536635],[-0.0115845,0.074435,-0.0536635],[-0.0074325,0.061435,-0.0484195],[-0.0074325,0.074435,-0.0484195],[-0.0060185,0.061435,-0.0428241],[-0.0060185,0.074435,-0.0428241]])*0.5
    tri = [[4,3,2],[1,4,2],[6,1,2],[6,5,1],[8,4,1],[5,8,1],[3,7,2],[7,6,2],[4,7,3],[8,7,4],[6,7,5],[7,8,5],[43,42,44],[42,43,41],[43,46,45],[46,43,44],[58,9,57],[9,58,10],[55,58,57],[56,58,55],[53,54,55],[54,56,55],[12,11,9],[12,9,10],[21,20,22],[20,21,19],[34,33,32],[32,33,31],[35,36,37],[37,36,38],[33,36,35],[36,33,34],[29,30,31],[30,32,31],[40,39,37],[40,37,38],[39,40,41],[40,42,41],[47,48,49],[49,48,50],[48,47,45],[46,48,45],[49,52,51],[52,49,50],[52,53,51],[52,54,53],[14,15,13],[15,14,16],[11,14,13],[12,14,11],[18,17,15],[18,15,16],[17,18,19],[18,20,19],[27,35,37],[17,27,15],[27,53,55],[27,49,51],[11,27,9],[27,47,49],[27,33,35],[23,27,21],[27,39,41],[27,55,57],[9,27,57],[15,27,13],[39,27,37],[47,27,45],[53,27,51],[27,11,13],[43,27,41],[27,29,31],[27,43,45],[27,17,19],[21,27,19],[33,27,31],[27,23,25],[23,24,25],[25,24,26],[24,21,22],[24,23,21],[28,36,34],[42,28,44],[28,58,56],[54,28,56],[52,28,54],[28,34,32],[28,46,44],[18,28,20],[20,28,22],[30,28,32],[40,28,42],[58,28,10],[28,48,46],[28,12,10],[28,14,12],[36,28,38],[28,24,22],[28,40,38],[48,28,50],[28,52,50],[14,28,16],[28,18,16],[24,28,26],[28,27,25],[28,25,26],[28,30,29],[27,28,29],[108,59,60],[59,108,107],[62,59,61],[59,62,60],[103,102,101],[102,103,104],[64,61,63],[64,62,61],[70,67,69],[67,70,68],[70,71,72],[71,70,69],[83,84,82],[83,82,81],[86,85,87],[86,87,88],[86,83,85],[83,86,84],[77,78,75],[75,78,76],[105,106,103],[103,106,104],[108,106,107],[106,105,107],[97,96,95],[96,97,98],[96,93,95],[93,96,94],[93,92,91],[92,93,94],[79,105,103],[59,79,61],[79,93,91],[83,79,85],[85,79,87],[61,79,63],[79,103,101],[65,79,67],[79,99,97],[89,79,91],[79,77,75],[79,59,107],[67,79,69],[79,89,87],[79,73,71],[105,79,107],[79,97,95],[79,71,69],[79,83,81],[99,79,101],[93,79,95],[79,65,63],[73,79,75],[99,100,97],[97,100,98],[102,100,101],[100,99,101],[89,90,87],[87,90,88],[90,89,91],[92,90,91],[66,67,68],[66,65,67],[66,64,63],[65,66,63],[74,75,76],[74,73,75],[71,74,72],[73,74,71],[80,106,108],[74,80,72],[86,80,84],[84,80,82],[64,80
,62],[80,108,60],[80,100,102],[62,80,60],[66,80,64],[80,70,72],[80,102,104],[96,80,94],[80,90,92],[70,80,68],[80,86,88],[78,80,76],[106,80,104],[80,96,98],[80,92,94],[100,80,98],[90,80,88],[80,66,68],[80,74,76],[82,80,81],[80,79,81],[80,78,77],[79,80,77]]
    # Reverse the winding order of every triangle (flips face normals).
    tri = [a[::-1] for a in tri]
    # Convert from 1-based to 0-based vertex indices.
    triangles = np.array(tri) - 1
    meshes = []
    for nv, (key, camera) in enumerate(cameras.items()):
        # Map the template into world coordinates: R^T (x - T), i.e. the
        # inverse of x_cam = R x_world + T (standard world-to-camera
        # convention — confirm against the rest of the pipeline).
        vertices = (camera['R'].T @ (vertex.T - camera['T'])).T
        meshes.append({
            'vertices': vertices, 'faces': triangles, 'name': 'camera_{}'.format(nv), 'vid': nv
        })
    return meshes
import os
# Absolute directory of this module; used to locate bundled .obj assets.
current_dir = os.path.dirname(os.path.realpath(__file__))
def create_cameras_texture(cameras, imgnames, scale=5e-3):
    """Create a textured image plane ("billboard") for each camera.

    Loads the background quad template, textures it with each camera's
    image and places it in world coordinates using the camera extrinsics.

    Args:
        cameras (dict): camera name -> dict with 'K', 'R', 'T' parameters.
        imgnames (list): one image path per camera, in iteration order.
        scale (float, optional): world-units-per-pixel scaling of the plane.

    Returns:
        list: one pyrender.Mesh per camera.
    """
    import trimesh
    import pyrender
    from PIL import Image
    from os.path import join
    cam_path = join(current_dir, 'objs', 'background.obj')
    meshes = []
    for nv, (key, camera) in enumerate(tqdm(cameras.items(), desc='loading images')):
        cam_trimesh = trimesh.load(cam_path, process=False)
        vert = np.asarray(cam_trimesh.vertices)
        # K is unpacked but not used below.
        K, R, T = camera['K'], camera['R'], camera['T']
        img = Image.open(imgnames[nv])
        height, width = img.height, img.width
        # Scale the template quad to the image size (in pixels), flatten z.
        vert[:, 0] *= width
        vert[:, 1] *= height
        vert[:, 2] *= 0
        # NOTE(review): these two lines halve the coordinates
        # (x -= x*0.5 == x *= 0.5) rather than centering them; if centering
        # was intended it should subtract width*0.5 / height*0.5 — confirm.
        vert[:, 0] -= vert[:, 0]*0.5
        vert[:, 1] -= vert[:, 1]*0.5
        # Flip y — presumably because image rows grow downward; confirm.
        vert[:, 1] = - vert[:, 1]
        vert[:, :2] *= scale
        # vert[:, 2] = 1
        # Camera-to-world: (x - T^T) R is the row-vector form of R^T(x - T).
        cam_trimesh.vertices = (vert - T.T) @ R
        cam_trimesh.visual.material.image = img
        cam_mesh = pyrender.Mesh.from_trimesh(cam_trimesh, smooth=True)
        meshes.append(cam_mesh)
    return meshes
def create_mesh_pyrender(vert, faces, col):
    """Wrap raw vertices/faces into a pyrender mesh with a flat material.

    Args:
        vert: (V, 3) vertex positions.
        faces: (F, 3) triangle indices.
        col: base color factor passed to the material.

    Returns:
        pyrender.Mesh
    """
    import trimesh
    import pyrender
    tri_mesh = trimesh.Trimesh(vert, faces, process=False)
    # Non-metallic, fully opaque material with a uniform base color.
    flat_material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.0,
        alphaMode='OPAQUE',
        baseColorFactor=col)
    return pyrender.Mesh.from_trimesh(tri_mesh, material=flat_material)
|
{"hexsha": "d632b890f66d7b559b0acbfbb84550409831f527", "size": 12311, "ext": "py", "lang": "Python", "max_stars_repo_path": "vis/lib/geometry/geometry.py", "max_stars_repo_name": "CvHadesSun/3D-Tools", "max_stars_repo_head_hexsha": "b852bf84710fed0b5a01fa71a1ab371ec93392df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-20T10:06:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T09:22:39.000Z", "max_issues_repo_path": "vis/lib/geometry/geometry.py", "max_issues_repo_name": "CvHadesSun/3D-Tools", "max_issues_repo_head_hexsha": "b852bf84710fed0b5a01fa71a1ab371ec93392df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vis/lib/geometry/geometry.py", "max_forks_repo_name": "CvHadesSun/3D-Tools", "max_forks_repo_head_hexsha": "b852bf84710fed0b5a01fa71a1ab371ec93392df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.555, "max_line_length": 3498, "alphanum_fraction": 0.6045000406, "include": true, "reason": "import numpy", "num_tokens": 5184}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def calc_dihedral(b1, b2, b3):
    '''Compute dihedral angles, in degrees, from three bond vectors.

    Uses the atan2 formulation described on Wikipedia:
    https://en.wikipedia.org/wiki/Dihedral_angle#cite_note-3

    Every argument must be a 2D array so the computation vectorizes, e.g.
    ``b = np.array([[0.3, 0.5, 0.3]])``:
      - axis 0: instances of the bond vector
      - axis 1: XYZ coordinates

    Returns a 1D array of angles in degrees in (-180, 180].
    '''
    n1 = np.cross(b1, b2, axis=1)   # normal of the (b1, b2) plane
    n2 = np.cross(b2, b3, axis=1)   # normal of the (b2, b3) plane
    m = np.cross(n1, n2, axis=1)
    b2_unit = b2 / np.linalg.norm(b2, axis=1, keepdims=True)
    y = np.sum(m * b2_unit, axis=1)
    x = np.sum(n1 * n2, axis=1)
    return np.arctan2(y, x) / np.pi * 180.0
|
{"hexsha": "95091716e3df67838d66cf7bb98b491f0e75533a", "size": 863, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyrotein/angle.py", "max_stars_repo_name": "carbonscott/pyrotein", "max_stars_repo_head_hexsha": "4c41eade0d014e70aadf9f9c475cbc4255a0a32e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-05T21:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T21:09:23.000Z", "max_issues_repo_path": "pyrotein/angle.py", "max_issues_repo_name": "carbonscott/pyrotein", "max_issues_repo_head_hexsha": "4c41eade0d014e70aadf9f9c475cbc4255a0a32e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyrotein/angle.py", "max_forks_repo_name": "carbonscott/pyrotein", "max_forks_repo_head_hexsha": "4c41eade0d014e70aadf9f9c475cbc4255a0a32e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8214285714, "max_line_length": 98, "alphanum_fraction": 0.5689455388, "include": true, "reason": "import numpy", "num_tokens": 297}
|
import numpy as np
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower, ArrayCatalog
from nbodykit.source.mesh.field import FieldMesh
from nbodykit.transform import HaloRadius, HaloVelocityDispersion
from nbodykit.cosmology.cosmology import Cosmology
from time import time
import os, sys
import yaml, re
#
sys.path.append('../utils/')
import hod
#Get parameter file
cfname = sys.argv[1]  # path to the YAML configuration file (first CLI arg)
with open(cfname, 'r') as ymlfile:
    args = yaml.load(ymlfile, Loader=yaml.FullLoader)
nc = args['nc']        # particle-mesh cells per side
bs = args['bs']        # simulation box size
alist = args['alist']  # list of scale factors to process
#
#
#Global, fixed things
# Fiducial cosmology used for halo radius / velocity dispersion below.
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
cosmo = Cosmology.from_dict(cosmodef)
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm
if rank == 0: print(args, flush=True)  # echo the config once (rank 0 only)
def read_conversions(db):
    """Read conversion factors from the ``attr-v2`` header file in ``db``.

    Parses the human-readable attribute dump for the particle mass, box
    size, RSD factor and scale factor.

    Args:
        db (str): directory containing the ``attr-v2`` file.

    Returns:
        tuple: (mpart, Lbox, rsdfac, acheck) — particle mass (file value
            times 1e10), box size, RSD conversion factor and scale factor.

    Raises:
        RuntimeError: if any of the four quantities is missing.
    """
    mpart, Lbox, rsdfac, acheck = None, None, None, None
    with open(db + "/attr-v2", "r") as ff:
        for line in ff.readlines():
            # Raw strings: the patterns contain \#, \s, \[ etc., which are
            # invalid escape sequences in ordinary string literals (a
            # SyntaxWarning on modern Python).
            mm = re.search(r"MassTable.*\#HUMANE\s+\[\s*0\s+(\d*\.\d*)\s*0+\s+0\s+0\s+0\s+\]", line)
            if mm is not None:
                mpart = float(mm.group(1)) * 1e10
            mm = re.search(r"BoxSize.*\#HUMANE\s+\[\s*(\d+)\s*\]", line)
            if mm is not None:
                Lbox = float(mm.group(1))
            mm = re.search(r"RSDFactor.*\#HUMANE\s+\[\s*(\d*\.\d*)\s*\]", line)
            if mm is not None:
                rsdfac = float(mm.group(1))
            mm = re.search(r"ScalingFactor.*\#HUMANE\s+\[\s*(\d*\.\d*)\s*\]", line)
            if mm is not None:
                acheck = float(mm.group(1))
    if (mpart is None) or (Lbox is None) or (rsdfac is None) or (acheck is None):
        print(mpart, Lbox, rsdfac, acheck, flush=True)
        raise RuntimeError("Unable to get conversions from attr-v2.")
    return mpart, Lbox, rsdfac, acheck
#
def make_galcat(aa, mmin, m1f, alpha=-1, censuff=None, satsuff=None, ofolder=None, seed=3333):
    '''Populate the halo catalog with central and satellite galaxies (HOD).

    Reads the halo catalog for scale factor ``aa``, assigns one central per
    halo plus ``hod.nsat_martin`` satellites, splits the halo mass between
    them, and saves the central/satellite catalogs.

    Args:
        aa: scale factor of the snapshot (redshift z = 1/aa - 1).
        mmin: minimum satellite mass.
        m1f: m1 factor of the satellite HOD, N = (M_h/m1)**alpha.
        alpha: slope of the satellite HOD.
        censuff: suffix of the saved central catalog; None skips saving.
        satsuff: suffix of the saved satellite catalog; None skips saving.
        ofolder: output folder for the catalogs.
        seed: RNG seed for satellite placement.
    '''
    zz = 1/aa-1
    halocat = BigFileCatalog(args['halofilez']%aa, dataset=args['halodataset'])
    rank = halocat.comm.rank
    mpart, Lbox, rsdfac, acheck = read_conversions(args['headerfilez']%aa)
    halocat.attrs['BoxSize'] = [bs, bs, bs]
    halocat.attrs['NC'] = nc
    # Keep the global (pre-subvolume) halo index for later sorting.
    ghid = halocat.Index.compute()
    halocat['GlobalIndex'] = ghid
    halocat['Mass'] = halocat['Length'] * mpart
    halocat['Position'] = halocat['Position']%bs # Wrapping positions assuming periodic boundary conditions
    rank = halocat.comm.rank
    halocat = halocat.to_subvolumes()
    if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%zz, flush=True)
    # Pull halo properties into local numpy arrays.
    hmass = halocat['Mass'].compute()
    hpos = halocat['Position'].compute()
    hvel = halocat['Velocity'].compute()
    # Halo radius divided by aa — presumably a physical-to-comoving
    # conversion; confirm the convention used by HaloRadius.
    rvir = HaloRadius(hmass, cosmo, 1/aa-1).compute()/aa
    vdisp = HaloVelocityDispersion(hmass, cosmo, 1/aa-1).compute()
    ghid = halocat['GlobalIndex'].compute()
    print('In rank = %d, Catalog size = '%rank, hmass.size, flush=True)
    #Do hod
    start = time()
    # One central per halo; satellites follow the Martin HOD.
    ncen = np.ones_like(hmass)
    nsat = hod.nsat_martin(msat = mmin, mh=hmass, m1f=m1f, alpha=alpha).astype(int)
    #Centrals
    cpos, cvel, gchid, chid = hpos, hvel, ghid, np.arange(ncen.size)
    spos, svel, shid = hod.mksat(nsat, pos=hpos, vel=hvel,
                                 vdisp=vdisp, conc=7, rvir=rvir, vsat=0.5, seed=seed)
    gshid = ghid[shid]
    # Satellite HI velocity: 2/3 satellite motion + 1/3 host velocity.
    svelh1 = svel*2/3 + cvel[shid]/3.
    # Satellite mass bounds: at most a tenth of the host halo mass.
    smmax = hmass[shid]/10.
    smmin = np.ones_like(smmax)*mmin
    mask = smmin > smmax/3. #If Mmin and Mmax are too close for satellites, adjust Mmin
    smmin[mask] = smmax[mask]/3.
    smass = hod.get_msat(hmass[shid], smmax, smmin, alpha)
    # Total satellite mass per host halo.
    # NOTE(review): np.bincount returns max(shid)+1 entries while
    # np.unique(shid) can be shorter when some halos host no satellites,
    # in which case this assignment would raise — confirm the inputs
    # guarantee matching lengths.
    sathmass = np.zeros_like(hmass)
    tot = np.bincount(shid, smass)
    sathmass[np.unique(shid)] = tot
    cmass = hmass - sathmass # assign remaining mass in centrals
    print('In rank = %d, Time taken = '%rank, time()-start, flush=True)
    print('In rank = %d, Number of centrals & satellites = '%rank, ncen.sum(), nsat.sum(), flush=True)
    print('In rank = %d, Satellite occupancy: Max and mean = '%rank, nsat.max(), nsat.mean(), flush=True)
    #
    #Save
    cencat = ArrayCatalog({'Position':cpos, 'Velocity':cvel, 'Mass':cmass, 'GlobalID':gchid,
                           'Nsat':nsat, 'HaloMass':hmass},
                          BoxSize=halocat.attrs['BoxSize'], Nmesh=halocat.attrs['NC'])
    # Negative GlobalIDs would indicate a bad index; log before/after sort.
    minid, maxid = cencat['GlobalID'].compute().min(), cencat['GlobalID'].compute().max()
    if minid < 0 or maxid < 0:
        print('before ', rank, minid, maxid, flush=True)
    cencat = cencat.sort('GlobalID')
    minid, maxid = cencat['GlobalID'].compute().min(), cencat['GlobalID'].compute().max()
    if minid < 0 or maxid < 0:
        print('after ', rank, minid, maxid, flush=True)
    if censuff is not None:
        colsave = [cols for cols in cencat.columns]
        cencat.save(ofolder+'cencat'+censuff, colsave)
    satcat = ArrayCatalog({'Position':spos, 'Velocity':svel, 'Velocity_HI':svelh1, 'Mass':smass,
                           'GlobalID':gshid, 'HaloMass':hmass[shid]},
                          BoxSize=halocat.attrs['BoxSize'], Nmesh=halocat.attrs['NC'])
    minid, maxid = satcat['GlobalID'].compute().min(), satcat['GlobalID'].compute().max()
    if minid < 0 or maxid < 0:
        print('before ', rank, minid, maxid, flush=True)
    satcat = satcat.sort('GlobalID')
    minid, maxid = satcat['GlobalID'].compute().min(), satcat['GlobalID'].compute().max()
    if minid < 0 or maxid < 0:
        print('after ', rank, minid, maxid, flush=True)
    if satsuff is not None:
        colsave = [cols for cols in satcat.columns]
        satcat.save(ofolder+'satcat'+satsuff, colsave)
if __name__=="__main__":
    # Build central/satellite galaxy catalogs for every configured scale
    # factor.
    for aa in alist[:]:
        #Parameters for populating with satellites
        #sat hod : N = (M_h/m1)**alpha
        mmin = 1e9*( 1.8 + 15*(3*aa)**8 ) * 0.1 #mcut * 0.1, 0.1 being mmin
        alpha = -0.8
        for m1fac in [0.03]:
            censuff ='' #suffix for central catalog
            satsuff ='' #suffix for satellite catalog
            make_galcat(aa=aa, mmin=mmin, m1f=m1fac, alpha=alpha, censuff=censuff, satsuff=satsuff, ofolder=args['outfolder']%aa)
    sys.exit(0)
|
{"hexsha": "99364bc67b016d8e8256f9b9c47f5429f23147ec", "size": 6348, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/hodgal.py", "max_stars_repo_name": "farnikn/MithraLIMSims", "max_stars_repo_head_hexsha": "6b11448b8859519edf9733b2e22bc5569356942f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/hodgal.py", "max_issues_repo_name": "farnikn/MithraLIMSims", "max_issues_repo_head_hexsha": "6b11448b8859519edf9733b2e22bc5569356942f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/hodgal.py", "max_forks_repo_name": "farnikn/MithraLIMSims", "max_forks_repo_head_hexsha": "6b11448b8859519edf9733b2e22bc5569356942f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7857142857, "max_line_length": 129, "alphanum_fraction": 0.6069628229, "include": true, "reason": "import numpy", "num_tokens": 2046}
|
\SecDef{symmetries}{Rotational Invariants in NORX}
In this section I describe rotational symmetries in the permutation $F$ of NORX. They exist both on the word level (inherited from $G$) and on the state level (structural).
\subsection{State Invariants}
We can see a 4x4 NORX state $S$ as a list of 4 columns: $S = (c_1, c_2, c_3, c_4)$.
\begin{definition}[Columns Rotation]
For an integer $n$ denote by $R_n$ the function rotating of the columns left by $n$ positions. For example $R_1(c_1, c_2, c_3, c_4) = (c_2, c_3, c_4, c_1)$ for arbitrary $c_1, c_2, c_3, c_4 \in (\field{w})^4$.
\end{definition}
The following proposition shows that the permutation $F$ is column rotation-symmetric.
\begin{proposition}
The permutations $R_n$ and $F^l$ commute for any integers $n$ and $l \ge 1$:
$$
F^l \circ R_n = R_n \circ F^l.
$$
\end{proposition}
\begin{proof}
Clearly, the rotation of columns does not affect the column step $\col$, since it transforms each column separately: $\col \circ R_n = R_n \circ \col$. Such rotations do not break the diagonals as well, because the diagonals are simply reordered. Therefore, $\diag \circ R_n = R_n \circ \diag$. It follows that $F$ commutes with $R_n$ and thus $F^l$ commutes with $R_n$ too.
\end{proof}
\begin{definition}
A state $s \in (\field{w})^{16}$ is said to be \emph{column $n$-rotation invariant} if
$$
R_n(s) = s.
$$
\end{definition}
Let $s \in (\field{w})^{16}$ be a column $n$-rotation invariant state for a fixed positive integer $n$. Observe that
$$
R_n(F(s)) = F(R_n(s)) = F(s),
$$
i.e. $F(s)$ is also column $n$-rotation invariant. It follows that the property of a state being column $n$-rotation invariant is an invariant of the round function $F$. It is easy to see that this invariant corresponds to an invariant subspace.
\begin{proposition}
For a fixed integer $n, 1 \le n \le 3$, the set of all column $n$-rotation invariant states is a linear subspace of $(\field{w})^{16}$.
For $n = 1$ or $n = 3$ this is the same subspace of dimension $4w$,
for $n = 2$ the invariant subspace has dimension $8w$.
\end{proposition}
\begin{proof}
If $n=1$ or $n=3$, then for any $c_1, c_2, c_3, c_4 \in (\field{w})^4$
$$
(c_1, c_2, c_3, c_4) = (c_2, c_3, c_4, c_1)
$$
and it follows that all columns are equal: $c_1 = c_2 = c_3 = c_4$. There are $2^{4w}$ out of $2^{16w}$ such states. The designers of NORX noted these states in~\cite{DBLP:conf/latincrypt/AumassonJN14}. A constraint $c_i = c_j$ consists of $4w$ linear equations $c_{i,y,x} \oplus c_{j,y,x} = 0$, where $1 \le y \le 4, 1 \le x \le w$. Therefore, these constraints define a linear subspace of dimension $16w - 3\cdot 4w = 4w$.
If $n = 2$, then for any $c_1, c_2, c_3, c_4 \in (\field{w})^4$ $$
(c_1, c_2, c_3, c_4) = (c_3, c_4, c_1, c_2)
$$
and it follows that the two pairs of columns are equal: $c_1 = c_3$ and $c_2 = c_4$. There are $2^{8w}$ out of $2^{16w}$ such states. Similarly, these constraints define a linear subspace of dimension $8w$.
\end{proof}
Hitting such a special state even for the case $n=2$ is not easy under the NORX security claims. However, $2^{8w}$ is a more serious fraction of states than the $2^{4w}$ weak states which were known to the designers. To illustrate possible dangers of such properties, I refer to the forgery attack~\cite{NORXfse} on the previous version of NORX exploiting this invariant, and I also describe two hypothetical attacks on NORX8~\cite{aumasson2015norx8}, a NORX version with 8-bit words for low-end devices. I remark that NORX8 is not a part of the CAESAR submission.
The first attack shows a weak-key set, which could be exploited if the domain separation constants were rotation-invariant. The weak-key set is relatively small, $2^{32}$ keys out of $2^{80}$. The second attack is a state/key recovery attack in a known plaintext scenario. It succeeds with probability $2^{-64}$ for each pair of consecutive known-plaintext blocks, and the total time complexity is $2^{72}$ to recover an 80-bit key. Note that the designers restrict the data per single key to $2^{24}$ message blocks, therefore, the attack can break a concrete key with probability only $2^{-40}$.
Both attacks are independent of the number of rounds $l$ used in the permutation.
\subsection{Hypothetical Weak-key Attack on NORX8 Initialization}
The initial state of NORX8 is given by
\begin{equation}
\begin{pmatrix}
n_1 & n_2 & n_3 & n_4 \\
k_1 & k_2 & k_3 & k_4 \\
k_5 & k_6 & k_7 & k_8 \\
k_9\oplus w & k_{10}\oplus l & u_{15}\oplus p & u_{16}\oplus t \\
\end{pmatrix} \in (\field{8})^{16},
\end{equation}
where $n_i$ and $k_i$ denote bytes of the nonce and the key respectively, $u_i$ are constants and $w,l,p,t$ are constants encoding parameters of NORX. It is possible to construct valid initial states with two equal halves, i.e. a column 2-rotation invariant state. Indeed, let us fix the four key bytes $(k_3, k_4, k_7, k_8)$ arbitrarily and let us choose the two nonce bytes $(n_3, n_4)$ arbitrarily. Then we can set the left half of the state equal to the right half, i.e.
\eq{
(n_1, n_2) &= (n_3, n_4),\\
(k_1, k_2) &= (k_3, k_4),\\
(k_5, k_6) &= (k_7, k_8),\\
(k_9, k_{10}) &= (u_{15} \oplus p \oplus w, u_{16} \oplus t \oplus l).
}
There are $2^{32}$ weak keys out of $2^{80}$ and $2^{16}$ nonces that result in such a weak state. The column 2-rotation invariant of such state is preserved through arbitrary number of rounds of $F$. However, after the first $F^l$ rounds the domain separation constant will be added to the last word of the state (see~\FigRef{sponge2}). This constant is not column 2-rotation invariant and therefore it will break the property. Therefore, we consider a slightly modified version of NORX8 where the domain separation constant is column 2-rotation invariant. For example, the original constant may be added not only to the last word, but to all words of the state or to all words in the last row. In such case the invariant is preserved through the next $F^l$ rounds and the rate part of the state is then observed by an adversary. This leads to a simple distinguisher: the adversary simply compares the left and right halves of the exposed part of the state. In NORX8 the rate part consists of only 5 bytes. It allows to check only the topmost 4 words with error probability $2^{-16}$. By using a few more encryptions with another weak nonces the error probability can be decreased to negligible.
I remark that the weak key space is very small and the attack requires symmetric domain separation constants. On the other hand, it is powerful in that it is independent of the number of rounds. The attack illustrates possible dangers of having such strong invariants in the permutation.
\subsection{State Recovery Attack on NORX8}
The column $2$-rotation invariant can be used to mount a state/key recovery attack on NORX8, though exceeding the data usage limit defined by the designers.
\begin{figure}[htbp]
\centering
\includegraphics[height=2.8cm]{\PathFig{layout.png}}
\FigDef{sponge2}{The NORX v2.0 AE scheme with parallelization parameter $p = 1$. NORX8 and NORX16 follow this scheme. (credits: NORX specification~\cite{NORX})}
\end{figure}
Assume that we have a two-block known-plaintext message. That is, we know the rate part before and after a call to the NORX8 core permutation $F^l$. Denote the input rate part by $a$ and the output rate part by $b$. Recall that the rate in NORX8 is 40 bits, which is five 8-bit words. With probability $2^{-16}$ we will observe $a_1 = a_3, a_2 = a_4$. Then there are two cases:
\begin{enumerate}
\item The whole state is column rotation-2 invariant. The probability of this is equal to $2^{-6\cdot8}=2^{-48}$, given the observed rate part. Indeed, a uniformly random state is column 2-rotation invariant with probability $2^{-64}$. In this case the output state will be also column rotation-2 invariant with probability 1 and we will observe $b_1 = b_3, b_2 = b_4$.
\item The whole state is not column rotation-2 invariant. Then with probability $2^{-16}$ we will observe $b_1 = b_3, b_2 = b_4$ as a false positive.
\end{enumerate}
As a result, when we observe both $a_1 = a_3, a_2 = a_4$ and $b_1 = b_3, b_2 = b_4$, the probability of the state being column rotation-2 invariant is equal to $2^{-32}$ and in the other cases it is a false positive. In the first case the state before the call to $F^l$ contains 5 unknown words $x_1,\ldots,x_5 \in \field{8}$:
$$
\begin{pmatrix}
a_1 & a_2 & a_3=a_1 & a_4=a_2 \\
a_5 & x_1 & a_5 & x_1 \\
x_2 & x_3 & x_2 & x_3 \\
x_4 & x_5 & x_4 & x_5 \\
\end{pmatrix}.
$$
We can exhaustively check all $2^{40}$ possibilities for $x_1, \ldots, x_5$ by encrypting through $F^l$ and obtaining extra filter with probability $2^{-24}$ from $b$. The remaining $2^{16}$ candidates can be checked by decrypting the state up to the initial state and matching the constants and further verifying the tag.
As a result, with probability $2^{-64}$ two consecutive known-plaintext blocks allow recovery of the full state and the secret key. The initial filter has strength $2^{-32}$ and the time complexity of checking a block pair is $2^{40}$. Note that the designers set a limit to $2^{24}$ data, therefore the attack succeeds for a concrete key only with probability around $2^{-40}$.
\subsection{Word Invariants}
\newcommand\rr{\mathbf{r}}
A similar rotational symmetry exists on the word level too.
Let $G'$ be the permutation of $(\field{w})^4$ to itself obtained from $G$ by replacing the four left shift operations by left rotations.
\begin{proposition}
\PropLabel{GprimeG}
$G' = G$ is conditioned by 4 bit equations, where each equation holds with probability $3/4$.
\end{proposition}
\begin{proof}
The left shift by one inserts a zero in the least significant bit of the result. If the most significant bit of the input is equal to 0, then the left shift is equivalent to the left rotation. There are 4 left shifts in $G$, each yields such bit equation. The input of a left shift in $G$ is simply an AND of two state bits, which are uniformly distributed.
\end{proof}
\begin{observation}
Experimentally, it is observed that $\Pr[G' = G]$ is close to $2^{-1.82}$, where the input is sampled uniformly at random, for all word sizes $w \in \{8,16,32,64\}$.
\end{observation}
Note that this observation shows the effect of dependency of the four quarter-steps in $G$. The probability that all these bits are equal to zero can be estimated as $(3/4)^4 \approx 2^{-1.66}$. However, the actual probability is lower due to the dependency of the equations.
\begin{definition}
Let $r_n\colon \field{w} \to \field{w}$ be the mapping which rotates a word left by $n$ bits and let $\rr_n \colon (\field{w})^4 \to (\field{w})^4$ be defined as
$$
\rr_n(a,b,c,d) \eqdef (r_n(a), r_n(b), r_n(c), r_n(d)).
$$
\end{definition}
\begin{proposition}
\PropLabel{Gcommute}
For any integer $n, 1 \le n < w$, $\rr_n$ commutes with $G'$:
$$
G' \circ \rr_n = \rr_n \circ G'.
$$
Furthermore, $\rr_n$ commutes with $G$ conditioned by 8 bit equations, each holding with probability $3/4$.
\end{proposition}
\begin{proof}
First, it is easy to verify that all operations in $G'$ commute with $\rr_n$. For a binary operation to commute it is required that $\rr_n$ applied to both inputs is equivalent to $\rr_n$ applied to the output.
The second claim follows by applying \PropRef{GprimeG} to the equation $G' \circ \rr_n = \rr_n \circ G'$ two times.
\end{proof}
\begin{observation}
Experimentally, it is observed that $\Pr[G' \circ \rr_n = \rr_n \circ G']$ varies from $2^{-3.84}$ to $2^{-3.59}$ depending on the word size and rotation amount $n$. The rotation amounts corresponding to the smallest probabilities are $1$ and $w-1$.
\end{observation}
Similarly to the column $n$-rotation invariant, define the word $n$-rotation invariant.
\begin{definition}
A column $c \in (\field{w})^4$ (resp. a state $s \in (\field{w})^{16}$) is said to be word $n$-rotation invariant if for each of its words $c_i$ (resp. $s_i$) the following holds:
$$
r_n(c_i) = c_i~~(\text{resp.}~r_n(s_i) = s_i).
$$
\end{definition}
\begin{proposition}
The set of all word $n$-rotation invariant states is a linear subspace of dimension $16\cdot gcd(n, w)$.
\end{proposition}
\begin{proof}
It is easy to see that a word $v \in \field{w}$ is word $n$-rotation invariant if and only if it is made of $w/gcd(n,w)$ copies of the same vector $u \in \field{gcd(n,w)}$. Clearly, all such words form a linear subspace of $\field{w}$ of dimension $gcd(n,w)$. As there are 16 words in the state, the proposition follows.
\end{proof}
Note that the property of a state or column being invariant requires only one approximation of $G$ by $G'$, i.e. it is approximately twice as probable as the commutation.
\begin{proposition}
Let $c \in (\field{w})^4$ be a word $n$-rotation invariant column. Then
$$
\Pr[\rr_n(G(c)) = G(c)] \ge \Pr[G(c) = G'(c)],
$$
where the probabilities are taken over $c$ sampled uniformly at random from the set of all word $n$-rotation invariant columns.
\end{proposition}
\begin{proof}
Consider the following equation:
$$
\rr_n(G(c)) \approx \rr_n(G'(c)) = G'(\rr_n(c)) = G'(c) \approx G(c).
$$
The two approximations are applied to the same input: $G(c) \approx G'(c)$, therefore the equation holds with probability at least $\Pr[G(c) = G'(c)]$.
\end{proof}
Experimentally, no difference is observed on $\Pr[G(c) = G'(c)]$ when $c$ is sampled uniformly at random from $(\field{w})^4$ and when it is sampled uniformly at random from the set of all word $n$-rotation invariant columns. Therefore, it can be expected that a word $n$-rotation invariant is preserved through $F$ with a probability approximately $(2^{-1.82})^8 = 2^{-14.56}$. The commutation of $F$ and the $n$-rotation of each word can be expected to happen with probability approximately $(2^{-3.59})^8 = 2^{-28.72}$ if $1 < n < w-1$.
It is worth noting that the word $n$-rotation invariants can be seen as probabilistic invariant subspaces of $F$.
\subsection{Hypothetical Attack on NORX128 v2.0}
As the probability of $\rr_n$ commuting with $G'$ does not seem to depend on the word size, the distinguishing property is stronger for instances with larger words and key size. I consider an existential forgery attack similar to the one proposed in~\cite{NORXfse}. Similarly, I consider NORX v2.0 since NORX v3.0 breaks the attack by injecting the key in the finalization stage.
\newcommand\rrr{\mathbf{r}}
Consider the forgery attack scenario. The finalization stage of NORX consists of 8 iterations of $F$.
Let us assume that the words in the rate part of the state before the finalization are $w/2$-rotation invariant. This happens with probability $2^{-2w}$. Then we can attempt a forgery by rotating each word in the last ciphertext block by $w/2$. Then, with probability approximately $(2^{-3.59})^{64} = 2^{-229.76}$ we expect the rotation to commute with the finalization:
$$
F^8(\rrr_n(s)) = \rrr_n(F^8(s)),
$$
where $s$ is the state before the finalization stage. Since the tag is obtained by truncating the final state and we have observed the tag in the first encryption, we can expect the new tag to be equal to the word $w/2$-rotated version of the original tag.
For NORX64, the probability of the rate to be $32$-rotation invariant is equal to $2^{-128}$. Unfortunately, the attack's success probability then is worse than for a generic attack (i.e. $2^{-256}$). For this reason, I suggest to increase the word size even more and to consider NORX128, a generalization of NORX64 by increasing the word size to 128 bits. In this hypothetical cipher, the full attack success probability is approximately $2^{-256}\cdot 2^{-229.76} < 2^{-512}$, i.e. it is better than a generic attack.
This attack on the hypothetical instance of NORX shows the possibility of exploiting the word-level symmetries as well. The attack does not apply directly to main instances of NORX.
|
{"hexsha": "93d567652c6c5990671c1ff8a7911d1ba1f02d20", "size": 15994, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "thesis-source/9niNORX/3symmetries.tex", "max_stars_repo_name": "hellman/thesis", "max_stars_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-05-16T19:55:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:36:12.000Z", "max_issues_repo_path": "thesis-source/9niNORX/3symmetries.tex", "max_issues_repo_name": "hellman/thesis", "max_issues_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-09T11:26:45.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T11:26:45.000Z", "max_forks_repo_path": "thesis-source/9niNORX/3symmetries.tex", "max_forks_repo_name": "hellman/thesis", "max_forks_repo_head_hexsha": "6ba1c2b241e63c07cf76108481c1b67f21a50f12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-05T19:40:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-05T19:40:16.000Z", "avg_line_length": 75.8009478673, "max_line_length": 1196, "alphanum_fraction": 0.7238964612, "num_tokens": 4723}
|
"""
runabc(ABCsetup::ABCtype, targetdata; progress = false)
Run ABC with ABCsetup defining the algotrithm and inputs to algorithm, targetdata is the data we wish to fit the model to and will be used as an input for the simulation function defined in ABCsetup. If progress is set to `true` a progress meter will be shown.
"""
function runabc(ABCsetup::ABCRejection, targetdata; progress = false, verbose = false)
#initalize array of particles
particles = Array{ParticleRejection}(ABCsetup.nparticles)
particlesall = Array{ParticleRejection}(ABCsetup.maxiterations)
i = 1 #set particle indicator to 1
its = 0 #keep track of number of iterations
distvec = zeros(Float64, ABCsetup.nparticles) #store distances in an array
if progress == true
p = Progress(ABCsetup.nparticles, 1, "Running ABC rejection algorithm...", 30)
end
while (i < (ABCsetup.nparticles + 1)) & (its < ABCsetup.maxiterations)
its += 1
#get new proposal parameters
newparams = getproposal(ABCsetup.prior, ABCsetup.nparams)
#simulate with new parameters
dist, out = ABCsetup.simfunc(newparams, ABCsetup.constants, targetdata)
#keep track of all particles incase we don't reach nparticles with dist < ϵ
particlesall[its] = ParticleRejection(newparams, dist, out)
#if simulated data is less than target tolerance accept particle
if dist < ABCsetup.ϵ
particles[i] = ParticleRejection(newparams, dist, out)
distvec[i] = dist
i +=1
if progress == true
next!(p)
end
end
end
if i < ABCsetup.nparticles
warn("Only accepted $(i-1) particles with ϵ < $(ABCsetup.ϵ). \n\tDecrease ϵ or increase maxiterations if you require this. \n\t Resorting to taking the $(ABCsetup.nparticles) particles with smallest distance")
d = map(p -> p.distance, particlesall)
particles = particlesall[sortperm(d)[1:ABCsetup.nparticles]]
distvec = map(p -> p.distance, particles)
end
out = ABCrejectionresults(particles, its, ABCsetup, distvec)
return out
end
"""
    runabc(ABCsetup::ABCRejectionModel, targetdata; progress = false, verbose = false)

ABC rejection for model selection. Each iteration picks a model uniformly at
random, draws parameters from that model's prior, simulates, and accepts the
particle when its distance to `targetdata` is below the tolerance `ϵ`.
Errors out if fewer than `nparticles` particles are accepted within
`maxiterations` simulations.
"""
function runabc(ABCsetup::ABCRejectionModel, targetdata; progress = false, verbose = false)

  ABCsetup.nmodels > 1 || error("Only 1 model specified, use ABCRejection method to estimate parameters for a single model")

  #storage for the accepted particles and their distances
  particles = Array{ParticleRejectionModel}(ABCsetup.Models[1].nparticles)
  distances = zeros(Float64, ABCsetup.Models[1].nparticles)

  nextidx = 1 #slot for the next accepted particle (accepted count is nextidx - 1)
  iters = 0   #total number of simulations performed

  if progress == true
    meter = Progress(ABCsetup.Models[1].nparticles, 1, "Running ABC rejection algorithm...", 30)
  end

  while (nextidx < (ABCsetup.Models[1].nparticles + 1)) & (iters < ABCsetup.Models[1].maxiterations)
    iters += 1
    #choose a model uniformly at random, then draw a proposal from its prior
    modelidx = rand(1:ABCsetup.nmodels)
    proposal = getproposal(ABCsetup.Models[modelidx].prior, ABCsetup.Models[modelidx].nparams)
    #simulate the chosen model with the proposed parameters
    dist, simout = ABCsetup.Models[modelidx].simfunc(proposal, ABCsetup.Models[modelidx].constants, targetdata)
    #accept the particle when the distance is below the tolerance
    if dist < ABCsetup.Models[1].ϵ
      particles[nextidx] = ParticleRejectionModel(proposal, modelidx, dist, simout)
      distances[nextidx] = dist
      nextidx += 1
      if progress == true
        next!(meter)
      end
    end
  end

  nextidx > ABCsetup.Models[1].nparticles || error("Only accepted $(nextidx-1) particles with ϵ < $(ABCsetup.Models[1].ϵ). \n\tDecrease ϵ or increase maxiterations ")

  return ABCrejectionmodelresults(particles, iters, ABCsetup, distances)
end
"""
    runabc(ABCsetup::ABCSMC, targetdata; verbose = false, progress = false)

ABC sequential Monte Carlo for a single model. The first population is drawn
with ABC rejection at tolerance `ϵ1`; each subsequent population resamples and
perturbs accepted particles, shrinking the tolerance to the `α` quantile of the
previous population's distances. Stops when the target tolerance `ϵT` is
reached, the total simulation budget `maxiterations` is exhausted, or the
relative change in ϵ falls below `convergence`.
"""
function runabc(ABCsetup::ABCSMC, targetdata; verbose = false, progress = false)

  #run first population with parameters sampled from prior
  if verbose == true
    println("##################################################")
    println("Use ABC rejection to get first population")
  end
  ABCrejresults = runabc(ABCRejection(ABCsetup.simfunc, ABCsetup.nparams,
                  ABCsetup.ϵ1, ABCsetup.prior; nparticles = ABCsetup.nparticles,
                  maxiterations = ABCsetup.maxiterations, constants = ABCsetup.constants), targetdata, progress = progress);

  oldparticles, weights = setupSMCparticles(ABCrejresults, ABCsetup)
  ϵ = quantile(ABCrejresults.dist, ABCsetup.α) # set new ϵ to αth quantile
  ϵvec = [ϵ] #store epsilon values
  numsims = [ABCrejresults.numsims] #keep track of number of simulations
  particles = Array{ParticleSMC}(ABCsetup.nparticles) #define particles array

  if verbose == true
    println("Run ABC SMC \n")
  end

  popnum = 1
  finalpop = false

  # If rejection already met the target tolerance there is nothing to refine.
  if sum(ABCrejresults.dist .< ABCsetup.ϵT) == ABCsetup.nparticles
    warn("Target ϵ reached with ABCRejection algorithm, no need to use ABC SMC algorithm, returning ABCRejection output...")
    return ABCrejresults
  end

  while (ϵ > ABCsetup.ϵT) & (sum(numsims) < ABCsetup.maxiterations)

    i = 1 #set particle indicator to 1
    particles = Array{ParticleSMC}(ABCsetup.nparticles)
    distvec = zeros(Float64, ABCsetup.nparticles)
    its = 1

    if progress == true
      p = Progress(ABCsetup.nparticles, 1, "ABC SMC population $(popnum), new ϵ: $(round(ϵ, 2))...", 30)
    end

    while i < ABCsetup.nparticles + 1

      #resample a particle by weight and perturb it
      j = wsample(1:ABCsetup.nparticles, weights)
      particle = oldparticles[j]
      newparticle = perturbparticle(particle)
      priorp = priorprob(newparticle.params, ABCsetup.prior)
      if priorp == 0.0 #return to beginning of loop if prior probability is 0
        continue
      end

      #simulate with new parameters
      dist, out = ABCsetup.simfunc(newparticle.params, ABCsetup.constants, targetdata)

      #if simulated data is less than target tolerance accept particle
      if dist < ϵ
        particles[i] = newparticle
        particles[i].other = out
        particles[i].distance = dist
        distvec[i] = dist
        i += 1
        if progress == true
          next!(p)
        end
      end

      its += 1
    end

    #recompute importance weights and perturbation scales for the next population
    particles, weights = smcweights(particles, oldparticles, ABCsetup.prior)
    particles = getscales(particles, ABCsetup)
    oldparticles = particles

    if finalpop == true
      break
    end

    ϵ = quantile(distvec, ABCsetup.α)

    # Clamp ϵ at the target tolerance and run one final population.
    if ϵ < ABCsetup.ϵT
      ϵ = ABCsetup.ϵT
      push!(ϵvec, ϵ)
      push!(numsims, its)
      popnum = popnum + 1
      finalpop = true
      continue
    end

    push!(ϵvec, ϵ)
    push!(numsims, its)

    # Stop when ϵ has effectively stopped shrinking between populations.
    if ((( abs(ϵvec[end - 1] - ϵ )) / ϵvec[end - 1]) < ABCsetup.convergence) == true
      if verbose == true
        println("New ϵ is within $(round(ABCsetup.convergence * 100, 2))% of previous population, stop ABC SMC")
      end
      break
    end
    popnum = popnum + 1

    if verbose == true
      println("##################################################")
      show(ABCSMCresults(particles, numsims, ABCsetup, ϵvec))
      println("##################################################\n")
    end
  end

  out = ABCSMCresults(particles, numsims, ABCsetup, ϵvec)
  return out
end
"""
runabc(ABCsetup::ABCtype, targetdata; progress = false, verbose = false)
When the SMC algorithms are used, a print out at the end of each population will be made if verbose = true.
"""
function runabc(ABCsetup::ABCSMCModel, targetdata; verbose = false, progress = false)
ABCsetup.nmodels > 1 || error("Only 1 model specified, use ABCSMC method to estimate parameters for a single model")
#run first population with parameters sampled from prior
if verbose == true
println("##################################################")
println("Use ABC rejection to get first population")
end
ABCrejresults = runabc(ABCRejectionModel(
map(x -> x.simfunc, ABCsetup.Models),
map(x -> x.nparams, ABCsetup.Models),
ABCsetup.Models[1].ϵ1,
map(x -> x.prior, ABCsetup.Models),
constants = map(x -> x.constants, ABCsetup.Models),
nparticles = ABCsetup.Models[1].nparticles,
maxiterations = ABCsetup.Models[1].maxiterations),
targetdata);
oldparticles, weights = setupSMCparticles(ABCrejresults, ABCsetup)
ϵ = quantile(ABCrejresults.dist, ABCsetup.α) # set new ϵ to αth quantile
ϵvec = [ϵ] #store epsilon values
numsims = [ABCrejresults.numsims] #keep track of number of simualtions
particles = Array{ParticleSMCModel}(ABCsetup.nparticles) #define particles array
weights, modelprob = getparticleweights(oldparticles, ABCsetup)
modelprob = ABCrejresults.modelfreq
if verbose == true
println("Run ABC SMC \n")
end
popnum = 1
finalpop = false
if verbose == true
show(ABCSMCmodelresults(oldparticles, numsims, ABCsetup, ϵvec))
end
if sum(ABCrejresults.dist .< ABCsetup.ϵT) == ABCsetup.nparticles
warn("Target ϵ reached with ABCRejection algorithm, no need to use ABC SMC algorithm, returning ABCRejection output...")
return ABCrejresults
end
while (ϵ >= ABCsetup.ϵT) & (sum(numsims) <= ABCsetup.maxiterations)
i = 1 #set particle indicator to 1
particles = Array{ParticleSMCModel}(ABCsetup.nparticles)
distvec = zeros(Float64, ABCsetup.nparticles)
its = 1
if progress == true
p = Progress(ABCsetup.nparticles, 1, "ABC SMC population $(popnum), new ϵ: $(round(ϵ, 2))...", 30)
end
while i < ABCsetup.nparticles + 1
#draw model from previous model probabilities
mstar = wsample(1:ABCsetup.nmodels, modelprob)
#perturb model
mdoublestar = perturbmodel(ABCsetup, mstar, modelprob)
# sample particle with correct model
j = wsample(1:ABCsetup.nparticles, weights[mdoublestar, :])
particletemp = oldparticles[j]
#perturb particle
newparticle = perturbparticle(particletemp)
#calculate priorprob
priorp = priorprob(newparticle.params, ABCsetup.Models[mdoublestar].prior)
if priorp == 0.0 #return to beginning of loop if prior probability is 0
continue
end
#simulate with new parameters
dist, out = ABCsetup.Models[mdoublestar].simfunc(newparticle.params, ABCsetup.Models[mdoublestar].constants, targetdata)
#if simulated data is less than target tolerance accept particle
if dist < ϵ
particles[i] = newparticle
particles[i].other = out
particles[i].distance = dist
distvec[i] = dist
i += 1
if progress == true
next!(p)
end
end
its += 1
end
particles, weights = smcweightsmodel(particles, oldparticles, ABCsetup, modelprob)
weights, modelprob = getparticleweights(particles, ABCsetup)
particles = getscales(particles, ABCsetup)
oldparticles = deepcopy(particles)
if finalpop == true
break
end
ϵ = quantile(distvec, ABCsetup.α)
if ϵ < ABCsetup.ϵT
ϵ = ABCsetup.ϵT
push!(ϵvec, ϵ)
push!(numsims, its)
popnum = popnum + 1
finalpop = true
continue
end
push!(ϵvec, ϵ)
push!(numsims, its)
if verbose == true
println("##################################################")
show(ABCSMCmodelresults(particles, numsims, ABCsetup, ϵvec))
println("##################################################\n")
end
if ((( abs(ϵvec[end - 1] - ϵ )) / ϵvec[end - 1]) < ABCsetup.convergence) == true
println("New ϵ is within $(round(ABCsetup.convergence * 100, 2))% of previous population, stop ABC SMC")
break
end
popnum = popnum + 1
end
out = ABCSMCmodelresults(particles, numsims, ABCsetup, ϵvec)
return out
end
"""
    runabcCancer(ABCsetup::ABCRejectionModel, targetdata; progress = false)

ABC rejection for model selection where the simulation function additionally
reports whether it produced a valid realisation of the chosen model (third
return value `cm`); proposals are redrawn until the simulation is valid.
"""
function runabcCancer(ABCsetup::ABCRejectionModel, targetdata; progress = false)

  ABCsetup.nmodels > 1 || error("Only 1 model specified, use ABCRejection method to estimate parameters for a single model")

  #initalize array of particles
  particles = Array{ParticleRejectionModel}(ABCsetup.Models[1].nparticles)

  i = 1 #set particle indicator to 1
  its = 0 #keep track of number of iterations
  distvec = zeros(Float64, ABCsetup.Models[1].nparticles) #store distances in an array
  if progress == true
    p = Progress(ABCsetup.Models[1].nparticles, 1, "Running ABC rejection algorithm...", 30)
  end
  #declare these before the loop so they remain visible after the inner loop
  dist, out, newparams = 0.0,0.0,0.0
  while (i < (ABCsetup.Models[1].nparticles + 1)) & (its < ABCsetup.Models[1].maxiterations)
    its += 1
    #sample uniformly from models
    model = rand(1:ABCsetup.nmodels)
    correctmodel = false
    #redraw proposals until the simulation reports a valid realisation (cm == true)
    while correctmodel == false
      #get new proposal parameters
      newparams = getproposal(ABCsetup.Models[model].prior, ABCsetup.Models[model].nparams)
      #simulate with new parameters
      dist, out, cm = ABCsetup.Models[model].simfunc(newparams, ABCsetup.Models[model].constants, targetdata)
      correctmodel = cm
    end
    #if simulated data is less than target tolerance accept particle
    if dist < ABCsetup.Models[1].ϵ
      particles[i] = ParticleRejectionModel(newparams, model, dist, out)
      distvec[i] = dist
      i +=1
      if progress == true
        next!(p)
      end
    end
  end

  i > ABCsetup.Models[1].nparticles || error("Only accepted $(i-1) particles with ϵ < $(ABCsetup.Models[1].ϵ). \n\tDecrease ϵ or increase maxiterations ")

  out = ABCrejectionmodelresults(particles, its, ABCsetup, distvec)
  return out
end
"""
    runabcCancer(ABCsetup::ABCSMCModel, targetdata; verbose = false, progress = false)

ABC SMC with model selection for simulation functions that report whether the
realisation was valid (third return value `cm`); particles are resampled until
a valid simulation is produced. If a model's probability drops to zero within
the first few populations the whole inference is restarted.
"""
function runabcCancer(ABCsetup::ABCSMCModel, targetdata; verbose = false, progress = false)

  ABCsetup.nmodels > 1 || error("Only 1 model specified, use ABCSMC method to estimate parameters for a single model")

  #run first population with parameters sampled from prior
  if verbose == true
    println("##################################################")
    println("Use ABC rejection to get first population")
  end
  ABCrejresults = runabcCancer(ABCRejectionModel(
          map(x -> x.simfunc, ABCsetup.Models),
          map(x -> x.nparams, ABCsetup.Models),
          ABCsetup.Models[1].ϵ1,
          map(x -> x.prior, ABCsetup.Models),
          constants = map(x -> x.constants, ABCsetup.Models),
          nparticles = ABCsetup.Models[1].nparticles,
          maxiterations = ABCsetup.Models[1].maxiterations),
          targetdata, progress = progress);

  oldparticles, weights = setupSMCparticles(ABCrejresults, ABCsetup)
  ϵ = quantile(ABCrejresults.dist, ABCsetup.α) # set new ϵ to αth quantile
  ϵvec = [ϵ] #store epsilon values
  numsims = [ABCrejresults.numsims] #keep track of number of simulations
  particles = Array{ParticleSMCModel}(ABCsetup.nparticles) #define particles array
  #per-model particle weights and the empirical model frequencies from rejection
  weights, modelprob = getparticleweights(oldparticles, ABCsetup)
  modelprob = ABCrejresults.modelfreq

  if verbose == true
    println("Run ABC SMC \n")
  end

  popnum = 1
  finalpop = false

  if verbose == true
    show(ABCSMCmodelresults(oldparticles, numsims, ABCsetup, ϵvec))
  end

  #declare these before the loop so they remain visible after the inner loops
  newparticle, dist, out, priorp = 0.0,0.0,0.0,0.0

  while (ϵ >= ABCsetup.ϵT) & (sum(numsims) <= ABCsetup.maxiterations)

    if verbose == true
      println("######################################## \n")
      println("########################################")
      println("Population number: $(popnum) \n")
    end

    i = 1 #set particle indicator to 1
    particles = Array{ParticleSMCModel}(ABCsetup.nparticles)
    distvec = zeros(Float64, ABCsetup.nparticles)
    its = 1
    if progress == true
      p = Progress(ABCsetup.nparticles, 1, "ABC SMC population $(popnum), new ϵ: $(round(ϵ, 2))...", 30)
    end
    while i < ABCsetup.nparticles + 1
      #draw model from previous model probabilities
      mstar = wsample(1:ABCsetup.nmodels, modelprob)
      #perturb model
      mdoublestar = perturbmodel(ABCsetup, mstar, modelprob)
      correctmodel = false
      #sometimes even though the input is for multiple clones,
      #simulations will be returned that don't have the requested number of clones,
      #this can happen if for example the simulation finishes before t1 is reached.
      #To overcome this we continually resample until we get simulations with the correct number of clones
      while correctmodel == false
        # sample particle with correct model
        j = wsample(1:ABCsetup.nparticles, weights[mdoublestar, :])
        particletemp = oldparticles[j]
        #perturb particle
        newparticle = perturbparticle(particletemp)
        #calculate priorprob
        priorp = priorprob(newparticle.params, ABCsetup.Models[mdoublestar].prior)
        if priorp == 0.0 #return to beginning of loop if prior probability is 0
          break
        end
        #simulate with new parameters
        dist, out, cm = ABCsetup.Models[mdoublestar].simfunc(newparticle.params, ABCsetup.Models[mdoublestar].constants, targetdata)
        correctmodel = cm
      end
      if priorp == 0.0 #return to beginning of loop if prior probability is 0
        continue
      end
      #if simulated data is less than target tolerance accept particle
      if dist < ϵ
        particles[i] = newparticle
        particles[i].other = out
        particles[i].distance = dist
        distvec[i] = dist
        i += 1
        if progress == true
          next!(p)
        end
      end
      its += 1
    end

    #recompute importance weights, model probabilities and perturbation scales
    particles, weights = smcweightsmodel(particles, oldparticles, ABCsetup, modelprob)
    weights, modelprob = getparticleweights(particles, ABCsetup)
    particles = getscales(particles, ABCsetup)
    oldparticles = deepcopy(particles)

    if finalpop == true
      break
    end

    ϵ = quantile(distvec, ABCsetup.α)
    # Clamp ϵ at the target tolerance and run one final population.
    if ϵ < ABCsetup.ϵT
      ϵ = ABCsetup.ϵT
      push!(ϵvec, ϵ)
      push!(numsims, its)
      popnum = popnum + 1
      finalpop = true
      continue
    end
    push!(ϵvec, ϵ)
    push!(numsims, its)

    # Stop when ϵ has effectively stopped shrinking between populations.
    if ((( abs(ϵvec[end - 1] - ϵ )) / ϵvec[end - 1]) < ABCsetup.convergence) == true
      println("New ϵ is within $(round(ABCsetup.convergence * 100, 2))% of previous population, stop ABC SMC")
      break
    end
    popnum = popnum + 1

    if verbose == true
      show(ABCSMCmodelresults(oldparticles, numsims, ABCsetup, ϵvec))
    end
    #sometimes models die out early in the inference, restart the process if this happens
    numdeadmodels = sum(modelprob.==0.0)
    deadmodels = collect(0:(ABCsetup.nmodels-1))[modelprob.==0.0]
    if (popnum <= 4) & (numdeadmodels > 0)
      warn("One of the models died out, restarting inference...")
      out = runabcCancer(ABCsetup, targetdata; verbose = verbose, progress = progress)
      return out
    end
  end

  out = ABCSMCmodelresults(particles, numsims, ABCsetup, ϵvec)
  return out
end
"""
    runabcCancer(ABCsetup::ABCSMC, targetdata; verbose = false, progress = false)

Single-model ABC SMC for simulation functions that report whether the
realisation was valid (third return value `cm`); particles are resampled and
perturbed until a valid simulation is produced.
"""
function runabcCancer(ABCsetup::ABCSMC, targetdata; verbose = false, progress = false)

  #run first population with parameters sampled from prior
  if verbose == true
    println("##################################################")
    println("Use ABC rejection to get first population")
  end
  ABCrejresults = runabc(ABCRejection(ABCsetup.simfunc, ABCsetup.nparams,
                  ABCsetup.ϵ1, ABCsetup.prior; nparticles = ABCsetup.nparticles,
                  maxiterations = ABCsetup.maxiterations, constants = ABCsetup.constants), targetdata);

  oldparticles, weights = setupSMCparticles(ABCrejresults, ABCsetup)
  ϵ = quantile(ABCrejresults.dist, ABCsetup.α) # set new ϵ to αth quantile
  ϵvec = [ϵ] #store epsilon values
  numsims = [ABCrejresults.numsims] #keep track of number of simulations
  particles = Array{ParticleSMC}(ABCsetup.nparticles) #define particles array

  if verbose == true
    println("Run ABC SMC \n")
  end

  popnum = 1
  finalpop = false
  #declare these before the loop so they remain visible after the inner loops
  newparticle, dist, out, priorp = 0.0,0.0,0.0,0.0
  while (ϵ > ABCsetup.ϵT) & (sum(numsims) < ABCsetup.maxiterations)

    i = 1 #set particle indicator to 1
    particles = Array{ParticleSMC}(ABCsetup.nparticles)
    distvec = zeros(Float64, ABCsetup.nparticles)
    its = 1

    if progress == true
      p = Progress(ABCsetup.nparticles, 1, "ABC SMC population $(popnum), new ϵ: $(round(ϵ, 2))...", 30)
    end

    while i < ABCsetup.nparticles + 1
      correctmodel = false
      #resample and perturb until the simulation reports a valid realisation
      while correctmodel == false
        j = wsample(1:ABCsetup.nparticles, weights)
        particle = oldparticles[j]
        newparticle = perturbparticle(particle)
        priorp = priorprob(newparticle.params, ABCsetup.prior)
        if priorp == 0.0 #return to beginning of loop if prior probability is 0
          break
        end
        #simulate with new parameters
        dist, out, cm = ABCsetup.simfunc(newparticle.params, ABCsetup.constants, targetdata)
        correctmodel = cm
      end
      if priorp == 0.0 #return to beginning of loop if prior probability is 0
        continue
      end
      #if simulated data is less than target tolerance accept particle
      if dist < ϵ
        particles[i] = newparticle
        particles[i].other = out
        particles[i].distance = dist
        distvec[i] = dist
        i += 1
        if progress == true
          next!(p)
        end
      end
      its += 1
    end

    #recompute importance weights and perturbation scales for the next population
    particles, weights = smcweights(particles, oldparticles, ABCsetup.prior)
    particles = getscales(particles, ABCsetup)
    oldparticles = deepcopy(particles)

    if finalpop == true
      break
    end

    ϵ = quantile(distvec, ABCsetup.α)

    # Clamp ϵ at the target tolerance and run one final population.
    if ϵ < ABCsetup.ϵT
      ϵ = ABCsetup.ϵT
      push!(ϵvec, ϵ)
      push!(numsims, its)
      popnum = popnum + 1
      finalpop = true
      continue
    end

    push!(ϵvec, ϵ)
    push!(numsims, its)

    # Stop when ϵ has effectively stopped shrinking between populations.
    if ((( abs(ϵvec[end - 1] - ϵ )) / ϵvec[end - 1]) < ABCsetup.convergence) == true
      if verbose == true
        println("New ϵ is within $(round(ABCsetup.convergence * 100, 2))% of previous population, stop ABC SMC")
      end
      break
    end
    popnum = popnum + 1
  end

  out = ABCSMCresults(particles, numsims, ABCsetup, ϵvec)
  return out
end
|
{"hexsha": "be827c7e048db6552e26dc8cce2868107846f518", "size": 21710, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ABCalgorithm.jl", "max_stars_repo_name": "quipa/ApproxBayes.jl", "max_stars_repo_head_hexsha": "bde721fac3eeb071b2b945fa49e00c2b5c0820c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ABCalgorithm.jl", "max_issues_repo_name": "quipa/ApproxBayes.jl", "max_issues_repo_head_hexsha": "bde721fac3eeb071b2b945fa49e00c2b5c0820c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ABCalgorithm.jl", "max_forks_repo_name": "quipa/ApproxBayes.jl", "max_forks_repo_head_hexsha": "bde721fac3eeb071b2b945fa49e00c2b5c0820c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7111801242, "max_line_length": 260, "alphanum_fraction": 0.6559649931, "num_tokens": 5961}
|
#pragma once
#include <string>
#include <cstdint>
#include <memory>
#include <boost/asio.hpp>
#include <speaker_connection.hpp>
#include <aria/io_thread.hpp>
namespace aria {
/// Interface implemented by consumers of speaker events.
class speaker_callbacks
{
public:
    /// Virtual destructor: this is a polymorphic interface, so deleting an
    /// implementation through a speaker_callbacks pointer must be safe.
    virtual ~speaker_callbacks() = default;

    /// Called with a chunk of received audio data.
    virtual void audio_received(const char * data, size_t length) = 0;

    /// Called when a player has connected.
    virtual void player_connected() = 0;

    /// Called when the player has disconnected.
    virtual void player_disconnected() = 0;
};
/// TCP-accepting speaker endpoint running on an io_thread.
class speaker : public io_thread
{
public:
    /// `callbacks` must outlive this object: only a reference is stored.
    speaker(speaker_callbacks & callbacks);

protected:
    /// io_thread hook — presumably runs setup before the io service starts;
    /// confirm against the io_thread base class and speaker.cpp.
    void before_io_service() override;

private:
    /// NOTE(review): presumably starts an asynchronous accept on _acceptor —
    /// confirm against the implementation.
    void accept();

    boost::asio::ip::tcp::acceptor _acceptor;
    speaker_callbacks & _callbacks;

    // Note: _connection must only be accessed from the io thread
    std::shared_ptr<speaker_connection> _connection;
};
}
|
{"hexsha": "73f00813f518503fa22ce1ef1d76f30b37acf3ec", "size": 828, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/aria/speaker.hpp", "max_stars_repo_name": "Andlon/aria", "max_stars_repo_head_hexsha": "30fcabf9a2f577c6bbaea66ca0b3ead21dba92e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/aria/speaker.hpp", "max_issues_repo_name": "Andlon/aria", "max_issues_repo_head_hexsha": "30fcabf9a2f577c6bbaea66ca0b3ead21dba92e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-08-26T15:20:32.000Z", "max_issues_repo_issues_event_max_datetime": "2015-08-26T15:20:32.000Z", "max_forks_repo_path": "include/aria/speaker.hpp", "max_forks_repo_name": "Andlon/aria", "max_forks_repo_head_hexsha": "30fcabf9a2f577c6bbaea66ca0b3ead21dba92e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2307692308, "max_line_length": 74, "alphanum_fraction": 0.652173913, "num_tokens": 180}
|
import numpy
import torch
import conf.config
import dataset.voc_dataset
import model.yolov3
if __name__ == "__main__":
# 0. 确保每次的伪随机数相同以便于问题的复现
numpy.random.seed(0)
torch.manual_seed(1)
# 1. 配置文件
Config = conf.config.VocConfig
# 2. 验证集
BATCH_SIZE = 16
voc_dataloader = dataset.voc_dataset.VOCDataset.TrainDataloader(
config=Config,
batch_size=BATCH_SIZE
)
# 3. 初始化模型
yolov3 = model.yolov3.YoloV3(Config)
# 4. 遍历数据集
EPOCH = 1
for epoch in range(EPOCH):
print("Epoch:", epoch)
for batch_index, (tensord_images, tensord_boxes_list) in enumerate(voc_dataloader):
print("batch_index:", batch_index)
for step in range(BATCH_SIZE):
print("step:", step)
# 4. 预测结果并记录
image = yolov3.predict_with_loss(
tensord_images[step],
tensord_boxes_list[step],
)
image.show()
exit(-1)
|
{"hexsha": "d3e9a563887151d357d04e16b2b65aa2846ff111", "size": 1017, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_voc_with_loss.py", "max_stars_repo_name": "lilinxi/210414_CfgYoloV3", "max_stars_repo_head_hexsha": "e6bbb64efa22e7d4c1f583f033370be4b16e548b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict_voc_with_loss.py", "max_issues_repo_name": "lilinxi/210414_CfgYoloV3", "max_issues_repo_head_hexsha": "e6bbb64efa22e7d4c1f583f033370be4b16e548b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict_voc_with_loss.py", "max_forks_repo_name": "lilinxi/210414_CfgYoloV3", "max_forks_repo_head_hexsha": "e6bbb64efa22e7d4c1f583f033370be4b16e548b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6511627907, "max_line_length": 91, "alphanum_fraction": 0.5821042281, "include": true, "reason": "import numpy", "num_tokens": 299}
|
[STATEMENT]
lemma Transset_INT: "(\<And>x. x \<in> A \<Longrightarrow> Transset (B x)) \<Longrightarrow> Transset (\<Sqinter> (B ` A))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>x. x \<in> A \<Longrightarrow> Transset (B x)) \<Longrightarrow> Transset (\<Sqinter> (B ` A))
[PROOF STEP]
by (metis Transset_Inf imageE)
|
{"llama_tokens": 126, "file": "ZFC_in_HOL_ZFC_in_HOL", "length": 1}
|
// Copyright (C) 2015, Pawel Tomulik <ptomulik@meil.pw.edu.pl>
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#define BOOST_TEST_MODULE test_txpl_vm_eval_binary_eq
#include <txpl/test_config.hpp>
#include <boost/test/unit_test.hpp>
#ifndef TXPL_TEST_SKIP_VM_EVAL_BINARY_EQ
#include <txpl/vm/eval_binary_op.hpp>
#include <txpl/vm/basic_types.hpp>
#include <txpl/vm/value.hpp>
#include <boost/variant/apply_visitor.hpp>
#include <boost/variant/get.hpp>
#include <type_traits>
using namespace txpl::vm;

// Short aliases for the scalar value categories provided by basic_types<>.
typedef basic_types<>::char_type char_type;
typedef basic_types<>::int_type int_type;
typedef basic_types<>::bool_type bool_type;
typedef basic_types<>::real_type real_type;
typedef basic_types<>::string_type string_type;
typedef basic_types<>::regex_type regex_type;
typedef basic_types<>::blank_type blank_type;
// Composite value categories built on top of value<>.
typedef array<value<> > array_type;
typedef object<value<> > object_type;
// eq_ on (char, char): supported; the result must hold bool_type equal to
// the built-in comparison of the two operands.
BOOST_AUTO_TEST_CASE(char__eq__char)
{
  value<> result;
  auto visitor = eval_binary_op<op_t::eq_>(result);
  const value<> lhs = char_type{'\0'};
  const value<> rhs = char_type{'\0'};
  result = blank_type();
  BOOST_CHECK(boost::apply_visitor(visitor, lhs, rhs));
  bool_type actual = bool_type{true};
  BOOST_CHECK_NO_THROW(actual = boost::get<bool_type>(result));
  BOOST_CHECK(actual == (char_type{'\0'} == char_type{'\0'}));
}
// eq_ on (char, int): supported mixed comparison; result is bool_type.
BOOST_AUTO_TEST_CASE(char__eq__int)
{
  value<> result;
  auto visitor = eval_binary_op<op_t::eq_>(result);
  const value<> lhs = char_type{'\0'};
  const value<> rhs = int_type{0};
  result = blank_type();
  BOOST_CHECK(boost::apply_visitor(visitor, lhs, rhs));
  bool_type actual = bool_type{true};
  BOOST_CHECK_NO_THROW(actual = boost::get<bool_type>(result));
  BOOST_CHECK(actual == (char_type{'\0'} == int_type{0}));
}
// eq_ on (char, bool): supported mixed comparison; result is bool_type.
BOOST_AUTO_TEST_CASE(char__eq__bool)
{
  value<> result;
  auto visitor = eval_binary_op<op_t::eq_>(result);
  const value<> lhs = char_type{'\0'};
  const value<> rhs = bool_type{false};
  result = blank_type();
  BOOST_CHECK(boost::apply_visitor(visitor, lhs, rhs));
  bool_type actual = bool_type{true};
  BOOST_CHECK_NO_THROW(actual = boost::get<bool_type>(result));
  BOOST_CHECK(actual == (char_type{'\0'} == bool_type{false}));
}
// eq_ on (char, real): supported mixed comparison; result is bool_type.
BOOST_AUTO_TEST_CASE(char__eq__real)
{
  value<> result;
  auto visitor = eval_binary_op<op_t::eq_>(result);
  const value<> lhs = char_type{'\0'};
  const value<> rhs = real_type{0.0};
  result = blank_type();
  BOOST_CHECK(boost::apply_visitor(visitor, lhs, rhs));
  bool_type actual = bool_type{true};
  BOOST_CHECK_NO_THROW(actual = boost::get<bool_type>(result));
  BOOST_CHECK(actual == (char_type{'\0'} == real_type{0.0}));
}
// eq_ on (char, string): unsupported combination; the visitor reports
// failure and the result stays blank.
BOOST_AUTO_TEST_CASE(char__eq__string)
{
  value<> result;
  auto visitor = eval_binary_op<op_t::eq_>(result);
  const value<> lhs = char_type{'a'};
  const value<> rhs = string_type();
  result = blank_type();
  BOOST_CHECK(!boost::apply_visitor(visitor, lhs, rhs));
  BOOST_CHECK_NO_THROW(boost::get<blank_type>(result));
}
BOOST_AUTO_TEST_CASE(char__eq__regex)
{
using namespace txpl::vm;
value<> r;
auto op = eval_binary_op<op_t::eq_>(r);
{
const value<> v1 = char_type{'a'};
const value<> v2 = regex_type();
r = blank_type();
BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
}
}
BOOST_AUTO_TEST_CASE(char__eq__array)
{
using namespace txpl::vm;
value<> r;
auto op = eval_binary_op<op_t::eq_>(r);
{
const value<> v1 = char_type{'a'};
const value<> v2 = array_type();
r = blank_type();
BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
}
}
BOOST_AUTO_TEST_CASE(char__eq__object)
{
using namespace txpl::vm;
value<> r;
auto op = eval_binary_op<op_t::eq_>(r);
{
const value<> v1 = char_type{'a'};
const value<> v2 = object_type();
r = blank_type();
BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
}
}
// int vs each arithmetic type (char/int/bool/real) is a supported comparison:
// the visitor succeeds and stores a bool_type result matching the plain C++
// comparison of the same operands.

// int vs char.
BOOST_AUTO_TEST_CASE(int__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (int_type{0} == char_type{'\0'}));
  }
}
// int vs int.
BOOST_AUTO_TEST_CASE(int__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (int_type{0} == int_type{0}));
  }
}
// int vs bool.
BOOST_AUTO_TEST_CASE(int__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (int_type{0} == bool_type{false}));
  }
}
// int vs real.
BOOST_AUTO_TEST_CASE(int__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (int_type{0} == real_type{0.0}));
  }
}
// For int vs non-arithmetic operands the eq_ operator is undefined: the
// visitor reports failure and the result slot keeps its blank_type value.

// int vs string: unsupported comparison.
BOOST_AUTO_TEST_CASE(int__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// int vs regex: unsupported comparison.
BOOST_AUTO_TEST_CASE(int__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// int vs array: unsupported comparison.
BOOST_AUTO_TEST_CASE(int__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// int vs object: unsupported comparison.
BOOST_AUTO_TEST_CASE(int__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = int_type{0};
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// bool == char is a supported comparison: the visitor succeeds and the
// bool_type result matches the plain C++ comparison of the operands.
BOOST_AUTO_TEST_CASE(bool__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    // FIX: the expected expression had the types swapped
    // (`char_type{false} == bool_type{'\0'}`); it must mirror the operands
    // under test — a bool_type lhs compared with a char_type rhs.
    BOOST_CHECK(x == (bool_type{false} == char_type{'\0'}));
  }
}
// bool == int is a supported comparison: the visitor succeeds and the
// bool_type result matches the plain C++ comparison of the operands.
BOOST_AUTO_TEST_CASE(bool__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (bool_type{false} == int_type{0}));
  }
}
// bool == bool: exercises the full 2x2 truth table; each case succeeds and the
// bool_type result matches the plain C++ comparison of the operands.
BOOST_AUTO_TEST_CASE(bool__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    // false == false -> true
    const value<> v1 = bool_type{false};
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (bool_type{false} == bool_type{false}));
  }
  {
    // false == true -> false
    const value<> v1 = bool_type{false};
    const value<> v2 = bool_type{true};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{false};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (bool_type{false} == bool_type{true}));
  }
  {
    // true == false -> false
    const value<> v1 = bool_type{true};
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{false};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (bool_type{true} == bool_type{false}));
  }
  {
    // true == true -> true
    const value<> v1 = bool_type{true};
    const value<> v2 = bool_type{true};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{false};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (bool_type{true} == bool_type{true}));
  }
}
// bool == real is a supported comparison: the visitor succeeds and the
// bool_type result matches the plain C++ comparison of the operands.
BOOST_AUTO_TEST_CASE(bool__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    // FIX: the expected expression previously read
    // `bool_type{false} == bool_type{0.0}`, silently converting the real
    // operand to bool; it must mirror the actual operand types under test.
    BOOST_CHECK(x == (bool_type{false} == real_type{0.0}));
  }
}
// For bool vs non-arithmetic operands the eq_ operator is undefined: the
// visitor reports failure and the result slot keeps its blank_type value.

// bool vs string: unsupported comparison.
BOOST_AUTO_TEST_CASE(bool__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// bool vs regex: unsupported comparison.
BOOST_AUTO_TEST_CASE(bool__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// bool vs array: unsupported comparison.
BOOST_AUTO_TEST_CASE(bool__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// bool vs object: unsupported comparison.
BOOST_AUTO_TEST_CASE(bool__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = bool_type{false};
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// real vs each arithmetic type (char/int/bool/real) is a supported comparison:
// the visitor succeeds and stores a bool_type result matching the plain C++
// comparison of the same operands.

// real vs char.
BOOST_AUTO_TEST_CASE(real__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (real_type{0.0} == char_type{'\0'}));
  }
}
// real vs int.
BOOST_AUTO_TEST_CASE(real__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (real_type{0.0} == int_type{0}));
  }
}
// real vs bool.
BOOST_AUTO_TEST_CASE(real__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (real_type{0.0} == bool_type{false}));
  }
}
// real vs real.
BOOST_AUTO_TEST_CASE(real__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{true};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (real_type{0.0} == real_type{0.0}));
  }
}
// For real vs non-arithmetic operands the eq_ operator is undefined: the
// visitor reports failure and the result slot keeps its blank_type value.

// real vs string: unsupported comparison.
BOOST_AUTO_TEST_CASE(real__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// real vs regex: unsupported comparison.
BOOST_AUTO_TEST_CASE(real__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// real vs array: unsupported comparison.
BOOST_AUTO_TEST_CASE(real__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// real vs object: unsupported comparison.
BOOST_AUTO_TEST_CASE(real__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = real_type{0.0};
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// For string vs arithmetic operands the eq_ operator is undefined: the
// visitor reports failure and the result slot keeps its blank_type value.

// string vs char: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string vs int: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string vs bool: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string vs real: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string == string is a supported comparison: the visitor succeeds and the
// bool_type result matches std::string equality of the same operands.
BOOST_AUTO_TEST_CASE(string__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  // Apply `op` to two strings and verify the extracted bool result against
  // the plain comparison of the same operands.
  auto check_eq = [&](const string_type& lhs, const string_type& rhs)
  {
    const value<> v1 = lhs;
    const value<> v2 = rhs;
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    bool_type x = bool_type{false};
    BOOST_CHECK_NO_THROW(x = boost::get<bool_type>(r));
    BOOST_CHECK(x == (lhs == rhs));
  };
  check_eq(string_type("asd"), string_type("asd"));  // equal strings
  check_eq(string_type("asd"), string_type("qwe"));  // different strings
}
// string vs regex/array/object: unsupported comparisons; the visitor fails
// and the result slot keeps its blank_type value.

// string vs regex: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string vs array: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// string vs object: unsupported comparison.
BOOST_AUTO_TEST_CASE(string__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = string_type();
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex does not support eq_ against any operand — including another regex —
// so every case below fails and the result slot keeps its blank_type value.

// regex vs char.
BOOST_AUTO_TEST_CASE(regex__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs int.
BOOST_AUTO_TEST_CASE(regex__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs bool.
BOOST_AUTO_TEST_CASE(regex__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs real.
BOOST_AUTO_TEST_CASE(regex__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs string.
BOOST_AUTO_TEST_CASE(regex__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs regex: equality is undefined even between two regexes.
BOOST_AUTO_TEST_CASE(regex__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs array.
BOOST_AUTO_TEST_CASE(regex__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// regex vs object.
BOOST_AUTO_TEST_CASE(regex__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = regex_type();
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs non-array operands: eq_ is undefined; the visitor fails and the
// result slot keeps its blank_type value.

// array vs char.
BOOST_AUTO_TEST_CASE(array__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs int.
BOOST_AUTO_TEST_CASE(array__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs bool.
BOOST_AUTO_TEST_CASE(array__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs real.
BOOST_AUTO_TEST_CASE(array__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs string.
BOOST_AUTO_TEST_CASE(array__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs regex.
BOOST_AUTO_TEST_CASE(array__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array == array compares elementwise: equal-length arrays yield an array of
// per-element bool_type results; a length mismatch fails and leaves r blank.
BOOST_AUTO_TEST_CASE(array__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    // Two empty arrays: comparison succeeds with an empty result array.
    const value<> v1 = array_type();
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    array_type x;
    BOOST_CHECK_NO_THROW(x = boost::get<array_type>(r));
    BOOST_CHECK_EQUAL(x.size(), 0);
  }
  {
    // Same length, mixed element types: the first two elements match, the
    // third (different reals) does not — result is {true, true, false}.
    array_type a1(3);
    array_type a2(3);
    a2[0] = a1[0] = char_type{'a'};
    a2[1] = a1[1] = int_type{100};
    a2[2] = real_type{321.0}; a1[2] = real_type{.123};
    const value<> v1 = a1;
    const value<> v2 = a2;
    r = blank_type();
    BOOST_CHECK(boost::apply_visitor(op, v1, v2));
    array_type x;
    BOOST_CHECK_NO_THROW(x = boost::get<array_type>(r));
    BOOST_CHECK_EQUAL(x.size(), 3);
    BOOST_CHECK(boost::get<bool_type>(x[0]));
    BOOST_CHECK(boost::get<bool_type>(x[1]));
    BOOST_CHECK(!boost::get<bool_type>(x[2]));
  }
  {
    // Length mismatch (3 vs 2): the comparison fails and r stays blank.
    array_type a1(3);
    array_type a2(2);
    a2[0] = a1[0] = char_type{'a'};
    a2[1] = a1[1] = int_type{100};
    a1[2] = real_type{.123};
    const value<> v1 = a1;
    const value<> v2 = a2;
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// array vs object: unsupported comparison; the visitor fails and the result
// slot keeps its blank_type value.
BOOST_AUTO_TEST_CASE(array__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = array_type();
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object does not support eq_ against any operand — including another
// object — so every case below fails and the result slot stays blank.

// object vs char.
BOOST_AUTO_TEST_CASE(object__eq__char)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = char_type{'\0'};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs int.
BOOST_AUTO_TEST_CASE(object__eq__int)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = int_type{0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs bool.
BOOST_AUTO_TEST_CASE(object__eq__bool)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = bool_type{false};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs real.
BOOST_AUTO_TEST_CASE(object__eq__real)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = real_type{0.0};
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs string.
BOOST_AUTO_TEST_CASE(object__eq__string)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = string_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs regex.
BOOST_AUTO_TEST_CASE(object__eq__regex)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = regex_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs array.
BOOST_AUTO_TEST_CASE(object__eq__array)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = array_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
// object vs object: equality is undefined even between two objects.
BOOST_AUTO_TEST_CASE(object__eq__object)
{
  using namespace txpl::vm;
  value<> r;
  auto op = eval_binary_op<op_t::eq_>(r);
  {
    const value<> v1 = object_type();
    const value<> v2 = object_type();
    r = blank_type();
    BOOST_CHECK(!boost::apply_visitor(op, v1, v2));
    BOOST_CHECK_NO_THROW(boost::get<blank_type>(r));
  }
}
#else
// Placeholder so the test binary still contains at least one test case when
// the feature block above is compiled out.
BOOST_AUTO_TEST_CASE(dummy)
{
  BOOST_CHECK(true);
}
#endif
|
{"hexsha": "bc9198e27d42f5af5d02ee7a7ed4c807256c19bb", "size": 26480, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/txpl/vm/eval_binary_eq_test.cpp", "max_stars_repo_name": "ptomulik/txpl", "max_stars_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/txpl/vm/eval_binary_eq_test.cpp", "max_issues_repo_name": "ptomulik/txpl", "max_issues_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 14.0, "max_issues_repo_issues_event_min_datetime": "2015-03-02T14:02:32.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-17T21:50:30.000Z", "max_forks_repo_path": "test/txpl/vm/eval_binary_eq_test.cpp", "max_forks_repo_name": "ptomulik/txpl", "max_forks_repo_head_hexsha": "109b5847abe0d46c598ada46f411f98ebe8dc4c8", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0756646217, "max_line_length": 65, "alphanum_fraction": 0.6501132931, "num_tokens": 8102}
|
"""
Title: Video Classification With TimeDistributed Layer
Author: [Sujoy K Goswami](https://www.linkedin.com/in/sujoy-kumar-goswami/)
Date created: 2022/01/09
Last modified: 2022/01/10
Description: Guide to examine any video-classification-model quickly without any GPU.
"""
"""
## Introduction
Video-classification deep-learning models are heavy and need huge amounts of data,
so training them is time-consuming and expensive. This guide shows how to examine
your model quickly, before feeding it the actual data, and without needing any GPU.
A video dataset will be created here: a white rectangle moving in different
directions on a black canvas. The sample code for creating left-moving-rectangle videos is below.
"""
import numpy as np
import skvideo.io as sk
from IPython.display import Video

# Create a handful of sample videos: each frame is a black canvas with a
# white rectangle that drifts left by one pixel per frame.
num_vids = 5
num_imgs = 50
img_size = 50
min_object_size = 1  # rectangle side lengths are drawn from [min, max)
max_object_size = 5
for i_vid in range(num_vids):
    imgs = np.zeros((num_imgs, img_size, img_size))  # set background to 0
    vid_name = "vid" + str(i_vid) + ".mp4"
    # Random rectangle size and starting position (kept fully on-canvas).
    w, h = np.random.randint(min_object_size, max_object_size, size=2)
    x = np.random.randint(0, img_size - w)
    y = np.random.randint(0, img_size - h)
    i_img = 0
    # Move left one pixel per frame until the rectangle reaches the left
    # edge; any remaining frames stay black.
    while x > 0:
        imgs[i_img, y : y + h, x : x + w] = 255  # set rectangle as foreground
        x = x - 1
        i_img = i_img + 1
    sk.vwrite(vid_name, imgs.astype(np.uint8))

Video(
    "vid3.mp4"
)  # play a video; the script and video generated should be in same folder
"""
## Data Generation and Preparation
Now a dataset with 4 classes will be created, where the rectangle moves in one of
4 different directions — one direction per class.
"""
# preparing dataset
X_train = []
Y_train = []
labels = {0: "left", 1: "right", 2: "up", 3: "down"}  # 4 classes

num_vids = 40
num_imgs = 40
img_size = 40
min_object_size = 1
max_object_size = 5


def _make_moving_rect_video(dx, dy):
    """Render one synthetic video: a randomly sized/placed white rectangle on
    a black canvas moving by (dx, dy) pixels per frame until it reaches the
    canvas edge. Returns an array of shape (num_imgs, img_size, img_size).
    """
    imgs = np.zeros((num_imgs, img_size, img_size))  # set background to 0
    w, h = np.random.randint(min_object_size, max_object_size, size=2)
    x = np.random.randint(0, img_size - w)
    y = np.random.randint(0, img_size - h)
    i_img = 0
    while i_img < num_imgs:
        # Stop once the moving coordinate hits the canvas edge, matching the
        # per-direction while-loops this helper replaces.
        pos = x if dx != 0 else y
        if (dx < 0 or dy < 0) and pos <= 0:
            break
        if (dx > 0 or dy > 0) and pos >= img_size:
            break
        imgs[i_img, y : y + h, x : x + w] = 255  # set rectangle as foreground
        x += dx
        y += dy
        i_img += 1
    return imgs


# One class per movement direction; generation order (left, right, up, down)
# matches the original four copy-pasted loops.
for _label, (_dx, _dy) in {0: (-1, 0), 1: (1, 0), 2: (0, -1), 3: (0, 1)}.items():
    for _ in range(num_vids):
        X_train.append(_make_moving_rect_video(_dx, _dy))
        # FIX: append exactly one label per video. The original code appended
        # num_imgs labels per video, giving len(Y_train) == 6400 while
        # len(X_train) == 160, so the later
        # Y_train.reshape(X_train.shape[0], 1) raised a ValueError.
        Y_train.append(_label)
# data pre-processing
from tensorflow.keras.utils import to_categorical

# Scale pixel values to [0, 1] and add a trailing channel axis.
X_train = np.array(X_train, dtype=np.float32) / 255
X_train = X_train.reshape(X_train.shape[0], num_imgs, img_size, img_size, 1)
print(X_train.shape)
Y_train = np.array(Y_train, dtype=np.uint8)
# NOTE(review): this reshape assumes exactly one label per video
# (len(Y_train) == X_train.shape[0]); the generation loops above append
# num_imgs labels per video, which would make this reshape fail — confirm.
Y_train = Y_train.reshape(X_train.shape[0], 1)
print(Y_train.shape)
Y_train = to_categorical(Y_train, 4)  # one-hot encode the 4 direction classes
"""
## Model Building and Training
The TimeDistributed layer is used to pass the temporal information of the videos
to the network. **No GPU is needed.** Training completes within a few minutes.
"""
# building model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import TimeDistributed

model = Sequential()
# TimeDistributed applies the wrapped 2D layers to every frame independently,
# preserving the per-frame (temporal) ordering of the CNN features.
model.add(
    TimeDistributed(
        Conv2D(8, (3, 3), strides=(1, 1), activation="relu", padding="same"),
        input_shape=(num_imgs, img_size, img_size, 1),
    )
)
model.add(
    TimeDistributed(
        Conv2D(8, (3, 3), kernel_initializer="he_normal", activation="relu")
    )
)
# NOTE(review): a (1, 1) pool with stride (1, 1) is spatially a no-op —
# presumably a larger pool size was intended; confirm.
model.add(TimeDistributed(MaxPooling2D((1, 1), strides=(1, 1))))
model.add(TimeDistributed(Flatten()))
model.add(Dropout(0.3))
# The LSTM consumes the per-frame feature sequence and emits a single vector
# (return_sequences=False), i.e. one prediction per video.
model.add(LSTM(64, return_sequences=False, dropout=0.3))
model.add(Dense(4, activation="softmax"))  # one unit per direction class
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()

# model training
model.fit(X_train, Y_train, epochs=40, verbose=1)
"""
## Model Inferencing
Model is tested on new generated video data.
"""
# model testing with new data (4 videos)
X_test = []
Y_test = []  # NOTE(review): never populated below — presumably unused; confirm
for i_vid in range(2):
    imgs = np.zeros((num_imgs, img_size, img_size))  # set background to 0
    w, h = np.random.randint(min_object_size, max_object_size, size=2)
    x = np.random.randint(0, img_size - w)
    y = np.random.randint(0, img_size - h)
    i_img = 0
    # right-moving rectangle
    while x < img_size:
        imgs[i_img, y : y + h, x : x + w] = 255  # set rectangle as foreground
        x = x + 1
        i_img = i_img + 1
    X_test.append(imgs)  # 2nd class - 'right'
for i_vid in range(2):
    imgs = np.zeros((num_imgs, img_size, img_size))  # set background to 0
    w, h = np.random.randint(min_object_size, max_object_size, size=2)
    x = np.random.randint(0, img_size - w)
    y = np.random.randint(0, img_size - h)
    i_img = 0
    # down-moving rectangle
    while y < img_size:
        imgs[i_img, y : y + h, x : x + w] = 255  # set rectangle as foreground
        y = y + 1
        i_img = i_img + 1
    X_test.append(imgs)  # 4th class - 'down'

# Same preprocessing as for training: scale to [0, 1], add a channel axis.
X_test = np.array(X_test, dtype=np.float32) / 255
X_test = X_test.reshape(X_test.shape[0], num_imgs, img_size, img_size, 1)
pred = np.argmax(model.predict(X_test), axis=-1)
for i in range(len(X_test)):
    print(labels[pred[i]])
"""
The model classifies this synthetic dataset correctly, confirming that the
architecture can be examined quickly on generated data before using real videos.
"""
|
{"hexsha": "c02e17a5fbac572032f17b85c1dd31da84a98fd5", "size": 7315, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/vision/video_classify.py", "max_stars_repo_name": "SujoyKG/keras-io", "max_stars_repo_head_hexsha": "ca430025f0563907c7d6e57d66acde08c467ecac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/vision/video_classify.py", "max_issues_repo_name": "SujoyKG/keras-io", "max_issues_repo_head_hexsha": "ca430025f0563907c7d6e57d66acde08c467ecac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/vision/video_classify.py", "max_forks_repo_name": "SujoyKG/keras-io", "max_forks_repo_head_hexsha": "ca430025f0563907c7d6e57d66acde08c467ecac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5047169811, "max_line_length": 87, "alphanum_fraction": 0.6509911141, "include": true, "reason": "import numpy", "num_tokens": 2108}
|
[STATEMENT]
(* Prepending a B token neither creates nor destroys line validity: the
   bi-implication follows from the two one-directional lemmas used below. *)
lemma valid_line_prepend_B_iff:
  "valid_line (B # xs) \<longleftrightarrow> valid_line xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. valid_line (B # xs) = valid_line xs
[PROOF STEP]
using valid_line_prepend_B valid_line_drop_B
[PROOF STATE]
proof (prove)
using this:
valid_line ?xs \<Longrightarrow> valid_line (B # ?xs)
valid_line (B # ?xs) \<Longrightarrow> valid_line ?xs
goal (1 subgoal):
 1. valid_line (B # xs) = valid_line xs
[PROOF STEP]
by metis
|
{"llama_tokens": 190, "file": "VerifyThis2018_Challenge2", "length": 2}
|
import os
import unittest
import numpy as np
from gnes.encoder.base import BaseEncoder
class TestPCAEncoder(unittest.TestCase):
    """Round-trip tests for the YAML-configured PCA encoder: train/encode,
    then dump, reload, and encode again."""

    def setUp(self):
        # Paths are resolved relative to this test module.
        dirname = os.path.dirname(__file__)
        self.dump_path = os.path.join(dirname, 'pca_encoder.bin')
        self.yaml_path = os.path.join(dirname, 'yaml', 'pca.yml')
        # 1000 random 1024-dim vectors; float32 is the dtype fed to the encoder.
        self.test_numeric = np.random.randint(0, 255, (1000, 1024)).astype('float32')

    def test_encoding(self):
        self.encoder = BaseEncoder.load_yaml(self.yaml_path)
        # train before encode to create pca_components
        self.encoder.train(self.test_numeric)
        vec = self.encoder.encode(self.test_numeric)
        # 1024-dim input reduced to 300 components per the YAML config.
        self.assertEqual(vec.shape, (1000, 300))
        # dump after train with valid pca_components, then verify the
        # reloaded encoder produces output of the same shape
        self.encoder.dump(self.dump_path)
        encoder2 = BaseEncoder.load(self.dump_path)
        vec = encoder2.encode(self.test_numeric)
        self.assertEqual(vec.shape, (1000, 300))

    def tearDown(self):
        # Remove the dump file so repeated runs start from a clean state.
        if os.path.exists(self.dump_path):
            os.remove(self.dump_path)
|
{"hexsha": "dd17cde3ed4c395d9240f2fda074633c86e123f7", "size": 1061, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_pca_encoder.py", "max_stars_repo_name": "micro-pixel/gnes", "max_stars_repo_head_hexsha": "388d1ba718ec04eedaaff3ce34da43689c197ee7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-05T03:51:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T05:56:37.000Z", "max_issues_repo_path": "tests/test_pca_encoder.py", "max_issues_repo_name": "cmy9068/gnes", "max_issues_repo_head_hexsha": "44a54be4c80108ac65b2450b4af8deded6da3339", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_pca_encoder.py", "max_forks_repo_name": "cmy9068/gnes", "max_forks_repo_head_hexsha": "44a54be4c80108ac65b2450b4af8deded6da3339", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-28T15:07:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-28T15:07:36.000Z", "avg_line_length": 34.2258064516, "max_line_length": 85, "alphanum_fraction": 0.6757775683, "include": true, "reason": "import numpy", "num_tokens": 235}
|
/*
Copyright (c) 2008, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/pch.hpp"
#ifdef _MSC_VER
#pragma warning(push, 1)
#endif
#include <boost/shared_ptr.hpp>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#include <vector>
#include <utility>
#include <numeric>
#include <cstdio>
#include "libtorrent/peer_connection.hpp"
#include "libtorrent/bt_peer_connection.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/bencode.hpp"
#include "libtorrent/torrent.hpp"
#include "libtorrent/extensions.hpp"
#include "libtorrent/extensions/ut_metadata.hpp"
#include "libtorrent/alert_types.hpp"
#ifdef TORRENT_STATS
#include "libtorrent/aux_/session_impl.hpp"
#endif
namespace libtorrent { namespace
{
// Returns true if this tracker may be exchanged with peers.
// fail_limit == 0 marks a tracker that came from the original
// .torrent file; otherwise the tracker must have been verified
// (seen to respond) before we forward it.
bool send_tracker(announce_entry const& e)
{
	return e.fail_limit == 0 || e.verified;
}
// Torrent-level half of the lt_tex (tracker exchange) extension.
// Keeps the set of trackers we have already exchanged, a bencoded
// "added" delta message, and a SHA-1 hash over the sorted tracker
// URLs that peers compare during the extension handshake.
struct lt_tracker_plugin : torrent_plugin
{
	lt_tracker_plugin(torrent& t)
		: m_torrent(t)
		, m_updates(0)
		// start close to the 120-tick threshold so the first scan in
		// tick() happens shortly after start-up rather than 2 minutes in
		, m_2_minutes(110)
	{
		m_old_trackers = t.trackers();
		update_list_hash();
	}

	virtual boost::shared_ptr<peer_plugin> new_connection(
		peer_connection* pc);

	// Called periodically by the torrent; every 120 ticks, rebuild the
	// bencoded delta ("added") of trackers new since the last scan.
	virtual void tick()
	{
		if (m_2_minutes++ < 120) return;
		m_2_minutes = 0;
		// build tracker diff
		entry tex;
		entry::list_type& added = tex["added"].list();
		std::vector<announce_entry> const& trackers = m_torrent.trackers();
		for (std::vector<announce_entry>::const_iterator i = trackers.begin()
			, end(trackers.end()); i != end; ++i)
		{
			// skip trackers we have already exchanged
			std::vector<announce_entry>::const_iterator k = std::find_if(
				m_old_trackers.begin(), m_old_trackers.end()
				, boost::bind(&announce_entry::url, _1) == i->url);
			if (k != m_old_trackers.end()) continue;
			// only exchange .torrent-file or verified trackers
			// (see send_tracker())
			if (!send_tracker(*i)) continue;
			m_old_trackers.push_back(*i);
			++m_updates;
			added.push_back(i->url);
		}
		m_lt_trackers_msg.clear();
		bencode(std::back_inserter(m_lt_trackers_msg), tex);
		if (m_updates > 0) update_list_hash();
	}

	// Recompute the SHA-1 over the sorted tracker URLs. Peers advertise
	// this hash in the handshake ("tr") so both sides can skip a full
	// list exchange when the lists already match.
	void update_list_hash()
	{
		std::vector<std::string> canonical_list;
		std::transform(m_old_trackers.begin(), m_old_trackers.end(), back_inserter(canonical_list)
			, boost::bind(&announce_entry::url, _1));
		std::sort(canonical_list.begin(), canonical_list.end());
		hasher h;
		std::for_each(canonical_list.begin(), canonical_list.end()
			, boost::bind(&hasher::update, &h, _1));
		m_list_hash = h.final();
	}

	int num_updates() const { return m_updates; }
	std::vector<char> const& get_lt_tex_msg() const { return m_lt_trackers_msg; }
	sha1_hash const& list_hash() const { return m_list_hash; }
	std::vector<announce_entry> const& trackers() const { return m_old_trackers; }

private:
	torrent& m_torrent;
	// trackers that have already been exchanged with peers
	std::vector<announce_entry> m_old_trackers;
	// number of tracker URLs added since construction
	int m_updates;
	// tick counter towards the 2-minute rebuild period
	int m_2_minutes;
	// bencoded "added" delta message, rebuilt in tick()
	std::vector<char> m_lt_trackers_msg;
	// SHA-1 of the sorted tracker URL list
	sha1_hash m_list_hash;
};
// Per-peer half of the lt_tex extension. Handles the extension
// handshake, parses incoming tracker lists, and periodically sends our
// own list: the full list on the first exchange (unless the handshake
// hashes matched), deltas afterwards.
struct lt_tracker_peer_plugin : peer_plugin
{
	lt_tracker_peer_plugin(torrent& t, bt_peer_connection& pc, lt_tracker_plugin& tp)
		: m_message_index(0)
		, m_torrent(t)
		, m_pc(pc)
		, m_tp(tp)
		// start near the 120-tick threshold so the first send happens
		// shortly after the connection is established
		, m_2_minutes(115)
		, m_full_list(true)
	{}

	// can add entries to the extension handshake
	virtual void add_handshake(entry& h)
	{
		entry& messages = h["m"];
		// 3 is our local message id for lt_tex
		messages["lt_tex"] = 3;
		// advertise the hash of our tracker list so the peer can skip
		// the full exchange when the lists already match
		h["tr"] = m_tp.list_hash().to_string();
	}

	// called when the extension handshake from the other end is received
	virtual bool on_extension_handshake(lazy_entry const& h)
	{
		m_message_index = 0;
		if (h.type() != lazy_entry::dict_t) return false;
		lazy_entry const* messages = h.dict_find("m");
		if (!messages || messages->type() != lazy_entry::dict_t) return false;
		int index = messages->dict_find_int_value("lt_tex", -1);
		if (index == -1) return false;
		// remember the message id the remote end assigned to lt_tex
		m_message_index = index;
		// if we have the same tracker list, don't bother sending the
		// full list. Just send deltas
		std::string tracker_list_hash = h.dict_find_string_value("tr");
		if (tracker_list_hash.size() == 20
			&& sha1_hash(tracker_list_hash) == m_tp.list_hash())
		{
			m_full_list = false;
		}
		return true;
	}

	// Handle an incoming lt_tex message: bdecode the body and add every
	// URL from its "added" list as a low-priority (fail_limit = 3,
	// no-stats) tracker on the torrent.
	virtual bool on_extended(int length
		, int extended_msg, buffer::const_interval body)
	{
		if (extended_msg != 3) return false;
		if (m_message_index == 0) return false;
		// wait until the whole message has arrived
		if (!m_pc.packet_finished()) return true;
		lazy_entry msg;
		int ret = lazy_bdecode(body.begin, body.end, msg);
		if (ret != 0 || msg.type() != lazy_entry::dict_t)
		{
			m_pc.disconnect(errors::invalid_lt_tracker_message, 2);
			return true;
		}
		lazy_entry const* added = msg.dict_find_list("added");
#ifdef TORRENT_VERBOSE_LOGGING
		std::stringstream log_line;
		log_line << time_now_string() << " <== LT_TEX [ "
			"added: ";
#endif
		// invalid tex message
		if (added == 0)
		{
#ifdef TORRENT_VERBOSE_LOGGING
			(*m_pc.m_logger) << time_now_string() << " <== LT_TEX [ NOT A DICTIONARY ]\n";
#endif
			return true;
		}
		for (int i = 0; i < added->list_size(); ++i)
		{
			announce_entry e(added->list_string_value_at(i));
			if (e.url.empty()) continue;
			e.fail_limit = 3;
			e.send_stats = false;
			e.source = announce_entry::source_tex;
			m_torrent.add_tracker(e);
#ifdef TORRENT_VERBOSE_LOGGING
			log_line << e.url << " ";
#endif
		}
#ifdef TORRENT_VERBOSE_LOGGING
		log_line << "]\n";
		(*m_pc.m_logger) << log_line.str();
#endif
		return true;
	}

	// Every 120 ticks, send either the full tracker list (first
	// exchange) or just the delta built by the torrent plugin.
	virtual void tick()
	{
		if (!m_message_index) return;	// no handshake yet
		if (++m_2_minutes <= 120) return;
		m_2_minutes = 0;

		if (m_full_list)
		{
			send_full_tex_list();
			m_full_list = false;
		}
		else
		{
			send_lt_tex_diff();
		}
	}

private:

	// Send the delta message maintained by lt_tracker_plugin::tick().
	void send_lt_tex_diff()
	{
		// if there's no change in out tracker set, don't send anything
		if (m_tp.num_updates() == 0) return;
		std::vector<char> const& tex_msg = m_tp.get_lt_tex_msg();
		// 4-byte length prefix + 1 byte msg_extended + 1 byte message id
		buffer::interval i = m_pc.allocate_send_buffer(6 + tex_msg.size());
		detail::write_uint32(1 + 1 + tex_msg.size(), i.begin);
		detail::write_uint8(bt_peer_connection::msg_extended, i.begin);
		detail::write_uint8(m_message_index, i.begin);
		std::copy(tex_msg.begin(), tex_msg.end(), i.begin);
		i.begin += tex_msg.size();
		TORRENT_ASSERT(i.begin == i.end);
		m_pc.setup_send();
	}

	// Bencode and send every exchangeable tracker we currently know of.
	void send_full_tex_list() const
	{
		if (m_tp.trackers().empty()) return;
#ifdef TORRENT_VERBOSE_LOGGING
		std::stringstream log_line;
		log_line << time_now_string() << " ==> LT_TEX [ "
			"added: ";
#endif
		entry tex;
		entry::list_type& added = tex["added"].list();
		for (std::vector<announce_entry>::const_iterator i = m_tp.trackers().begin()
			, end(m_tp.trackers().end()); i != end; ++i)
		{
			if (!send_tracker(*i)) continue;
			added.push_back(i->url);
#ifdef TORRENT_VERBOSE_LOGGING
			log_line << i->url << " ";
#endif
		}
		std::vector<char> tex_msg;
		bencode(std::back_inserter(tex_msg), tex);
#ifdef TORRENT_VERBOSE_LOGGING
		log_line << "]\n";
		(*m_pc.m_logger) << log_line.str();
#endif
		// 4-byte length prefix + 1 byte msg_extended + 1 byte message id
		buffer::interval i = m_pc.allocate_send_buffer(6 + tex_msg.size());
		detail::write_uint32(1 + 1 + tex_msg.size(), i.begin);
		detail::write_uint8(bt_peer_connection::msg_extended, i.begin);
		detail::write_uint8(m_message_index, i.begin);
		std::copy(tex_msg.begin(), tex_msg.end(), i.begin);
		i.begin += tex_msg.size();
		TORRENT_ASSERT(i.begin == i.end);
		m_pc.setup_send();
	}

	// this is the message index the remote peer uses
	// for metadata extension messages.
	int m_message_index;

	torrent& m_torrent;
	bt_peer_connection& m_pc;
	lt_tracker_plugin& m_tp;
	// tick counter towards the 2-minute send period
	int m_2_minutes;
	// true until the full list has been sent (or the handshake hashes matched)
	bool m_full_list;
};
boost::shared_ptr<peer_plugin> lt_tracker_plugin::new_connection(
	peer_connection* pc)
{
	// lt_tex runs over the bittorrent extension protocol; connections of
	// any other kind get no per-peer plugin.
	bt_peer_connection* bt_conn = dynamic_cast<bt_peer_connection*>(pc);
	if (bt_conn == 0) return boost::shared_ptr<peer_plugin>();
	return boost::shared_ptr<peer_plugin>(
		new lt_tracker_peer_plugin(m_torrent, *bt_conn, *this));
}
} }
namespace libtorrent
{
boost::shared_ptr<torrent_plugin> TORRENT_EXPORT create_lt_trackers_plugin(torrent* t, void*)
{
	// Private torrents must not leak their tracker URLs to peers, so no
	// tracker-exchange plugin is created for them.
	bool const is_private = t->valid_metadata() && t->torrent_file().priv();
	if (is_private) return boost::shared_ptr<torrent_plugin>();
	return boost::shared_ptr<torrent_plugin>(new lt_tracker_plugin(*t));
}
}
|
{"hexsha": "7323569f46a2c88ed93b58db657c64be7b6c08c3", "size": 9734, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ThirdParty/libtorrent-rasterbar-0.15.6/src/lt_trackers.cpp", "max_stars_repo_name": "CarysT/medusa", "max_stars_repo_head_hexsha": "8e79f7738534d8cf60577ec42ed86621533ac269", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32.0, "max_stars_repo_stars_event_min_datetime": "2016-05-22T23:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T03:32:27.000Z", "max_issues_repo_path": "ThirdParty/libtorrent-rasterbar-0.15.6/src/lt_trackers.cpp", "max_issues_repo_name": "CarysT/medusa", "max_issues_repo_head_hexsha": "8e79f7738534d8cf60577ec42ed86621533ac269", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2016-05-30T19:45:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-24T22:29:51.000Z", "max_forks_repo_path": "ThirdParty/libtorrent-rasterbar-0.15.6/src/lt_trackers.cpp", "max_forks_repo_name": "CarysT/medusa", "max_forks_repo_head_hexsha": "8e79f7738534d8cf60577ec42ed86621533ac269", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17.0, "max_forks_repo_forks_event_min_datetime": "2016-05-27T11:01:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T03:32:30.000Z", "avg_line_length": 27.8911174785, "max_line_length": 98, "alphanum_fraction": 0.697554962, "num_tokens": 2663}
|
from numpy import array, sqrt, kron, eye, ones
from .abstract import AbstractEnv, organic, bandit, stop, f, env_args
# Default arguments for toy environment ------------------------------------
# NOTE(review): this binds env_0_args to the SAME dict object as env_args,
# so the mutations below are visible through both names. Presumably the
# alias is intentional; if an independent snapshot was meant, use
# dict(env_args) instead — confirm against callers.
env_0_args = env_args

# users are grouped into distinct clusters to prevent mixing
env_args['num_clusters'] = 2

# variance of the difference between organic and bandit
env_args['phi_var'] = 0.1
# Environment definition ----------------------------------------------------
class RecoEnv0(AbstractEnv):
    """Toy recommendation environment: users from distinct clusters browse
    organic product views, interleaved with bandit recommendation events."""

    def set_static_params(self):
        """Build every time-invariant matrix of the environment."""
        # Markov transition matrix over the session states
        # (organic, bandit, leave); the diagonal absorbs the leftover mass.
        leave = self.prob_leave_organic
        transitions = array([
            [0., self.prob_organic_to_bandit, leave],
            [self.prob_bandit_to_organic, 0., leave],
            [0.0, 0.0, 1.]
        ])
        transitions[0, 0] = 1 - transitions[0, :].sum()
        transitions[1, 1] = 1 - transitions[1, :].sum()
        self.state_transition = transitions

        # Block-diagonal organic transition matrix: uniform moves inside a
        # user cluster, zero probability of crossing clusters.
        per_cluster = int(self.num_products / self.num_clusters)
        blocks = kron(eye(self.num_clusters), ones((per_cluster, per_cluster)))
        blocks = blocks / blocks.sum(1)[:, None]  # row-normalize
        self.product_transition = blocks

        # Gaussian perturbation linking organic popularity to click-through.
        self.phi = self.rng.normal(
            scale=sqrt(self.phi_var),
            size=(self.num_products, self.num_products)
        )
        self.click_probs = f(
            self.num_products / 5. * (blocks + blocks.T) + self.phi)

        # Every product is equally likely at the start of a session.
        self.initial_product_probs = \
            ones((self.num_products)) / self.num_products

    def reset(self):
        """Reset the session and draw the first organically viewed product."""
        super().reset()
        self.product_view = self.rng.choice(
            self.num_products, p=self.initial_product_probs
        )

    def update_state(self):
        """Advance the Markov state between `organic`, `bandit`, or `stop`."""
        row = self.state_transition[self.state, :]
        self.state = self.rng.choice(3, p=row)

    def draw_click(self, recommendation):
        """Sample a Bernoulli click for `recommendation` given the current view."""
        prob = self.click_probs[recommendation, self.product_view]
        return self.rng.binomial(1, prob)

    def update_product_view(self):
        """Draw the next organically viewed product."""
        row = self.product_transition[self.product_view, :]
        self.product_view = self.rng.choice(self.num_products, p=row)
|
{"hexsha": "f9bf9cd08a413cf8aba50c5130e3d20aac43061f", "size": 2377, "ext": "py", "lang": "Python", "max_stars_repo_path": "reco_gym/envs/reco_env_v0.py", "max_stars_repo_name": "NunoEdgarGFlowHub/reco-gym", "max_stars_repo_head_hexsha": "42701b7ae115b879edf6881f368878c458a2a368", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reco_gym/envs/reco_env_v0.py", "max_issues_repo_name": "NunoEdgarGFlowHub/reco-gym", "max_issues_repo_head_hexsha": "42701b7ae115b879edf6881f368878c458a2a368", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reco_gym/envs/reco_env_v0.py", "max_forks_repo_name": "NunoEdgarGFlowHub/reco-gym", "max_forks_repo_head_hexsha": "42701b7ae115b879edf6881f368878c458a2a368", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9558823529, "max_line_length": 79, "alphanum_fraction": 0.6327303324, "include": true, "reason": "from numpy", "num_tokens": 544}
|
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage, interpolate, fft, signal
from scipy.optimize import fsolve
from skimage.feature import match_template
from skimage.transform import radon
from skimage.measure import ransac
from sklearn.cluster import KMeans
from eratosthenes.generic.filtering_statistical import make_2D_Gaussian, \
mad_filtering
from eratosthenes.generic.handler_im import get_grad_filters
from eratosthenes.preprocessing.shadow_transforms import pca
# spatial sub-pixel allignment functions
def simple_optical_flow(I1, I2, window_size, sampleI, sampleJ, tau=1e-2):  # processing
    """ displacement estimation through optical flow, following [1]

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        image with intensities
    I2 : np.array, size=(m,n)
        image with intensities
    window_size : integer
        kernel size of the neighborhood, should be odd
    sampleI : np.array, size=(k,l)
        grid with row image coordinates to estimate flow at
    sampleJ : np.array, size=(k,l)
        grid with column image coordinates to estimate flow at
    tau : float
        smoothness threshold: the smallest eigenvalue of A'A must exceed
        it before a flow estimate is accepted (otherwise 0 is returned)

    Returns
    -------
    Ugrd : np.array, size=(k,l)
        displacement estimate along the first image axis
    Vgrd : np.array, size=(k,l)
        displacement estimate along the second image axis

    Notes
    -----
    [1] Lucas & Kanade, "An iterative image registration technique with an
    application to stereo vision", proc. imaging understanding workshop,
    pp. 121-130, 1981.
    """
    kernel_x = np.array(
        [[-1., 1.],
         [-1., 1.]]
    )
    kernel_t = np.array(
        [[1., 1.],
         [1., 1.]]
    ) * .25

    # spatial derivatives of I1 and the temporal derivative between frames
    fx = ndimage.convolve(I1, kernel_x)
    fy = ndimage.convolve(I1, np.flip(np.transpose(kernel_x), axis=0))
    ft = ndimage.convolve(I2, kernel_t) + ndimage.convolve(I1, -kernel_t)

    # grid or single estimation
    # BUG FIX: the outputs now follow the sample-grid shape (k,l); the old
    # zeros((len(sampleI), len(sampleJ))) produced a (k,k) array and broke
    # for non-square grids when indexed with unravel_index(sampleI.shape).
    Ugrd = np.zeros(sampleI.shape)
    Vgrd = np.zeros(sampleJ.shape)

    radius = np.floor(window_size / 2).astype(
        'int')  # window_size should be odd
    for iIdx in range(sampleI.size):
        iIm = sampleI.flat[iIdx]
        jIm = sampleJ.flat[iIdx]
        (iGrd, jGrd) = np.unravel_index(iIdx, sampleI.shape)

        # get templates
        Ix = fx[iIm - radius:iIm + radius + 1,
             jIm - radius:jIm + radius + 1].flatten()
        Iy = fy[iIm - radius:iIm + radius + 1,
             jIm - radius:jIm + radius + 1].flatten()
        It = ft[iIm - radius:iIm + radius + 1,
             jIm - radius:jIm + radius + 1].flatten()

        # look if variation is present
        if np.std(It) != 0:
            b = np.reshape(It, (It.shape[0], 1))  # get b here
            A = np.vstack((Ix, Iy)).T  # get A here

            # threshold tau should be larger
            # than the smallest eigenvalue of A'A
            if np.min(abs(np.linalg.eigvals(np.matmul(A.T, A)))) >= tau:
                nu = np.matmul(np.linalg.pinv(A), b)  # get velocity here
                Ugrd[iGrd, jGrd] = nu[0]
                Vgrd[iGrd, jGrd] = nu[1]
    return (Ugrd, Vgrd)
def affine_optical_flow(I1, I2, model='Affine', iteration=15):
    """ displacement estimation through optical flow with an affine model,
    following Lucas & Kanade 1981

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        image with intensities (first epoch)
    I2 : np.array, size=(m,n)
        image with intensities (second epoch)
    model : string
        'Affine' : affine and translation
        'Rotation' : TODO (not implemented)
    iteration : integer
        maximum number of Gauss-Newton refinement iterations

    Returns
    -------
    u : float
        displacement estimate along the first axis
    v : float
        displacement estimate along the second axis
    A : np.array, size=(2,2)
        estimated (inverse transposed) affine part; identity on divergence
    snr : float
        smallest residual over the accepted iterations (0 on divergence)
    """
    (kernel_j,_) = get_grad_filters('kroon')

    kernel_t = np.array(
        [[1., 1., 1.],
         [1., 2., 1.],
         [1., 1., 1.]]
    ) / 10
    # smooth to not have very sharp derivatives
    I1 = ndimage.convolve(I1, make_2D_Gaussian((3,3),fwhm=3))
    I2 = ndimage.convolve(I2, make_2D_Gaussian((3,3),fwhm=3))

    # calculate spatial and temporal derivatives
    I_dj = ndimage.convolve(I1, kernel_j)
    I_di = ndimage.convolve(I1, np.flip(np.transpose(kernel_j), axis=0))

    # create local coordinate grid, centered on the template
    (mI,nI) = I1.shape
    mnI = I1.size
    (grd_i,grd_j) = np.meshgrid(np.linspace(-(mI-1)/2, +(mI-1)/2, mI), \
                                np.linspace(-(nI-1)/2, +(nI-1)/2, nI), \
                                indexing='ij')
    grd_j = np.flipud(grd_j)
    stk_ij = np.vstack( (grd_i.flatten(), grd_j.flatten()) ).T

    # p holds the six affine parameters; p_stack/res record the iteration
    # history so the best (smallest-residual) iterate can be picked later
    p = np.zeros((1,6), dtype=float)
    p_stack = np.zeros((iteration,6), dtype=float)
    res = np.zeros((iteration,1), dtype=float) # look at iteration evolution
    for i in np.arange(iteration):
        # affine transform of the template coordinates by the current p
        Aff = np.array([[1, 0, 0], [0, 1, 0]]) + p.reshape(3,2).T
        grd_new = np.matmul(Aff,
                            np.vstack((stk_ij.T,
                                       np.ones(mnI))))
        new_i = np.reshape(grd_new[0,:], (mI, nI))
        new_j = np.reshape(grd_new[1,:], (mI, nI))

        # construct new templates by resampling I2 and the derivatives at
        # the warped coordinates
        # NOTE(review): the bare except below swallows any griddata error
        # and leaves I2_new undefined on the first iteration (NameError
        # further down) — confirm intended failure behavior.
        try:
            I2_new = interpolate.griddata(stk_ij, I2.flatten().T,
                                          (new_i,new_j), method='cubic')
        except:
            print('different number of values and points')
        I_di_new = interpolate.griddata(stk_ij, I_di.flatten().T,
                                        (new_i,new_j), method='cubic')
        I_dj_new = interpolate.griddata(stk_ij, I_dj.flatten().T,
                                        (new_i,new_j), method='cubic')

        # I_dt = ndimage.convolve(I2_new, kernel_t) +
        # ndimage.convolve(I1, -kernel_t)
        I_dt_new = I2_new - I1

        # compose Jacobian and Hessian
        dWdp = np.array([ \
            I_di_new.flatten()*grd_i.flatten(),
            I_dj_new.flatten()*grd_i.flatten(),
            I_di_new.flatten()*grd_j.flatten(),
            I_dj_new.flatten()*grd_j.flatten(),
            I_di_new.flatten(),
            I_dj_new.flatten()])
        # dWdp = np.array([ \
        # I_di.flatten()*grd_i.flatten(),
        # I_dj.flatten()*grd_i.flatten(),
        # I_di.flatten()*grd_j.flatten(),
        # I_dj.flatten()*grd_j.flatten(),
        # I_di.flatten(),
        # I_dj.flatten()])

        # remove data outside the template (NaNs from cubic resampling)
        A, y = dWdp.T, I_dt_new.flatten()
        IN = ~(np.any(np.isnan(A), axis=1) | np.isnan(y))
        A = A[IN,:]
        y = y[~np.isnan(y)]
        #(dp,res[i]) = least_squares(A, y, mode='andrews', iterations=3)
        if y.size>=6: # structure should not become ill-posed
            try:
                (dp,res[i],_,_) = np.linalg.lstsq(A, y, rcond=None)#[0]
            except ValueError:
                pass #print('something wrong?')
        else:
            break
        p += dp
        p_stack[i,:] = p

    # only convergence is allowed: truncate the history at the first
    # iteration where the residual went back up
    (up_idx,_) = np.where(np.sign(res-np.vstack(([1e3],res[:-1])))==1)
    if up_idx.size != 0:
        res = res[:up_idx[0]]
    if res.size == 0: # sometimes divergence occurs
        A = np.array([[1, 0], [0, 1]])
        u, v, snr = 0, 0, 0
    else:
        # pick the iterate with the smallest residual
        Aff = np.array([[1, 0, 0], [0, 1, 0]]) + \
            p_stack[np.argmin(res),:].reshape(3,2).T
        u, v = Aff[0,-1], Aff[1,-1]
        A = np.linalg.inv(Aff[:,0:2]).T
        snr = np.min(res)
    return (u, v, A, snr)
# spatial pattern matching functions
def normalized_cross_corr(I1, I2):
    """Slide template I1 over search image I2 and return the normalized
    cross-correlation similarity surface.

    Parameters
    ----------
    I1 : np.array, size=(k,l)
        template with intensities
    I2 : np.array, size=(m,n)
        search image with intensities

    Returns
    -------
    np.array
        similarity surface
    """
    return match_template(I2, I1)
def cumulative_cross_corr(I1, I2):
    """ match two binary images by correlating their distance transforms

    Parameters
    ----------
    I1 : np.array, size=(k,l)
        binary array; a floating-point array is first thresholded below
        its median
    I2 : np.array, size=(m,n)
        binary array; a floating-point array is first thresholded below
        its median

    Returns
    -------
    result : np.array
        similarity surface
    """
    # BUG FIX: `isinstance(I1, np.floating)` is always False for arrays,
    # so the documented thresholding never ran; in addition the second
    # guard tested I1 instead of I2. Both are corrected here with a dtype
    # check.
    if np.issubdtype(I1.dtype, np.floating):
        # get cut-off value
        cu = np.quantile(I1, 0.5)
        I1 = I1 < cu
    if np.issubdtype(I2.dtype, np.floating):
        # get cut-off value
        cu = np.quantile(I2, 0.5)
        I2 = I2 < cu

    # correlate Euclidean distance maps instead of the raw binaries
    I1new = ndimage.distance_transform_edt(I1)
    I2new = ndimage.distance_transform_edt(I2)

    result = match_template(I2new, I1new)
    return result
def sum_sq_diff(I1, I2):
    """Dissimilarity surface: sum of squared differences between template
    I1 and every I1-sized window of I2.

    Parameters
    ----------
    I1 : np.array, size=(k,l)
        template with intensities
    I2 : np.array, size=(m,n)
        search image with intensities

    Returns
    -------
    ssd : np.array, size=(m-k+1,n-l+1)
        dissimilarity surface

    Notes
    -----
    Uses the expansion sum((W-T)^2) = sum(W^2) - 2*sum(W*T) + sum(T^2)
    over a strided view of all windows W, avoiding an explicit loop.
    """
    t_shape = I1.shape
    windows = np.lib.stride_tricks.as_strided(
        I2,
        shape=(I2.shape[0] - t_shape[0] + 1,
               I2.shape[1] - t_shape[1] + 1,) + t_shape,
        strides=I2.strides * 2)
    # cross term sum(W*T), then accumulate the quadratic terms in place
    ssd = np.einsum('ijkl,kl->ij', windows, I1)
    ssd *= -2
    ssd += np.einsum('ijkl, ijkl->ij', windows, windows)
    ssd += np.einsum('ij, ij', I1, I1)
    return ssd
def get_integer_peak_location(C):
    """Locate the whole-pixel peak of a similarity surface.

    Parameters
    ----------
    C : np.array, size=(m,n)
        similarity surface

    Returns
    -------
    di, dj : integer
        peak position relative to the surface center
    snr : float
        peak score divided by the mean of the surface
    max_corr : float
        value of the highest score

    Notes
    -----
    NOTE(review): the flat argmax is unraveled in Fortran (column-major)
    order and then reversed, and several callers unpack the return values
    in a different order (i, j, max_corr, snr) — confirm the intended
    conventions against the callers before relying on this ordering.
    """
    max_corr = np.amax(C)
    snr = max_corr / np.mean(C)
    peak = np.unravel_index(np.argmax(C), C.shape, order='F')  # 'C'
    di, dj = peak[::-1]
    # express the peak relative to the surface center
    di -= C.shape[0] // 2
    dj -= C.shape[1] // 2
    return di, dj, snr, max_corr
# sub-pixel localization of the correlation peak
def get_top_moment(C, ds=1, top=np.array([])):
    """ find location of highest score through local center-of-mass fitting

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    ds : integer, default=1
        size of the radius to use neighboring information
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when empty

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Feng et al. "A subpixel registration algorithm for low PSNR images"
    IEEE international conference on advanced computational intelligence,
    pp. 626-630, 2012.
    [2] Messerli & Grinstad, "Image georectification and feature tracking
    toolbox: ImGRAFT" Geoscientific instrumentation, methods and data systems,
    vol. 4(1) pp. 23-34, 2015.
    """
    (subJ,subI) = np.meshgrid(np.linspace(-ds,+ds, 2*ds+1), np.linspace(-ds,+ds, 2*ds+1))
    subI = subI.ravel()
    subJ = subJ.ravel()
    if top.size==0:
        # find highest score
        # NOTE(review): get_integer_peak_location returns center-relative
        # offsets (di, dj, snr, max_corr); unpacking them as array indices
        # here looks suspect — confirm against the intended convention.
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable. The
    # peak is at the border when it lies on the first or last row/column.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else:# estimate sub-pixel top as the center of mass of the
        # mean-subtracted, positive part of the local neighborhood
        # (np.int was removed from numpy; use the builtin int instead)
        idx_mid = int(np.floor((2.*ds+1)**2/2))
        Csub = C[i-ds:i+ds+1,j-ds:j+ds+1].ravel()
        Csub = Csub - np.mean(np.hstack((Csub[0:idx_mid],Csub[idx_mid+1:])))
        IN = Csub>0
        m = np.array([ np.divide(np.sum(subI[IN]*Csub[IN]), np.sum(Csub[IN])) ,
                       np.divide(np.sum(subJ[IN]*Csub[IN]), np.sum(Csub[IN]))])
        ddi, ddj = m[0], m[1]
    return (ddi, ddj)
def get_top_blue(C, ds=1): # wip
    """Work in progress: sub-pixel peak localization via a Newton step
    using finite-difference Jacobian/Hessian stencils.

    NOTE(review): several issues are visible and should be resolved before
    use: `y,x` are unpacked from center-relative offsets but used as array
    indices; `Hes = Jac = ...` overwrites the Jacobian built on the line
    above; and `np.linalg.inv(Hes) * Jac` is an elementwise product, not a
    matrix solve.
    """
    (subJ,subI) = np.meshgrid(np.linspace(-ds,+ds, 2*ds+1), np.linspace(-ds,+ds, 2*ds+1))
    subI = subI.ravel()
    subJ = subJ.ravel()
    # find highest score
    y,x,max_corr,snr = get_integer_peak_location(C)
    # estimate Jacobian
    H_x = np.array([[-17., 0., 17.],
                    [-61., 0., 61.],
                    [-17., 0., 17.]]) / 95
    # estimate Hessian
    H_xx = 8 / np.array([[105, -46, 105],
                         [ 50, -23, 50],
                         [ 105, -46, 105]] )
    H_xy = 11 / np.array([[-114, np.inf, +114],
                          [np.inf, np.inf, np.inf],
                          [+114, np.inf, -114]] )
    # estimate sub-pixel top
    Csub = C[y-ds:y+ds+1,x-ds:x+ds+1]
    Jac = np.array([[Csub*H_x], [Csub*H_x.T]])
    Hes = Jac = np.array([[Csub*H_xx , Csub*H_xy],
                          [Csub*H_xy.T, Csub*H_xx.T]])
    m0 = np.array([[x], [y]]) - np.linalg.inv(Hes) * Jac
    return (m0[0], m0[1])
def get_top_gaussian(C, top=np.array([])):
    """ find location of highest score through a three-point 1D Gaussian fit
    along each axis

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface; must be strictly positive around the peak,
        since logarithms of the three samples along each axis are taken
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when empty

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Argyriou & Vlachos, "A Study of sub-pixel motion estimation using
    phase correlation" Proceeding of the British machine vision conference,
    pp. 387-396), 2006.
    """
    if top.size==0: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else: # estimate sub-pixel along each axis
        # BUG FIX: `x / 2*(d)` parsed as `(x/2)*d`; the Gaussian fit of [1]
        # divides by 2*(2 ln C0 - ln C-1 - ln C+1). With the parentheses in
        # place, an exactly Gaussian peak is recovered exactly.
        ddi = (np.log(C[i+1,j]) - np.log(C[i-1,j])) / \
            (2*( (2*np.log(C[i,j])) -np.log(C[i-1,j]) -np.log(C[i+1,j])))
        ddj = (np.log(C[i,j+1]) - np.log(C[i,j-1])) / \
            (2*( (2*np.log(C[i,j])) -np.log(C[i,j-1]) -np.log(C[i,j+1])))
    return (ddi, ddj)
def get_top_parabolic(C, top=np.array([])):
    """ find location of highest score through a three-point 1D parabolic
    fit along each axis

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when empty

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Argyriou & Vlachos, "A Study of sub-pixel motion estimation using
    phase correlation" Proceeding of the British machine vision conference,
    pp. 387-396), 2006.
    """
    if top.size==0: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else: # estimate sub-pixel along each axis
        # BUG FIX: `x / 2*(d)` parsed as `(x/2)*d`; the parabolic vertex
        # formula divides by 2*(2 C0 - C-1 - C+1). With the parentheses in
        # place, an exactly parabolic peak is recovered exactly.
        ddi = (C[i+1,j] - C[i-1,j]) / (2*( (2*C[i,j]) -C[i-1,j] -C[i+1,j]))
        ddj = (C[i,j+1] - C[i,j-1]) / (2*( (2*C[i,j]) -C[i,j-1] -C[i,j+1]))
    return (ddi, ddj)
def get_top_birchfield(C, top=np.array([])):
    """ find location of highest score along each axis

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when empty

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Birchfield & Tomasi. "Depth discontinuities by pixel-to-pixel stereo"
    International journal of computer vision, vol. 35(3)3 pp. 269-293, 1999.
    """
    if top.size==0: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis: interpolate half-sample
        # scores on either side of the peak, per [1]
        I_m,I_p = .5*(C[i-1,j] + C[i,j]), .5*(C[i+1,j] + C[i,j])
        I_min,I_max = np.amin([I_m, I_p, C[i,j]]), np.amax([I_m, I_p, C[i,j]])
        # swapped, since Birchfield uses dissimilarity
        ddi = np.amax([0, I_max-C[i,j], C[i,j]-I_min])
        I_m,I_p = .5*(C[i,j-1] + C[i,j]), .5*(C[i,j+1] + C[i,j])
        I_min,I_max = np.amin([I_m, I_p, C[i,j]]), np.amax([I_m, I_p, C[i,j]])
        ddj = np.amax([0, I_max-C[i,j], C[i,j]-I_min])
    return (ddi, ddj)
def get_top_ren(C, top=None):
    """ find location of highest score following the estimator of [1]

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when None

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Ren et al. "High-accuracy sub-pixel motion estimation from noisy
    images in Fourier domain." IEEE transactions on image processing,
    vol. 19(5) pp. 1379-1384, 2010.
    """
    # BUG FIX: the original tested `top.size is None`, which raises
    # AttributeError for the default top=None and is never True otherwise.
    if top is None: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis from the central difference
        D_i = C[i+1,j] - C[i-1,j]
        ddi = np.sign(D_i)/(1 + ( C[i,j] / np.abs(D_i) ))
        D_j = C[i,j+1] - C[i,j-1]
        ddj = np.sign(D_j)/(1 + ( C[i,j] / np.abs(D_j) ))
    return (ddi, ddj)
def get_top_triangular(C, top=None):
    """ find location of highest score through triangular fit

    Parameters
    ----------
    C : np.array, size=(_,_)
        similarity surface
    top : np.array, size=(1,2)
        location of the maximum score; looked up internally when None

    Returns
    -------
    ddi : float
        estimated subpixel location on the vertical axis of the peak
    ddj : float
        estimated subpixel location on the horizontal axis of the peak

    Notes
    -----
    [1] Olsen & Coombs, "Real-time vergence control for binocular robots"
    International journal of computer vision, vol. 7(1), pp. 67-89, 1991.
    """
    # BUG FIX: the original tested `top.size is None`, which raises
    # AttributeError for the default top=None and is never True otherwise.
    if top is None: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    # BUG FIX: the border test used `i != C.shape[0]`, which is true for
    # every valid index and made the sub-pixel branch unreachable.
    if (i==0) | (i==C.shape[0]-1) | (j==0) | (j==C.shape[1]-1): # top at the border
        ddi, ddj = 0, 0
    else:
        # estimate sub-pixel along each axis: triangle fit on the
        # two direct neighbours of the peak
        I_m,I_p = C[i-1,j], C[i+1,j]
        I_min,I_max = np.amin([I_m, I_p]), np.amax([I_m, I_p])
        I_sign = 2*(I_p>I_m)-1
        ddi = I_sign * (1- (I_max-I_min)/(C[i,j]-I_min) )
        # NOTE(review): unlike the i-axis above, the peak value itself is
        # included in the min/max here, which forces ddj to 0 since
        # I_max == C[i,j]; preserved as-is — confirm which variant of the
        # fit in [1] was intended.
        I_m,I_p = C[i,j-1], C[i,j+1]
        I_min,I_max = np.amin([I_m, I_p, C[i,j]]), np.amax([I_m, I_p, C[i,j]])
        I_sign = 2*(I_p>I_m)-1
        ddj = I_sign * (1- (I_max-I_min)/(C[i,j]-I_min) )
    return (ddi, ddj)
def get_top_esinc(C, ds=1, top=None):
    '''
    find location of highest score by fitting an exponentially-damped sinc
    ("esinc") model along each axis, following Argyriou & Vlachos 2006
    "A study of sub-pixel motion estimation using phase correlation"

    :param C:    NP.ARRAY (_,_)
                 similarity surface
    :param ds:   INTEGER
                 radius of the neighborhood used for the 1D fits
    :param top:  NP.ARRAY (1,2)
                 location of the maximum score; looked up when None
    :return iC:  FLOAT
                 estimated subpixel location on the vertical axis of the peak
    :return jC:  FLOAT
                 estimated subpixel location on the horizontal axis of the peak

    NOTE(review): two suspect patterns are preserved as-is and should be
    confirmed: the border test `(i != C.shape[0])` is true for every valid
    index, so the fsolve branch appears unreachable (same pattern as the
    sibling get_top_* functions); and `np.sin(np.pi*x)/ np.pi*x` parses as
    `(sin(pi*x)/pi)*x` rather than the sinc function sin(pi*x)/(pi*x).
    '''
    if top is None: # find highest score
        i,j,max_corr,snr = get_integer_peak_location(C)
    else:
        i, j = top[0], top[1]
    if (i==0) | (i!=C.shape[0]) | (j==0) | (j!=C.shape[1]): # top at the border
        iC, jC = 0, 0
    else:
        # estimate sub-pixel per axis: solve for the (a,b,c) parameters of
        # the esinc model through the three samples around the peak; the
        # recovered shift is the c parameter
        Cj = C[i,j-ds:j+ds+1].ravel()
        def funcJ(x):
            a, b, c = x
            return [(Cj[0] - a*np.exp(-(b*(-1-c))**2)* \
                     ( np.sin(np.pi*(-1-c))/ np.pi*(-1-c)) )**2,
                    (Cj[1] - a*np.exp(-(b*(+0-c))**2)* \
                     ( np.sin(np.pi*(+0-c))/ np.pi*(+0-c)) )**2,
                    (Cj[2] - a*np.exp(-(b*(+1-c))**2)* \
                     ( np.sin(np.pi*(+1-c))/ np.pi*(+1-c)) )**2]
        jA, jB, jC = fsolve(funcJ, (1.0, 1.0, 0.1))
        Ci = C[i-ds:i+ds+1,j].ravel()
        def funcI(x):
            a, b, c = x
            return [(Ci[0] - a*np.exp(-(b*(-1-c))**2)* \
                     ( np.sin(np.pi*(-1-c))/ np.pi*(-1-c)) )**2,
                    (Ci[1] - a*np.exp(-(b*(+0-c))**2)* \
                     ( np.sin(np.pi*(+0-c))/ np.pi*(+0-c)) )**2,
                    (Ci[2] - a*np.exp(-(b*(+1-c))**2)* \
                     ( np.sin(np.pi*(+1-c))/ np.pi*(+1-c)) )**2]
        iA, iB, iC = fsolve(funcI, (1.0, 1.0, 0.1))
    return (iC, jC)
#todo: Nobach_05,
# paraboloid
# phase plane functions
def phase_tpss(Q, W, m, p=1e-4, l=4, j=5, n=3):
"""get phase plane of cross-spectrum through two point step size iteration
find slope of the phase plane through
two point step size for phase correlation minimization
Parameters
----------
following Leprince et al. 2007
:param Q: NP.ARRAY (_,_)
cross spectrum
:param m: NP.ARRAY (1,2)
similarity surface
:param p: FLOAT
closing error threshold
:param l: INTEGER
number of refinements in iteration
:param j: INTEGER
number of sub routines during an estimation
:param n: INTEGER
mask convergence factor
:return m: NP.ARRAY (2,1)
displacement estimate
:return snr: FLOAT
signal-to-noise ratio
Q : np.array, size=(_,_), dtype=complex
cross spectrum
m0 : np.array, size=(2,1)
initial displacement estimate
p : float, default=1e4
closing error threshold
l : integer, default=4
number of refinements in iteration
j : integer, default=5
number of sub routines during an estimation
n : integer, default=3
mask convergence factor
Returns
-------
m : np.array, size=(2,1)
sub-pixel displacement
snr: float
signal-to-noise ratio
See Also
--------
phase_svd, phase_radon, phase_difference
Notes
-----
[1] Leprince, et.al. "Automatic and precise orthorectification,
coregistration, and subpixel correlation of satellite images, application
to ground deformation measurements", IEEE Transactions on Geoscience and
Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
"""
(m_Q, n_Q) = Q.shape
fy = 2*np.pi*(np.arange(0,m_Q)-(m_Q/2)) /m_Q
fx = 2*np.pi*(np.arange(0,n_Q)-(n_Q/2)) /n_Q
Fx = np.repeat(fx[np.newaxis,:],m_Q,axis=0)
Fy = np.repeat(fy[:,np.newaxis],n_Q,axis=1)
Fx = np.fft.fftshift(Fx)
Fy = np.fft.fftshift(Fy)
# initialize
m_min = m + np.array([+.1, +.1])
C_min = 1j*-np.sin(Fx*m_min[1] + Fy*m_min[0])
C_min += np.cos(Fx*m_min[1] + Fy*m_min[0])
QC_min = Q-C_min # np.abs(Q-C_min) #Q-C_min np.abs(Q-C_min)
dXY_min = np.multiply(2*W, (QC_min * np.conjugate(QC_min)) )
g_min = np.real(np.array([np.nansum(Fy*dXY_min), \
np.nansum(Fx*dXY_min)]))
print(m)
for i in range(l):
k = 1
while True:
C = 1j*-np.sin(Fx*m[1] + Fy*m[0])
C += np.cos(Fx*m[1] + Fy*m[0])
QC = Q-C # np.abs(Q-C)#np.abs(Q-C)
dXY = 2*W*(QC*np.conjugate(QC))
g = np.real(np.array([np.nansum(np.multiply(Fy,dXY)), \
np.nansum(np.multiply(Fx,dXY))]))
# difference
dm = m - m_min
dg = g - g_min
alpha = np.dot(dm,dg)/np.dot(dg,dg)
#alpha = np.dot(dm,dm)/np.dot(dm,dg)
if (np.all(np.abs(m - m_min)<=p)) or (k>=j):
break
# update
m_min, g_min, dXY_min = np.copy(m), np.copy(g), np.copy(dXY)
m -= alpha*dg
print(m)
k += 1
# optimize weighting matrix
phi = np.abs(QC*np.conjugate(QC))/2
W = W*(1-(dXY/8))**n
snr = 1 - (np.sum(phi)/(4*np.sum(W)))
return (m, snr)
def phase_svd(Q, W, rad=0.1):
    """get phase plane of cross-spectrum through single value decomposition

    find slope of the phase plane through
    single value decomposition

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross spectrum
    W : np.array, size=(m,n), dtype=float
        weigthing matrix
    rad : float, default=0.1
        relative half-width of the central band of singular-vector entries
        used for the phase-slope fit

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_radon, phase_difference

    Notes
    -----
    [1] Hoge, W.S. "A subspace identification extension to the phase
    correlation method", IEEE transactions on medical imaging, vol. 22.2
    pp. 277-280, 2003.
    """
    # filtering through magnitude
    # W: M = thresh_masking(S1, m=th, s=ker) th=0.001, ker=10
    (m,n) = Q.shape
    Q,W = np.fft.fftshift(Q), np.fft.fftshift(W)

    # decompose axis
    n_elements = 1
    u,s,v = np.linalg.svd(W*Q) # singular-value decomposition
    sig = np.zeros((m,n))
    sig[:m,:m] = np.diag(s)
    sig = sig[:,:n_elements] # select first element only
    # v = v[:n_elements,:]
    # reconstruct
    # b = u.dot(sig.dot(v))
    t_m = np.transpose(v).dot(sig)  # scaled dominant right singular vector
    t_n = u.dot(sig)# transform     # scaled dominant left singular vector

    # for an ideal shift the phase of the dominant singular vectors is
    # linear; fit a line through the central (low-frequency) part only
    idx_sub = np.arange(np.ceil((0.5-rad)*len(t_n)), \
                        np.ceil((0.5+rad)*len(t_n))+1).astype(int)
    y_ang = np.unwrap(np.angle(t_n[idx_sub]),axis=0)
    A = np.vstack([np.transpose(idx_sub-1), np.ones((len(idx_sub)))]).T
    (dx,_,_,_) = np.linalg.lstsq(A, y_ang, rcond=None)

    # NOTE(review): A is reused below, which assumes len(t_m)==len(t_n),
    # i.e. a square Q -- confirm callers only pass square templates
    idx_sub = np.arange(np.ceil((0.5-rad)*len(t_m)), \
                        np.ceil((0.5+rad)*len(t_m))+1).astype(int)
    y_ang = np.unwrap(np.angle(t_m[idx_sub]), axis=0)
    (dy,_,_,_) = np.linalg.lstsq(A, y_ang, rcond=None)

    # convert phase slopes into pixel displacements
    dj = dx[0]*n / (np.pi)
    di = dy[0]*m / (np.pi)
    return di, dj
def phase_difference_1d(Q, axis=0):
    """get displacement from phase plane along one axis through differencing

    find slope of the phase plane through
    local difference of the phase angles

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum
    axis : integer, default=0
        axis along which the displacement is estimated; 0 transposes the
        spectrum first so the differencing is always taken along rows

    Returns
    -------
    dj : float
        sub-pixel displacement along the requested axis

    See Also
    --------
    phase_tpss, phase_svd, phase_difference

    Notes
    -----
    [1] Kay, S. "A fast and accurate frequency estimator", IEEE transactions on
    acoustics, speech and signal processing, vol.37(12) pp. 1987-1990, 1989.
    """
    if axis==0:
        Q = np.transpose(Q)
    m,n = Q.shape

    # find coherent data
    C = local_coherence(Q, ds=1)
    # NOTE(review): np.roll with a tuple shift and no axis argument rolls
    # the FLATTENED array; presumably axis=(0,1) was intended -- verify
    C = np.minimum(C, np.roll(C, (0,1)))

    #estimate period from the angle of neighbouring-coefficient products
    Q_dj = np.roll(Q, (0,1))
    Q_diff = np.multiply(np.conj(Q),Q_dj)

    Delta_dj = np.angle(Q_diff)/np.pi
    # take the median over sufficiently coherent entries only
    IN = C>.9
    dj = np.median(Delta_dj[IN])*(m//2)
    return dj
def phase_difference(Q):
    """get displacement from phase plane through neighbouring vector difference

    find slope of the phase plane through
    local difference of the phase angles

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_svd, phase_difference_1d

    Notes
    -----
    [1] Kay, S. "A fast and accurate frequency estimator", IEEE transactions on
    acoustics, speech and signal processing, vol.37(12) pp. 1987-1990, 1989.
    """
    # each component is estimated independently, one axis at a time
    return phase_difference_1d(Q, axis=0), phase_difference_1d(Q, axis=1)
def phase_lsq(I, J, Q):
    """get phase plane of cross-spectrum through least squares plane fitting

    find slope of the phase plane through
    ordinary least squares

    Parameters
    ----------
    I : np.array, size=(mn,1), dtype=float
        vertical coordinate list
    J : np.array, size=(mn,1), dtype=float
        horizontal coordinate list
    Q : np.array, size=(mn,1), dtype=complex
        list with cross-spectrum complex values

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_radon, phase_svd, phase_difference, phase_pca
    """
    A = np.array([I, J, np.ones_like(I)]).T
    b = np.angle(Q) / (2*np.pi)

    # direct least-squares solve; mathematically identical to the original
    # doubly-applied normal equations, but numerically better conditioned
    plane_normal = np.linalg.lstsq(A, b, rcond=None)[0]

    di = plane_normal[0]
    dj = plane_normal[1]
    return di, dj
class BaseModel(object):
    """Minimal base class for model estimators.

    Holds the fitted parameters; concrete subclasses fill ``params``
    in their ``estimate`` method.
    """

    def __init__(self):
        # no parameters until ``estimate`` has been called
        self.params = None
class PlaneModel(BaseModel):
    """Least squares estimator for a phase plane through the origin.

    The functional model is a plane without offset::

        z = x * dx + y * dy

    This estimator minimizes the squared vertical distances from all
    points to the plane::

        min{ sum((z_i - x_i * dx - y_i * dy)**2) }

    A minimum number of 2 points is required to solve for the parameters.

    Attributes
    ----------
    params : tuple
        Plane model parameters in the following order `dx`, `dy`.
    """

    def estimate(self, data):
        """Estimate plane from data using least squares.

        Parameters
        ----------
        data : (N, 3) array
            N points with ``(x, y, z)`` coordinates, respectively.

        Returns
        -------
        success : bool
            True, if model estimation succeeds.
        """
        if data.shape[0] >= 2: # well determined
            x_hat = np.linalg.lstsq(data[:,0:2], data[:,-1], rcond=None)[0]
        else: # under-determined
            raise ValueError('At least two vectors needed.')
        self.params = (x_hat[0], x_hat[1])

        return True

    def residuals(self, data):
        """Determine residuals of data to model

        For each point the vertical distance to the plane is returned.

        Parameters
        ----------
        data : (N, 3) array
            N points with x, y coordinates and z values, respectively.

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        x_hat = self.params
        Q_hat = data[:,0]*x_hat[0] + data[:,1]*x_hat[1]
        residuals = np.abs(data[:,-1] - Q_hat)
        return residuals

    def predict_xy(self, xy, params=None):
        """Predict plane height using the estimated slopes.

        Parameters
        ----------
        xy : array
            x,y-coordinates; either a single pair or an (N, 2) array.
        params : (2, ) array, optional
            Optional custom parameter set.

        Returns
        -------
        Q_hat : array
            Predicted plane height at x,y-coordinates.
        """
        if params is None:
            params = self.params
        x_hat = params
        if xy.ndim<2:
            Q_hat = xy[0]*x_hat[0] + xy[1]*x_hat[1]
        else:
            Q_hat = xy[:,0]*x_hat[0] + xy[:,1]*x_hat[1]
        return Q_hat
def phase_ransac(Q, precision_threshold=.05):
    """robustly fit plane using RANSAC algorithm

    find slope of the phase plane through
    random sampling and consensus

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross spectrum
    precision_threshold : float, default=.05
        maximum residual for a sample to count as an inlier

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_svd, phase_difference

    Notes
    -----
    [1] Fischler & Bolles. "Random sample consensus: a paradigm for model
    fitting with applications to image analysis and automated cartography"
    Communications of the ACM vol.24(6) pp.381-395, 1981.
    [2] Tong et al. "A novel subpixel phase correlation method using singular
    value decomposition and unified random sample consensus" IEEE transactions
    on geoscience and remote sensing vol.53(8) pp.4143-4156, 2015.
    """
    (m,n) = Q.shape
    fy = np.flip((np.arange(0,m)-(m/2)) /m)
    fx = np.flip((np.arange(0,n)-(n/2)) /n)

    Fx = np.repeat(fx[np.newaxis,:],m,axis=0)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)

    # stack (x, y, phase) samples for the plane fit
    data = np.vstack((Fx.flatten(),
                      Fy.flatten(),
                      np.angle(Q).flatten())).T
    ransac_model, inliers = ransac(data, PlaneModel,
                                   min_samples=int(2),
                                   residual_threshold=precision_threshold,
                                   max_trials=int(1e3))
    IN = np.reshape(inliers, (m,n)) # what data is within error bounds
    # NOTE(review): IN is currently unused; also params[0] weights the first
    # data column (Fx, horizontal) -- confirm the di/dj assignment matches
    # the axis convention used elsewhere in this module
    di = ransac_model.params[0]/(2.*np.pi)
    dj = ransac_model.params[1]/(2.*np.pi)
    return di, dj
def phase_pca(I, J, Q): # wip
    """get phase plane of cross-spectrum through principle component analysis

    find slope of the phase plane through
    principle component analysis

    Parameters
    ----------
    I : np.array, size=(mn,1), dtype=float
        vertical coordinate list
    J : np.array, size=(mn,1), dtype=float
        horizontal coordinate list
    Q : np.array, size=(mn,1), dtype=complex
        list with cross-spectrum complex values

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_radon, phase_svd, phase_difference
    """
    eigen_vecs, eigen_vals = pca(np.array([I, J, np.angle(Q)/np.pi]).T)
    # plane normal: the original first selected np.argmin(eigen_vals) and
    # then immediately overwrote it with the selection below (dead store,
    # removed here)
    # NOTE(review): the textbook choice would be the eigenvector of the
    # smallest eigenvalue; verify which selection is intended (wip)
    e3 = eigen_vecs[:,np.argmin(np.abs(eigen_vecs[-1,:]))]
    di = np.sign(e3[-1])*e3[1]
    dj = np.sign(e3[-1])*e3[0]
    return di, dj
def phase_radon(Q): # wip
    """get direction of displacement from phase plane through Radon transform

    find the orientation of the phase plane fringes via a radon transform
    of the phase angle

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross spectrum

    Returns
    -------
    theta : float
        direction of displacement, in degrees; the magnitude (rho)
        estimation is not implemented yet (work in progress)

    See Also
    --------
    phase_tpss, phase_svd, phase_difference

    Notes
    -----
    [1] Balci & Foroosh. "Subpixel registration directly from the phase
    difference" EURASIP journal on advances in signal processing, pp.1-11, 2006.
    """
    (m, n) = Q.shape
    half = m // 2
    Q = np.fft.fftshift(Q)

    # estimate direction, through the radon transform
    W = np.fft.fftshift(raised_cosine(Q, beta=1e-5)).astype(bool)
    Q[~W] = 0 # make circular domain

    theta = np.linspace(0., 180., max(m,n), endpoint=False)
    R = radon(np.angle(Q), theta) # sinogram
    #plt.imshow(R[:half,:]), plt.show()
    #plt.imshow(np.flipud(R[half:,:])), plt.show()

    # fold the two halves of the sinogram onto each other; coherent fringe
    # directions reinforce, the rest cancels
    R_fold = np.abs(np.multiply(R[:half,:], R[half:,:]))
    radon_score = np.sum(R_fold, axis=0)
    score_idx = np.argmax(radon_score)
    theta = theta[score_idx]
    del R_fold, radon_score, score_idx
    # rotating coordinate frame, angle difference

    # estimate magnitude
    # peaks can also be seen
    # plt.plot(R[:,score_idx]), plt.show()

    return theta
#todo: Gonzalez_10:RANSAC->LDA, Konstantinidis_ PAC, Fienup_82: Gradient-descend, Yousef_05:
# frequency preparation
def perdecomp(img):
    """calculate the periodic and smooth components of an image

    Splits an image into a periodic component (suited for Fourier analysis
    without boundary artefacts) and a smooth residual.

    Parameters
    ----------
    img : np.array, size=(m,n) or (m,n,b)
        array with intensities

    Returns
    -------
    per : np.array, same size as img
        periodic component
    cor : np.array, same size as img
        smooth component

    Notes
    -----
    [1] Moisan, L. "Periodic plus smooth image decomposition", Journal of
    mathematical imaging and vision vol. 39.2 pp. 161-179, 2011.
    """
    img = img.astype(float)
    if img.ndim==2:
        (m, n) = img.shape
        # boundary image: intensity jumps across the periodic border
        per = np.zeros((m, n), dtype=float)
        per[+0,:] = +img[0,:] -img[-1,:]
        per[-1,:] = -per[0,:]

        per[:,+0] = per[:,+0] +img[:,+0] -img[:,-1]
        per[:,-1] = per[:,-1] -img[:,+0] +img[:,-1]
    elif img.ndim==3:
        (m, n, b) = img.shape
        per = np.zeros((m, n, b), dtype=float)
        per[+0,:,:] = +img[0,:,:] -img[-1,:,:]
        per[-1,:,:] = -per[0,:,:]

        per[:,+0,:] = per[:,+0,:] +img[:,+0,:] -img[:,-1,:]
        per[:,-1,:] = per[:,-1,:] -img[:,+0,:] +img[:,-1,:]

    # solve the discrete Laplace equation in the frequency domain
    fy = np.cos( 2*np.pi*( np.arange(0,m) )/m )
    fx = np.cos( 2*np.pi*( np.arange(0,n) )/n )

    Fx = np.repeat(fx[np.newaxis,:],m,axis=0)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)
    Fx[0,0] = 0  # avoid division by zero at the DC component
    if img.ndim==3:
        Fx = np.repeat(Fx[:,:,np.newaxis], b, axis=2)
        Fy = np.repeat(Fy[:,:,np.newaxis], b, axis=2)
        # transform over the two SPATIAL axes only: np.fft.fft2 defaults to
        # the last two axes, which for an (m,n,b) stack would wrongly mix
        # the band axis in
        cor = np.real( np.fft.ifft2( np.fft.fft2(per, axes=(0,1)) *.5
                                     / (2-Fx-Fy), axes=(0,1)))
    else:
        cor = np.real( np.fft.ifft2( np.fft.fft2(per) *.5/ (2-Fx-Fy)))
    per = img-cor
    return (per, cor)
def normalize_power_spectrum(Q):
    """Scale every spectral coefficient to unit magnitude.

    Coefficients that are exactly zero are left at zero, so no division
    errors occur.

    Parameters
    ----------
    Q : np.array, dtype=complex
        cross-spectrum

    Returns
    -------
    Qn : np.array, dtype=complex
        normalized cross-spectrum
    """
    Qn = np.zeros_like(Q)
    nonzero = Q != 0
    Qn[nonzero] = Q[nonzero] / np.abs(Q[nonzero])
    return Qn
# frequency matching filters
def raised_cosine(I, beta=0.35):
    """ raised cosine filter

    Builds a low-pass weighting mask in fft layout (DC at the corner): unity
    inside the pass-band, a squared-cosine roll-off up to the Nyquist ring,
    zero beyond.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    beta : float, default=0.35
        roll-off factor

    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask

    See Also
    --------
    tpss

    Notes
    -----
    [1] Stone et al. "A fast direct Fourier-based algorithm for subpixel
    registration of images." IEEE Transactions on geoscience and remote sensing
    vol. 39(10) pp. 2235-2243, 2001.
    [2] Leprince, et.al. "Automatic and precise orthorectification,
    coregistration, and subpixel correlation of satellite images, application
    to ground deformation measurements", IEEE Transactions on Geoscience and
    Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
    """
    m, n = I.shape
    # frequency coordinates in fft layout, i.e. DC at the corner
    freq_i = np.mod(.5 + np.arange(m)/m, 1) - .5
    freq_j = np.mod(.5 + np.arange(n)/n, 1) - .5
    Fj, Fi = np.meshgrid(freq_j, freq_i)
    R = np.hypot(Fi, Fj)  # radius

    W = np.zeros((m, n))
    W[R < (.5 - beta)] = 1  # flat pass-band
    taper = np.logical_and((.5 - beta) <= R, R <= .5)
    W[taper] = np.cos((np.pi/(2*beta)) * (R[taper] - (.5 - beta)))**2
    return W
# def hanning_window
def low_pass_rectancle(I, r=0.50):
    """ create hard low-pass filter

    Boolean mask in fft layout that passes frequencies inside an
    axis-aligned rectangle of half-width r.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the rectangle, r=.5 is same as its width

    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask

    See Also
    --------
    low_pass_circle, low_pass_pyramid, low_pass_bell

    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    m, n = I.shape
    # fft-shifted frequency coordinates, scaled to [-1,+1)
    fi = 2*np.mod(.5 + np.arange(m)/m, 1) - 1
    fj = 2*np.mod(.5 + np.arange(n)/n, 1) - 1

    pass_i = np.abs(fi) <= r
    pass_j = np.abs(fj) <= r
    return np.logical_and(pass_i[:, np.newaxis], pass_j[np.newaxis, :])
def low_pass_pyramid(I, r=0.50):
    """ create low-pass filter with pyramid shape

    The auto-convolution of a hard rectangular mask gives a pyramid
    (triangular) frequency profile, normalized to a unit maximum.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the mother rectangle, r=.5 is same as its width

    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask

    See Also
    --------
    low_pass_rectancle, low_pass_circle, low_pass_bell

    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    box = low_pass_rectancle(I, r).astype(float)
    W = signal.convolve2d(box, box, mode='same', boundary='wrap')
    return np.fft.fftshift(W / np.max(W))
def low_pass_bell(I, r=0.50):
    """ create low-pass filter with a bell shape

    Convolving the rectangular mask with its pyramid profile yields a
    smooth, bell-shaped frequency response, normalized to a unit maximum.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the mother rectangle, r=.5 is same as its width

    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask

    See Also
    --------
    low_pass_rectancle, low_pass_circle, low_pass_pyramid

    Notes
    -----
    [1] Takita et al. "High-accuracy subpixel image registration based on
    phase-only correlation" IEICE transactions on fundamentals of electronics,
    communications and computer sciences, vol.86(8) pp.1925-1934, 2003.
    """
    box = low_pass_rectancle(I, r).astype(float)
    pyramid = low_pass_pyramid(I, r).astype(float)
    W = signal.convolve2d(box, pyramid, mode='same', boundary='wrap')
    return np.fft.fftshift(W / np.max(W))
def low_pass_circle(I, r=0.50):
    """ create hard low-pass filter

    Boolean mask in fft layout passing frequencies inside a circle of
    radius r.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the circle, r=.5 is same as its width

    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask

    See Also
    --------
    raised_cosine, cosine_bell, high_pass_circle
    """
    m, n = I.shape
    # fft-shifted frequency coordinates
    fi = np.mod(.5 + np.arange(m)/m, 1) - .5
    fj = np.mod(.5 + np.arange(n)/n, 1) - .5
    R = np.hypot(fi[:, np.newaxis], fj[np.newaxis, :])  # radius
    return R <= r
def high_pass_circle(I, r=0.50):
    """ create hard high-pass filter

    Boolean mask in fft layout passing frequencies outside (or on) a circle
    of radius r.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    r : float, default=0.5
        radius of the circle, r=.5 is same as its width

    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        weighting mask

    See Also
    --------
    raised_cosine, cosine_bell, low_pass_circle
    """
    m, n = I.shape
    # fft-shifted frequency coordinates
    fi = np.mod(.5 + np.arange(m)/m, 1) - .5
    fj = np.mod(.5 + np.arange(n)/n, 1) - .5
    R = np.hypot(fi[:, np.newaxis], fj[np.newaxis, :])  # radius
    return R >= r
def cosine_bell(I):
    """ cosine bell filter

    Smooth low-pass weighting mask in fft layout: unity at DC, cosine
    fall-off toward the Nyquist ring, zero beyond radius 0.5.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    W : np.array, size=(m,n), dtype=float
        weighting mask

    See Also
    --------
    raised_cosine
    """
    m, n = I.shape
    # fft-shifted frequency coordinates
    fi = np.mod(.5 + np.arange(m)/m, 1) - .5
    fj = np.mod(.5 + np.arange(n)/n, 1) - .5
    R = np.hypot(fi[:, np.newaxis], fj[np.newaxis, :])  # radius

    W = .5 + .5*np.cos(2*np.pi*R)
    W[R > .5] = 0
    return W
def cross_shading_filter(Q): #, az_1, az_2): # wip
    """construct a directional frequency mask from coherent orientations (wip)

    Estimates which orientations of the cross-spectrum are coherent via a
    radon transform of the local coherence, separates them in two groups
    with k-means, and keeps only the directions of the coherent group.

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    Returns
    -------
    W : np.array, size=(m,n), dtype=bool
        directional weighting mask
    """
    (m,n) = Q.shape
    Coh = local_coherence(np.fft.fftshift(Q))
    R = np.fft.fftshift(low_pass_circle(Q, r=0.50))
    Coh[R==0] = 0

    theta = np.linspace(0., 180., max(m,n), endpoint=False)
    S = radon(Coh, theta)/m # sinogram

    # classify via the central sinogram row, which integrates the
    # coherence through the origin for every direction
    s = S[m//2,:]
    min_idx,max_idx = np.argmin(s), np.argmax(s)
    # create circle -- directions are periodic over 180 degrees, so they
    # are mapped onto a circle (doubled angle) before clustering
    x,y = np.sin(np.radians(2*theta)), np.cos(np.radians(2*theta))
    coh_circle = np.vstack((x,y,(s+.1)**2)).T
    kmeans = KMeans(n_clusters=2, \
                    init=np.array([coh_circle[min_idx,:],
                                   coh_circle[max_idx,:]]),
                    n_init=1
                    ).fit(coh_circle)
    grouping = kmeans.labels_ #.astype(np.float)
    OUT = grouping==grouping[min_idx]

    # construct filter from the directions of the coherent group
    fy = (np.arange(0,m)-(m/2)) /m
    fx = (np.arange(0,n)-(n/2)) /n

    Fx = np.flip(np.repeat(fx[np.newaxis,:],m,axis=0), axis=1)
    Fy = np.repeat(fy[:,np.newaxis],n,axis=1)

    # quantize the per-pixel orientation so np.isin can match it against
    # the sampled theta values
    Theta = np.round(np.degrees(np.arctan2(Fx,Fy) % np.pi)/360 *m) *360 /m
    W = np.isin(Theta, theta[~OUT])
    return W
# cross-spectral and frequency signal metrics for filtering
def thresh_masking(S, m=1e-4, s=10):
    """ mask significant intensities in spectrum

    Parameters
    ----------
    S : np.array, size=(m,n), dtype=complex
        array with spectrum, i.e.: S = np.fft.fft2(I)
    m : float, default=1e-4
        cut-off intensity in respect to maximum
    s : integer, default=10
        kernel size of the median filter

    Returns
    -------
    M : np.array, size=(m,n), dtype=bool
        frequency mask

    See Also
    --------
    tpss

    Notes
    -----
    [1] Stone et al. "A fast direct Fourier-based algorithm for subpixel
    registration of images." IEEE Transactions on geoscience and remote sensing
    vol. 39(10) pp. 2235-2243, 2001.
    [2] Leprince, et.al. "Automatic and precise orthorectification,
    coregistration, and subpixel correlation of satellite images, application
    to ground deformation measurements", IEEE Transactions on Geoscience and
    Remote Sensing vol. 45.6 pp. 1529-1558, 2007.
    """
    Sbar = np.abs(S)
    th = np.max(Sbar)*m

    # compose filter: keep strong coefficients, then smooth the boolean
    # mask with a median filter to remove isolated pixels
    M = Sbar>th
    M = ndimage.median_filter(M, size=(s,s))
    return M
def local_coherence(Q, ds=1):
    """ estimate the local coherence of a spectrum

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        array with cross-spectrum
    ds : integer, default=1
        kernel radius to describe the neighborhood

    Returns
    -------
    M : np.array, size=(m,n), dtype=float
        vector coherence from no to ideal, i.e.: 0...1

    See Also
    --------
    thresh_masking
    """
    diam = 2*ds+1
    C = np.zeros_like(Q)
    (isteps,jsteps) = np.meshgrid(np.linspace(-ds,+ds,2*ds+1, dtype=int), \
                      np.linspace(-ds,+ds,2*ds+1, dtype=int))
    # exclude the center pixel itself from the neighborhood
    IN = np.ones(diam**2, dtype=bool)
    IN[diam**2//2] = False
    isteps,jsteps = isteps.flatten()[IN], jsteps.flatten()[IN]

    for idx, istep in enumerate(isteps):
        jstep = jsteps[idx]
        # NOTE(review): np.roll with a tuple shift and no axis argument
        # rolls the FLATTENED array; presumably axis=(0,1) was intended
        # -- verify against a reference implementation
        Q_step = np.roll(Q, (istep,jstep))
        # if the spectrum is normalized, then no division is needed
        C += Q*np.conj(Q_step)
    C = np.abs(C)/np.sum(IN)
    return C
# frequency/spectrum matching functions
def create_complex_DCT(I, Cc, Cs):
    """construct the complex discrete cosine transform of an image

    Combines the 2D cosine and sine transforms of I into one complex
    transform, using the separable matrix form C·I·Cᵀ.

    Parameters
    ----------
    I : np.array, size=(L,L)
        array with intensities
    Cc : np.array, size=(L,L)
        cosine basis matrix, see get_cosine_matrix
    Cs : np.array, size=(L,L)
        sine basis matrix, see get_sine_matrix

    Returns
    -------
    C_dct : np.array, size=(L,L), dtype=complex
        complex transform coefficients
    """
    # matrix products are required here; the original used '*', which is
    # elementwise for ndarrays and cannot implement the transform
    Ccc = Cc.dot(I).dot(Cc.T)
    Css = Cs.dot(I).dot(Cs.T)
    Csc = Cs.dot(I).dot(Cc.T)
    Ccs = Cc.dot(I).dot(Cs.T)

    C_dct = Ccc-Css + 1j*(-(Ccs+Csc))
    return C_dct
def get_cosine_matrix(I,N=None):
    """Build the orthonormal DCT-II basis matrix for the rows of I.

    Parameters
    ----------
    I : np.array, size=(L,_)
        array whose leading dimension sets the transform length
    N : integer, optional
        number of columns to fill; the remaining columns stay zero.
        Defaults to L, giving a square basis.

    Returns
    -------
    C : np.array, size=(L,L)
        cosine basis matrix
    """
    L = I.shape[0]
    if N is None:
        N = L
    rows = np.arange(1, L)[:, np.newaxis]   # frequency index k >= 1
    cols = np.arange(N)[np.newaxis, :]      # sample index n
    C = np.zeros((L, L))
    C[0, :N] = np.sqrt(1/L)                 # constant (DC) row
    C[1:, :N] = np.sqrt(2/L)*np.cos(np.pi*rows*(.5 + cols)/L)
    return C
def get_sine_matrix(I,N=None):
    """Build the discrete sine basis matrix for the rows of I.

    Mirrors get_cosine_matrix; note that row 0 is the same constant row
    as in the cosine basis.

    Parameters
    ----------
    I : np.array, size=(L,_)
        array whose leading dimension sets the transform length
    N : integer, optional
        number of columns to fill; the remaining columns stay zero.
        Defaults to L, giving a square matrix.

    Returns
    -------
    C : np.array, size=(L,L)
        sine basis matrix
    """
    L = I.shape[0]
    if N is None:
        # make a square matrix
        N = L
    rows = np.arange(1, L)[:, np.newaxis]   # frequency index k >= 1
    cols = np.arange(N)[np.newaxis, :]      # sample index n
    C = np.zeros((L, L))
    C[0, :N] = np.sqrt(1/L)                 # constant row, as in original
    C[1:, :N] = np.sqrt(2/L)*np.sin(np.pi*rows*(.5 + cols)/L)
    return C
def cosi_corr(I1, I2, beta1=.35, beta2=.50, m=1e-4):
    """prepare a weighted, normalized cross-spectrum (COSI-Corr scheme)

    Applies raised-cosine filters to both spectra, stacks multi-spectral
    bands when present, and -- when the templates differ in size --
    repeats the estimation with an integer pre-alignment until only a
    sub-pixel displacement remains.

    Parameters
    ----------
    I1 : np.array, size=(m,n) or (m,n,b)
        template with intensities
    I2 : np.array
        second template with intensities
    beta1 : float, default=.35
        roll-off factor of the raised-cosine filter applied to I1
    beta2 : float, default=.50
        roll-off factor of the raised-cosine filter applied to I2
    m : float, default=1e-4
        relative cut-off for the spectral mask, see thresh_masking

    Returns
    -------
    Qn : np.array, dtype=complex
        normalized cross-spectrum
    WS : np.array, dtype=bool
        spectral weighting mask, derived from the last S1
    m0 : np.array, size=(2,), dtype=int
        accumulated integer displacement applied during pre-alignment
    """
    mt,nt = I1.shape[0], I1.shape[1] # dimensions of the template

    W1 = raised_cosine(np.zeros((mt,nt)), beta1)
    W2 = raised_cosine(np.zeros((mt,nt)), beta2)

    if I1.size==I2.size: # if templates are same size, no refinement is done
        tries = [0]
    else:
        tries = [0, 1]

    di,dj, m0 = 0,0,np.array([0, 0])
    for trying in tries: # implement refinement step to have more overlap
        if I1.ndim==3: # multi-spectral frequency stacking
            bands = I1.shape[2]
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            for i in range(bands): # loop through all bands
                I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
                S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
                if i == 0:
                    Q = (W1*S1)*np.conj((W2*S2))
                else:
                    # running average over the bands
                    Q_b = (W1*S1)*np.conj((W2*S2))
                    Q = (1/(i+1))*Q_b + (i/(i+1))*Q
        else:
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
            Q = (W1*S1)*np.conj((W2*S2))

        # transform back to spatial domain
        C = np.real(np.fft.fftshift(np.fft.ifft2(Q)))
        ddi, ddj,_,_ = get_integer_peak_location(C)
        m_int = np.round(np.array([ddi, ddj])).astype(int)
        if np.amax(abs(np.array([ddi, ddj])))<.5: # only sub-pixel left
            break
        else: # keep track of the integer pre-alignment
            di,dj = m_int[0], m_int[1]
            m0[0] += di
            m0[1] += dj

    WS = thresh_masking(S1, m)
    Qn = normalize_power_spectrum(Q)

    return Qn, WS, m0
def cosine_corr(I1, I2): # wip
    """ match two imagery through discrete cosine transformation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    create_complex_DCT, sign_only_corr

    Notes
    -----
    [1] Lie, et.al. "DCT-based phase correlation motion estimation",
    IEEE international conference on image processing, vol. 1, 2004.
    """
    # construct cosine and sine basis matrices
    Cc, Cs = get_cosine_matrix(I1), get_sine_matrix(I1)

    if I1.ndim == 3:  # multi-spectral frequency stacking
        I1sub, I2sub = make_templates_same_size(I1, I2)
        Q = None
        for band in range(I1.shape[2]):  # running average over all bands
            C1 = create_complex_DCT(I1sub[:, :, band], Cc, Cs)
            C2 = create_complex_DCT(I2sub[:, :, band], Cc, Cs)
            Q_band = C1*np.conj(C2)
            Q = Q_band if Q is None else \
                (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        I1sub, I2sub = make_templates_same_size(I1, I2)
        C1 = create_complex_DCT(I1sub, Cc, Cs)
        C2 = create_complex_DCT(I2sub, Cc, Cs)
        Q = C1*np.conj(C2)
    return Q
def masked_cosine_corr(I1, I2, M1, M2): # wip
    '''
    work in progress: masked DCT-based correlation, where only valid
    (unmasked) pixels contribute to the estimated transform coefficients

    Parameters
    ----------
    I1, I2 : np.array
        templates with intensities
    M1, M2 : np.array
        masks, where a nonzero entry marks valid data

    Returns
    -------
    Q : np.array, dtype=complex
        cross-spectrum
    '''
    M1, M2 = M1.astype(dtype=bool), M2.astype(dtype=bool)

    # construct cosine and sine basis matrices
    Cc, Cs = get_cosine_matrix(I1), get_sine_matrix(I1)

    # look at how many frequencies can be estimated with this data
    (m,n) = M1.shape
    X1 = np.ones((m,n), dtype=bool)
    min_span = int(np.floor(np.sqrt(min(np.sum(M1), np.sum(M2)))))
    X1[min_span:,:] = False
    X1[:,min_span:] = False

    # valid intensities, scaled to roughly [-0.5, +0.5]
    # NOTE(review): assumes 8-bit input (division by 255) -- confirm
    y = (I1[M1].astype(dtype=float)/255)-.5

    # build matrix
    Ccc = np.kron(Cc,Cc)
    # shrink size
    Ccc = Ccc[M1.flatten(),:] # remove rows, as these are missing
    Ccc = Ccc[:,X1.flatten()] # remove collumns, since these can't be estimated
    Icc = np.linalg.lstsq(Ccc, y, rcond=None)[0]
    Icc = np.reshape(Icc, (min_span, min_span))

    # NOTE(review): the next two statements are dead ends -- iCC is unused
    # and the bare np.reshape result is discarded (work in progress)
    iCC = Ccc.T*y
    np.reshape(Ccc.T*y, (min_span, min_span))

    if I1.ndim==3: # multi-spectral frequency stacking
        (mt,nt,bt) = I1.shape
        (ms,ns,bs) = I2.shape
        md, nd = np.round((ms-mt)/2).astype(int), np.round((ns-nt)/2).astype(int)

        for i in range(bt): # loop through all bands
            I1sub = I1[:,:,i]
            I2sub = I2[md:-md, nd:-nd,i]

            C1 = create_complex_DCT(I1sub, Cc, Cs)
            C2 = create_complex_DCT(I2sub, Cc, Cs)
            if i == 0:
                Q = C1*np.conj(C2)
            else:
                # running average over the bands
                Q_b = (C1)*np.conj(C2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)

        C1 = create_complex_DCT(I1sub, Cc, Cs)
        C2 = create_complex_DCT(I2sub, Cc, Cs)
        Q = (C1)*np.conj(C2)
    return Q
def phase_only_corr(I1, I2):
    """ match two imagery through phase only correlation

    The matched filter is conj(S2)/|S2|, i.e. the amplitude of the second
    SPECTRUM is whitened.

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr, symmetric_phase_corr, amplitude_comp_corr

    Notes
    -----
    [1] Horner & Gianino, "Phase-only matched filtering", Applied optics,
    vol. 23(6) pp.812--816, 1984.
    [2] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)

        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)

            # whiten the spectrum of the second template; the original
            # divided by the SPATIAL intensities, which is not phase-only,
            # and used an integer-dtype 'out' array
            S2_abs = np.abs(S2)
            W2 = np.divide(1, S2_abs,
                           out=np.zeros_like(S2_abs), where=S2_abs!=0)
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average over the bands
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)

        S2_abs = np.abs(S2)
        W2 = np.divide(1, S2_abs,
                       out=np.zeros_like(S2_abs), where=S2_abs!=0)
        Q = (S1)*np.conj((W2*S2))
    return Q
def sign_only_corr(I1, I2): # to do
    """ match two imagery through sign-only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    C : np.array, size=(m,n), real
        displacement surface

    See Also
    --------
    cosine_corr

    Notes
    -----
    [1] Ito & Kiya, "DCT sign-only correlation with application to image
    matching and the relationship with phase-only correlation",
    IEEE international conference on acoustics, speech and signal
    processing, vol. 1, 2007.
    """
    if I1.ndim == 3:  # multi-spectral frequency stacking
        I1sub, I2sub = make_templates_same_size(I1, I2)
        Q = None
        for band in range(I1.shape[2]):  # running average over all bands
            C1 = np.sign(fft.dctn(I1sub[:, :, band], 2))
            C2 = np.sign(fft.dctn(I2sub[:, :, band], 2))
            Q_band = C1*np.conj(C2)
            Q = Q_band if Q is None else \
                (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        I1sub, I2sub = make_templates_same_size(I1, I2)
        C1 = np.sign(fft.dctn(I1sub, 2))
        C2 = np.sign(fft.dctn(I2sub, 2))
        Q = C1*np.conj(C2)
    # back to the spatial domain
    C = fft.idctn(Q, 2)
    return C
def symmetric_phase_corr(I1, I2):
    """ match two imagery through symmetric phase correlation

    Both templates contribute equally to the weighting, i.e. the
    cross-spectrum is weighted by 1/sqrt(|I1| |I2|).

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_only_corr, amplitude_comp_corr
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # fixed from the original: 'np.divided' does not exist, the
            # per-band spectra were overwritten with full-array spectra,
            # and the weights were built from the full 3D arrays
            W2 = np.divide(1, np.sqrt(abs(I1bnd))*np.sqrt(abs(I2bnd)) )
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average over the bands
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)

        W2 = np.divide(1, np.sqrt(abs(I1sub))*np.sqrt(abs(I2sub)) )
        Q = (S1)*np.conj((W2*S2))
    return Q
def amplitude_comp_corr(I1, I2, F_0=0.04):
    """ match two imagery through amplitude compensated phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities
    F_0 : float, default=4e-2
        cut-off intensity in respect to maximum

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    Notes
    -----
    [1] Mu et al. "Amplitude-compensated matched filtering", Applied optics,
    vol. 27(16) pp. 3461-3463, 1988.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)

            s_0 = F_0 * np.amax(abs(S2))
            # per-band weights with masked assignment, mirroring the
            # single-band branch (the original used the full 3D array and
            # assigned the whole of A to a boolean subset)
            # NOTE(review): weights are built from the spatial intensities
            # here, as in the single-band branch -- confirm against [1]
            W = np.divide(1, abs(I2bnd) )
            A = np.divide(s_0, abs(I2bnd)**2)
            W[abs(S2)>s_0] = A[abs(S2)>s_0]
            if i == 0:
                Q = (S1)*np.conj((W*S2))
            else:
                # running average over the bands
                Q_b = (S1)*np.conj((W*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)

        s_0 = F_0 * np.amax(abs(S2))
        W = np.divide(1, abs(I2sub) )
        A = np.divide(s_0, abs(I2sub)**2)
        W[abs(S2)>s_0] = A[abs(S2)>s_0]
        Q = (S1)*np.conj((W*S2))
    return Q
def robust_corr(I1, I2):
    """ match two imagery through fast robust correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    Notes
    -----
    [1] Fitch et al. "Fast robust correlation", IEEE transactions on image
    processing vol. 14(8) pp. 1063-1073, 2005.
    [2] Essannouni et al. "Adjustable SAD matching algorithm using frequency
    domain" Journal of real-time image processing, vol.1 pp.257-265
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)

    Q = None
    # accumulate correlation kernels over a small set of robustness scales
    for p in 10**np.arange(0, 1, .5):
        amp = 1/p**(1/3)
        E1 = amp * np.exp(1j*(2*p - 1)*I1sub)
        E2 = amp * np.exp(1j*(2*p - 1)*I2sub)
        term = np.fft.fft2(E1) * np.conj(np.fft.fft2(E2))
        Q = term if Q is None else Q + term
    return Q
def orientation_corr(I1, I2):
    """ match two imagery through orientation correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr, windrose_corr

    Notes
    -----
    [1] Fitch et al. "Orientation correlation", Proceedings of the British
    machine vision conference, pp. 1--10, 2002.
    [2] Heid & Kääb. "Evaluation of existing image matching methods for
    deriving glacier surface displacements globally from optical satellite
    imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)
    if I1.ndim == 3:  # multi-spectral frequency stacking
        Q = None
        for band in range(I1.shape[2]):  # running average over all bands
            S1 = normalize_power_spectrum(np.fft.fft2(I1sub[:, :, band]))
            S2 = normalize_power_spectrum(np.fft.fft2(I2sub[:, :, band]))
            Q_band = S1*np.conj(S2)
            Q = Q_band if Q is None else \
                (1/(band+1))*Q_band + (band/(band+1))*Q
    else:
        S1 = normalize_power_spectrum(np.fft.fft2(I1sub))
        S2 = normalize_power_spectrum(np.fft.fft2(I2sub))
        Q = S1*np.conj(S2)
    return Q
def windrose_corr(I1, I2):
    """ match two imagery through windrose phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, phase_only_corr

    Notes
    -----
    [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            # fix: binarize the spectra with np.sign, as the single-band
            # branch does, so the filter is truly binary phase-only [1]
            S1 = np.sign(np.fft.fft2(I1bnd))
            S2 = np.sign(np.fft.fft2(I2bnd))
            if i == 0:
                Q = (S1)*np.conj(S2)
            else:
                Q_b = (S1)*np.conj(S2)
                # running mean of the band-wise cross-spectra
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.sign(np.fft.fft2(I1sub)), np.sign(np.fft.fft2(I2sub))
        Q = (S1)*np.conj(S2)
    return Q
def phase_corr(I1, I2):
    """ match two imagery through phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, cross_corr

    Notes
    -----
    [1] Kuglin & Hines. "The phase correlation image alignment method",
    proceedings of the IEEE international conference on cybernetics and
    society, pp. 163-165, 1975.
    """
    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            if i == 0:
                Q = (S1)*np.conj(S2)
                Q = np.divide(Q, abs(Q))
            else:
                Q_b = (S1)*np.conj(S2)
                # bug fix: normalize this band by its own magnitude
                # (was abs(Q), the running average, leaving Q_b
                # un-normalized before it entered the mean)
                Q_b = np.divide(Q_b, abs(Q_b))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = (S1)*np.conj(S2)
        Q = np.divide(Q, abs(Q))
    return Q
def cross_corr(I1, I2):
    """ match two imagery through cross correlation in FFT

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    phase_corr

    Notes
    -----
    [1] Heid & Kääb. "Evaluation of existing image matching methods for
    deriving glacier surface displacements globally from optical satellite
    imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)
    if I1.ndim == 3:  # multi-spectral: average the spectra over all bands
        for band in range(I1.shape[2]):
            F1 = np.fft.fft2(I1sub[:, :, band])
            F2 = np.fft.fft2(I2sub[:, :, band])
            if band == 0:
                Q = (F1)*np.conj(F2)
            else:
                Q_b = (F1)*np.conj(F2)
                # running mean over the bands processed so far
                Q = (1/(band+1))*Q_b + (band/(band+1))*Q
    else:  # single band
        F1, F2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = (F1)*np.conj(F2)
    return Q
def binary_orientation_corr(I1, I2):
    """ match two imagery through binary phase only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n)
        cross-spectrum

    See Also
    --------
    orientation_corr, phase_only_corr

    Notes
    -----
    [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
    ternary matched filters with increased signal-to-noise ratios for
    colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.
    """
    I1sub, I2sub = make_templates_same_size(I1, I2)
    if I1.ndim == 3:  # multi-spectral: average the spectra over all bands
        for band in range(I1.shape[2]):
            F1 = np.fft.fft2(I1sub[:, :, band])
            F2 = np.fft.fft2(I2sub[:, :, band])
            # binary weighting from the sign of the real part, see [1]
            W = np.sign(np.real(F2))
            if band == 0:
                Q = (F1)*np.conj(W*F2)
            else:
                Q_b = (F1)*np.conj(W*F2)
                # running mean over the bands processed so far
                Q = (1/(band+1))*Q_b + (band/(band+1))*Q
    else:  # single band
        F1, F2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        W = np.sign(np.real(F2))
        Q = (F1)*np.conj(W*F2)
    return Q
def masked_corr(I1, I2, M1, M2):
    """ match two imagery through masked normalized cross-correlation in FFT

    Parameters
    ----------
    I1 : np.array, size=(m,n)
        array with intensities
    I2 : np.array, size=(m,n)
        array with intensities
    M1 : np.array, size=(m,n)
        array with mask
    M2 : np.array, size=(m,n)
        array with mask

    Returns
    -------
    NCC : np.array, size=(m,n)
        correlation surface

    Notes
    -----
    [1] Padfield. "Masked object registration in the Fourier domain",
    IEEE transactions on image processing, vol. 21(5) pp. 2706-2718, 2011.
    """
    I1sub,I2sub = make_templates_same_size(I1,I2)
    M1sub,M2sub = make_templates_same_size(M1,M2)
    # spectra of both images and both masks
    I1f, I2f = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
    M1f, M2f = np.fft.fft2(M1sub), np.fft.fft2(M2sub)
    # cross terms computed through the convolution theorem (ifft of the
    # product of one spectrum with the conjugate of the other)
    fF1F2 = np.fft.ifft2( I1f*np.conj(I2f) )
    fM1M2 = np.fft.ifft2( M1f*np.conj(M2f) )
    fM1F2 = np.fft.ifft2( M1f*np.conj(I2f) )
    fF1M2 = np.fft.ifft2( I1f*np.conj(M2f) )
    # energy terms: squared intensities correlated with the opposing mask
    ff1M2 = np.fft.ifft2( np.fft.fft2(I1sub**2)*np.conj(M2f) )
    # NOTE(review): only this term flips I2 vertically (np.flipud) while its
    # counterpart ff1M2 does not -- verify the asymmetry against [1]
    fM1f2 = np.fft.ifft2( M1f*np.fft.fft2( np.flipud(I2sub**2) ) )
    # masked NCC: covariance over the product of the masked standard
    # deviations (cf. [1], masked normalized cross-correlation)
    NCC_num = fF1F2 - \
        (np.divide(
            np.multiply( fF1M2, fM1F2 ), fM1M2 ))
    NCC_den = np.multiply( \
        np.sqrt(ff1M2 - np.divide( fF1M2**2, fM1M2) ),
        np.sqrt(fM1f2 - np.divide( fM1F2**2, fM1M2) ))
    NCC = np.divide(NCC_num, NCC_den)
    return NCC
# binary transform functions
def affine_binairy_registration(B1, B2):
    # Estimate an affine transformation between two binary shapes by matching
    # their low-order geometric moments; the two nonlinear 3x3 systems are
    # solved with scipy's fsolve (imported at module level).
    # NOTE: the typo in the name ("binairy") is kept -- callers depend on it.
    #
    # B1 : np.array, size=(m,n) -- binary template shape
    # B2 : np.array, size=(m,n) -- binary observed shape
    # returns Q : np.array, size=(2,3) -- affine coefficients, arranged as
    #             [[Q12, Q13, Q11], [Q22, Q23, Q21]]
    # preparation
    pT = np.sum(B1) # Lebesgue integral
    pO = np.sum(B2)
    Jac = pO/pT # Jacobian
    # pixel coordinate grids of the template
    x = np.linspace(0,B1.shape[1]-1,B1.shape[1])
    y = np.linspace(0,B1.shape[0]-1,B1.shape[0])
    X1, Y1 = np.meshgrid(x,y)
    del x, y
    # calculating moments of the template
    # (first, second and third order moments in x ...)
    x11 = Jac* np.sum(X1 * B1)
    x12 = Jac* np.sum(X1**2 * B1)
    x13 = Jac* np.sum(X1**3 * B1)
    # (... and in y)
    x21 = Jac* np.sum(Y1 * B1)
    x22 = Jac* np.sum(Y1**2 * B1)
    x23 = Jac* np.sum(Y1**3 * B1)
    del X1, Y1
    # pixel coordinate grids of the observation
    x = np.linspace(0,B2.shape[1]-1,B2.shape[1])
    y = np.linspace(0,B2.shape[0]-1,B2.shape[0])
    X2, Y2 = np.meshgrid(x,y)
    del x, y
    # calculating moments of the observation, including mixed moments
    y1 = np.sum(X2 * B2)
    y12 = np.sum(X2**2 * B2)
    y13 = np.sum(X2**3 * B2)
    y12y2= np.sum(X2**2*Y2 * B2)
    y2 = np.sum(Y2 * B2)
    y22 = np.sum(Y2**2 * B2)
    y23 = np.sum(Y2**3 * B2)
    y1y22= np.sum(X2*Y2**2 * B2)
    y1y2 = np.sum(X2*Y2 * B2)
    del X2, Y2
    # estimation
    mu = pO
    # system for the x-row of the affine transform: match first, second and
    # third order x-moments of the transformed observation to the template
    def func1(x):
        q11, q12, q13 = x
        return [mu*q11 + y1*q12 + y2*q13 - x11,
                mu*q11**2 + y12*q12**2 + y22*q13**2 + 2*y1*q11*q12 + \
                2*y2*q11*q13 + 2*y1y2*q12*q13 - x12,
                mu*q11**3 + y13*q12**3 + y23*q13**3 + 3*y1*q11**2*q12 + \
                3*y2*q11**2*q13 + 3*y12*q12**2*q11 + 3*y12y2*q12**2*q13 + \
                3*y22*q11*q13**2 + 3*y1y22*q12*q13**2 + \
                6*y1y2*q11*q12*q13 - x13]
    Q11, Q12, Q13 = fsolve(func1, (1.0, 1.0, 1.0))
    # test for complex solutions, which should be excluded
    # analogous system for the y-row of the affine transform
    def func2(x):
        q21, q22, q23 = x
        return [mu*q21 + y1*q22 + y2*q23 - x21,
                mu*q21**2 + y12*q22**2 + y22*q23**2 + 2*y1*q21*q22 + \
                2*y2*q21*q23 + 2*y1y2*q22*q23 - x22,
                mu*q21**3 + y13*q22**3 + y23*q23**3 + 3*y1*q21**2*q22 + \
                3*y2*q21**2*q23 + 3*y12*q22**2*q21 + 3*y12y2*q22**2*q23 + \
                3*y22*q21*q23**2 + 3*y1y22*q22*q23**2 + \
                6*y1y2*q21*q22*q23 - x23]
    Q21, Q22, Q23 = fsolve(func2, (1.0, 1.0, 1.0))
    # test for complex solutions, which should be excluded
    Q = np.array([[Q12, Q13, Q11], [Q22, Q23, Q21]])
    return Q
# boundary describtors
def get_relative_group_distances(x, K=5):
    """Relative (cyclic) distances to the K preceding and K following
    neighbours of every element of x.

    Parameters
    ----------
    x : np.array, size=(n,)
        coordinate component along one axis
    K : integer
        number of neighbours on each side

    Returns
    -------
    dx_minus, dx_plus : np.array, size=(n,K)
        differences towards the preceding / following neighbours
    """
    # columns k=1..K hold the array rolled by +k (preceding) or -k (following)
    backward = np.stack([np.roll(x, +k) for k in range(1, K+1)], axis=1)
    forward = np.stack([np.roll(x, -k) for k in range(1, K+1)], axis=1)
    center = np.repeat(np.expand_dims(x, axis=1), K, axis=1)
    return backward - center, forward - center
def get_relative_distances(x, x_id, K=5):
    """Relative (cyclic) distances from element x[x_id] to its K preceding
    and K following neighbours.

    Parameters
    ----------
    x : np.array, size=(n,)
        coordinate component along one axis
    x_id : integer
        index of the anchor element
    K : integer
        number of neighbours on each side

    Returns
    -------
    dx_minus, dx_plus : np.array, size=(K,)
        differences towards the preceding / following neighbours
    """
    n = len(x)
    # wrap around the boundary with a modulo on the index range
    idx_before = np.arange(x_id - K, x_id) % n
    idx_after = np.arange(x_id, x_id + K) % n
    anchor = np.repeat(x[x_id], K)
    return x[idx_before] - anchor, x[idx_after] - anchor
def beam_angle_statistics(x, y, K=5, xy_id=None):
    """Beam angle statistics (BAS) shape descriptor.

    Computes, for boundary coordinates (x, y), the angle between the beams
    towards the K preceding and K following neighbours, and returns the
    mean and standard deviation of those angles.

    See Arica & Vural, 2003: "BAS: a perceptual shape descriptor based on
    the beam angle statistics", Pattern Recognition Letters 24: 1627-1639.
    """
    if xy_id is None:  # descriptors for every coordinate
        dx_minus, dx_plus = get_relative_group_distances(x, K)
        dy_minus, dy_plus = get_relative_group_distances(y, K)
        ax = 1
    else:  # descriptor for a single coordinate
        dx_minus, dx_plus = get_relative_distances(x, xy_id, K)
        dy_minus, dy_plus = get_relative_distances(y, xy_id, K)
        ax = 0
    # beam angle: difference of the directions towards both neighbour sets
    C = np.arctan2(dy_minus, dx_minus) - np.arctan2(dy_plus, dx_plus)
    # first two moments of the angle distribution
    mean_C, std_C = np.mean(C, axis=ax), np.std(C, axis=ax)
    return np.concatenate((np.expand_dims(mean_C, axis=ax),
                           np.expand_dims(std_C, axis=ax)), axis=ax)
def cast_angle_neighbours(x, y, sun, K=5, xy_id=None):
    '''Angles towards neighbouring boundary points, measured in a frame
    rotated towards the illumination direction `sun` (a unit vector).
    '''
    if xy_id is None:  # descriptors for every coordinate
        dx_minus, dx_plus = get_relative_group_distances(x, K)
        dy_minus, dy_plus = get_relative_group_distances(y, K)
        ax = 1
    else:  # descriptor for a single coordinate
        dx_minus, dx_plus = get_relative_distances(x, xy_id, K)
        dy_minus, dy_plus = get_relative_distances(y, xy_id, K)
        ax = 0
    # project the offsets onto the sun direction and its normal,
    # then take the angle in that rotated frame
    along_m = sun[0]*dx_minus + sun[1]*dy_minus
    across_m = -sun[1]*dx_minus + sun[0]*dy_minus
    along_p = sun[0]*dx_plus + sun[1]*dy_plus
    across_p = -sun[1]*dx_plus + sun[0]*dy_plus
    return np.concatenate((np.arctan2(along_m, across_m),
                           np.arctan2(along_p, across_p)), axis=ax)
def neighbouring_cast_distances(x, y, sun, K=5, xy_id=None):
    '''Distances to neighbouring boundary points, projected onto the
    illumination direction `sun` (a unit vector).
    '''
    if xy_id is None:  # descriptors for every coordinate
        dx_minus, dx_plus = get_relative_group_distances(x, K)
        dy_minus, dy_plus = get_relative_group_distances(y, K)
        ax = 1
    else:  # descriptor for a single coordinate
        dx_minus, dx_plus = get_relative_distances(x, xy_id, K)
        dy_minus, dy_plus = get_relative_distances(y, xy_id, K)
        ax = 0
    # keep only the component of each offset along the sun direction
    return np.concatenate((sun[0]*dx_minus + sun[1]*dy_minus,
                           sun[0]*dx_plus + sun[1]*dy_plus), axis=ax)
# supporting functions
def make_templates_same_size(I1,I2):
    """Crop the central part of the search space I2 so it matches the size
    of the template I1.

    Parameters
    ----------
    I1 : np.array, size=(m,n) or (m,n,b)
        template with intensities
    I2 : np.array, size=(ms,ns) or (ms,ns,b), with ms>=m and ns>=n
        search space with intensities

    Returns
    -------
    I1 : np.array
        the template, unaltered
    I2sub : np.array
        central crop of I2, (roughly) the same size as I1

    Notes
    -----
    Bug fix: the original guard ``md==0 | nd==0`` parsed as
    ``md == (0|nd) == 0`` (both margins zero) due to operator precedence,
    so a single zero margin produced an empty slice ``I2[0:-0]``. Each
    axis is now sliced independently.
    """
    mt,nt = I1.shape[0],I1.shape[1] # dimension of the template
    ms,ns = I2.shape[0],I2.shape[1] # dimension of the search space
    assert ms>=mt # search domain should be of equal size or bigger
    assert ns>=nt
    assert I1.ndim==I2.ndim # should be the same dimension
    md, nd = (ms-mt)//2, (ns-nt)//2
    # slice an axis only when its margin is non-zero; I2[+0:-0] is empty
    rows = slice(md, -md) if md != 0 else slice(None)
    cols = slice(nd, -nd) if nd != 0 else slice(None)
    if I1.ndim==3:
        I2sub = I2[rows, cols, :]
    else:
        I2sub = I2[rows, cols]
    return I1, I2sub
def test_bounds_reposition(d, temp_size, search_size):
"""
See Also
--------
reposition_templates_from_center
"""
space_bound = (search_size-temp_size) // 2
if abs(d) > space_bound:
warnings.warn("part of the template will be out of the image" +
"with this displacement estimate")
reposition_dir = np.sign(d)
d = reposition_dir * np.minimum(abs(d), space_bound)
return d
def reposition_templates_from_center(I1, I2, di, dj):
    """Crop I2 around its centre, shifted by the displacement (di, dj),
    to the size of the template I1."""
    mt, nt = I1.shape[0], I1.shape[1]  # dimension of the template
    ms, ns = I2.shape[0], I2.shape[1]  # dimension of the search space
    di, dj = int(di), int(dj)
    # clip the displacement so the crop stays inside the search space
    di = test_bounds_reposition(di, mt, ms)
    dj = test_bounds_reposition(dj, nt, ns)
    assert ms >= mt  # search domain should be of equal size or bigger
    assert ns >= nt
    assert I1.ndim == I2.ndim  # should be the same dimension
    mc, nc = ms//2, ns//2  # center location
    rows = slice(mc-(mt//2)-di, mc+(mt//2)-di)
    cols = slice(nc-(nt//2)-dj, nc+(nt//2)-dj)
    if I1.ndim == 3:
        I2sub = I2[rows, cols, :]
    else:
        I2sub = I2[rows, cols]
    return I1, I2sub
def get_coordinates_of_template_centers(grid, temp_size):
    """
    When tiling an array into small templates, this function
    gives the locations of the centers.

    Parameters
    ----------
    grid : np.array, size=(n,m)
        array with data values
    temp_size : integer
        size of the kernel in pixels

    Returns
    -------
    Iidx : np.array, size=(k,l)
        array with row coordinates
    Jidx : np.array, size=(k,l)
        array with column coordinates
    """
    radius = int(np.floor(temp_size / 2))
    i_samples = np.arange(radius, grid.shape[0] - radius, temp_size)
    j_samples = np.arange(radius, grid.shape[1] - radius, temp_size)
    # 'ij' indexing keeps rows varying with Iidx and columns with Jidx,
    # matching the repeat/transpose construction this replaces
    Iidx, Jidx = np.meshgrid(i_samples, j_samples, indexing='ij')
    return Iidx, Jidx
def get_grid_at_template_centers(grid, temp_size):
    """
    When tiling an array into small templates, this function
    gives the value of the pixel in its center.

    Parameters
    ----------
    grid : np.array, size=(n,m)
        array with data values
    temp_size : integer
        size of the kernel in pixels

    Returns
    -------
    np.array, size=(k,l)
        data value of the pixel in each kernel's center
    """
    rows, cols = get_coordinates_of_template_centers(grid, temp_size)
    return grid[rows, cols]
|
{"hexsha": "0ab9fffce83fff3a990620bd09f7c25790955385", "size": 79340, "ext": "py", "lang": "Python", "max_stars_repo_path": "matching_tools.py", "max_stars_repo_name": "GO-Eratosthenes/start-code", "max_stars_repo_head_hexsha": "d40192a482a260676db9ec9b3ece6854c0d8ccf7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-09T14:45:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T14:45:05.000Z", "max_issues_repo_path": "matching_tools.py", "max_issues_repo_name": "GO-Eratosthenes/start-code", "max_issues_repo_head_hexsha": "d40192a482a260676db9ec9b3ece6854c0d8ccf7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-02-22T13:42:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T22:07:24.000Z", "max_forks_repo_path": "matching_tools.py", "max_forks_repo_name": "GO-Eratosthenes/start-code", "max_forks_repo_head_hexsha": "d40192a482a260676db9ec9b3ece6854c0d8ccf7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5843949045, "max_line_length": 93, "alphanum_fraction": 0.5353793799, "include": true, "reason": "import numpy,from scipy", "num_tokens": 23685}
|
import numpy
try:
import cupy
import cupyx
import cupy.cuda
from cupy.cuda.compiler import compile_with_cache # noqa: F401
has_cupy = True
# We no longer have to set up the memory pool, fortunately.
except ImportError:
cupy = None
cupyx = None
has_cupy = False
from .ops import Ops
from .numpy_ops import NumpyOps
from . import _custom_kernels
from ..util import get_array_module
from ..types import DeviceTypes
class CupyOps(Ops):
    """GPU implementation of the `Ops` backend, built on CuPy.

    `xp` is bound to `cupy`, so inherited `Ops` methods that dispatch
    through `self.xp` run on the GPU; specialised operations are delegated
    to `_custom_kernels`. Note: `cupy`/`cupyx` are None when the import at
    the top of the module failed, in which case this class is unusable.
    """
    name = "cupy"
    xp = cupy
    _xp2 = cupyx

    def __init__(
        self, device_type: DeviceTypes = "gpu", device_id: int = 0, **kwargs
    ) -> None:
        # **kwargs is accepted (and ignored) for signature compatibility
        # with the other Ops backends.
        self.device_type = device_type
        self.device_id = device_id

    def to_numpy(self, data):
        # numpy arrays pass through untouched; anything else is assumed to
        # be a cupy array and is copied device->host via .get().
        if isinstance(data, numpy.ndarray):
            return data
        else:
            return data.get()

    def gemm(self, x, y, out=None, trans1=False, trans2=False):
        # Matrix multiplication on the GPU; numpy inputs are rejected so a
        # silent host<->device transfer can never happen here.
        if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
            raise ValueError(
                "Encountered a numpy array when processing with cupy. "
                "Did you call model.ops.asarray on your data?"
            )
        if trans1:
            x = x.T
        if trans2:
            y = y.T
        if out is None:
            return self.xp.dot(x, y)
        else:
            self.xp.dot(x, y, out=out)
            return out

    def asarray(self, data, dtype=None):
        # This is sort of frustrating, but we can't easily otherwise pass
        # forward "unset".
        dtype = {"dtype": dtype} if dtype is not None else {}
        if isinstance(data, cupy.ndarray):
            return self.xp.asarray(data, **dtype)
        elif hasattr(data, "data_ptr"):
            # Handles PyTorch Tensors
            # NOTE(review): `cupy.cuda.MemoryPointer` is normally built from
            # a (memory, offset) pair, and `Tensor.stride()` returns strides
            # rather than the shape -- confirm this branch works against a
            # real torch tensor before relying on it.
            pointer = cupy.cuda.MemoryPointer(data.data_ptr())
            shape = data.stride()
            array = self.xp.ndarray(shape, memptr=pointer, **dtype)
            return array
        else:
            result = self.xp.array(data, **dtype)
            return result

    def maxout(self, X):
        # Delegated to the custom CUDA kernel.
        return _custom_kernels.maxout(X)

    def backprop_maxout(self, dY, which, P):
        return _custom_kernels.backprop_maxout(dY, which, P)

    def relu(self, X, inplace=False):
        # Elementwise max(x, 0); optionally mutates X in place.
        if not inplace:
            return X * (X > 0)
        else:
            X *= X > 0
            return X

    def backprop_relu(self, dY, Y, inplace=False):
        # Gradient passes through only where the forward output was positive.
        if not inplace:
            return dY * (Y > 0)
        dY *= Y > 0
        return dY

    def mish(self, X, threshold=20.0):
        return _custom_kernels.mish(X, threshold=threshold, out=None)

    def backprop_mish(self, dY, X, threshold=20.0, out=None):
        return _custom_kernels.backprop_mish(dY, X, threshold=threshold, out=out)

    def clip_gradient(self, gradient, threshold):
        # Rescales the gradient in place when its L2 norm reaches threshold.
        xp = get_array_module(gradient)
        grad_norm = xp.linalg.norm(gradient)
        if grad_norm >= threshold:
            gradient *= threshold / grad_norm
        return gradient

    def seq2col(self, seq, nW):
        """Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1)) sequence.
        The new sequence is constructed by concatenating nW preceding and succeeding
        vectors onto each column in the sequence, to extract a window of features.
        """
        return _custom_kernels.seq2col(seq, nW)

    def backprop_seq2col(self, dY, nW):
        return _custom_kernels.backprop_seq2col(dY, nW)

    def reduce_mean(self, X, lengths):
        # Per-sequence reduction over a ragged batch described by `lengths`.
        return _custom_kernels.reduce_mean(X, lengths)

    def backprop_reduce_mean(self, d_means, lengths):
        return _custom_kernels.backprop_reduce_mean(d_means, lengths)

    def reduce_max(self, X, lengths):
        return _custom_kernels.reduce_max(X, lengths)

    def backprop_reduce_max(self, d_maxes, which, lengths):
        return _custom_kernels.backprop_reduce_max(d_maxes, which, lengths)

    def reduce_sum(self, X, lengths):
        return _custom_kernels.reduce_sum(X, lengths)

    def backprop_reduce_sum(self, d_sums, lengths):
        return _custom_kernels.backprop_reduce_sum(d_sums, lengths)

    def hash(self, ids, seed):
        return _custom_kernels.hash(ids, seed)

    def scatter_add(self, table, indices, values):
        # cupyx's scatter_add accumulates values for duplicate indices,
        # unlike plain fancy-index assignment.
        self._xp2.scatter_add(table, indices, values)

    def adam(
        self, weights, gradient, mom1, mom2, beta1, beta2, eps, learn_rate, mod_rate=1.0
    ):
        # Fused Adam update on the GPU. CuPy caches compiled kernels by
        # their source, so re-creating the ElementwiseKernel per call does
        # not trigger recompilation. `mod_rate` is accepted but unused.
        cupy.ElementwiseKernel(
            "T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps",
            "T param, T m, T v",
            """m += one_minus_beta1 * (grad - m);
            v += one_minus_beta2 * (grad * grad - v);
            param -= lr * m / (sqrt(v) + eps);""",
            "adam",
        )(gradient, learn_rate, 1 - beta1, 1 - beta2, eps, weights, mom1, mom2)
        # The gradient has been consumed by the update; zero it for reuse.
        gradient.fill(0)
        return weights, gradient, mom1, mom2

    def position_encode(self, N, D, period=10000, out=None):
        # Computed on the CPU via NumpyOps, then transferred to the device.
        positions = NumpyOps().position_encode(N, D, period=period, out=out)
        return self.asarray(positions)
|
{"hexsha": "5a5ca32576695c900a9a894683366b37f63a06c3", "size": 5057, "ext": "py", "lang": "Python", "max_stars_repo_path": "thinc/backends/cupy_ops.py", "max_stars_repo_name": "justindujardin/thinc", "max_stars_repo_head_hexsha": "b3d641c4d42e430fe9917ded7cb2892256f9b7e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thinc/backends/cupy_ops.py", "max_issues_repo_name": "justindujardin/thinc", "max_issues_repo_head_hexsha": "b3d641c4d42e430fe9917ded7cb2892256f9b7e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thinc/backends/cupy_ops.py", "max_forks_repo_name": "justindujardin/thinc", "max_forks_repo_head_hexsha": "b3d641c4d42e430fe9917ded7cb2892256f9b7e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4166666667, "max_line_length": 88, "alphanum_fraction": 0.6102432272, "include": true, "reason": "import numpy,import cupy,from cupy", "num_tokens": 1293}
|
// Copyright (c) 2013, Thomas Goyne <plorkyeran@aegisub.org>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
// Aegisub Project http://www.aegisub.org/
#include "../config.h"
#include "libaegisub/karaoke_matcher.h"
#include "libaegisub/kana_table.h"
#include "libaegisub/util.h"
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/locale/boundary.hpp>
#include <boost/locale/collator.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <unicode/uchar.h>
#include <unicode/utf8.h>
namespace {
// Decode and return the code point at byte offset *i of the UTF-8 string
// str, advancing *i past it. Uses the UNSAFE decode macro, so str must be
// valid UTF-8 (callers pass std::string::c_str(), which is NUL-terminated).
int32_t next_codepoint(const char *str, size_t *i) {
	UChar32 c;
	U8_NEXT_UNSAFE(str, *i, c);
	return c;
}

// True if the code point is Unicode whitespace
bool is_whitespace(int32_t c) {
	return !!u_isUWhiteSpace(c);
}

// True if every code point of the string is whitespace (the loop stops at
// the terminating NUL); an empty string therefore counts as whitespace
bool is_whitespace(std::string const& str) {
	size_t i = 0;
	while (auto c = next_codepoint(str.c_str(), &i)) {
		if (!u_isUWhiteSpace(c))
			return false;
	}
	return true;
}

// strcmp but ignoring case and accents
int compare(std::string const& a, std::string const& b) {
	using namespace boost::locale;
	return std::use_facet<collator<char>>(std::locale()).compare(collator_base::primary, a, b);
}
}
namespace agi {
/// Automatically match leading karaoke timing syllables against leading
/// text characters, transliterating between romaji and kana where needed.
///
/// @param source_strings Timed syllables to match from (first entry is the
///                       current syllable).
/// @param dest_string    Destination text to match against.
/// @return The number of leading source syllables and destination
///         characters that belong together as one matched group.
karaoke_match_result auto_match_karaoke(std::vector<std::string> const& source_strings, std::string const& dest_string) {
	karaoke_match_result result = { 0, 0 };
	if (source_strings.empty()) return result;

	using namespace boost::locale::boundary;
	using boost::starts_with;

	result.source_length = 1;
	// Iterate over user-perceived characters, not raw bytes
	ssegment_index destination_characters(character, begin(dest_string), end(dest_string));

	auto src = boost::to_lower_copy(source_strings[0]);
	auto dst = destination_characters.begin();
	auto dst_end = destination_characters.end();

	// Eat all the whitespace at the beginning of the source and destination
	// syllables and exit if either ran out.
	auto eat_whitespace = [&]() -> bool {
		size_t i = 0, first_non_whitespace = 0;
		while (is_whitespace(next_codepoint(src.c_str(), &i)))
			first_non_whitespace = i;
		if (first_non_whitespace)
			src = src.substr(first_non_whitespace);

		while (dst != dst_end && is_whitespace(dst->str())) {
			++dst;
			++result.destination_length;
		}

		// If we ran out of dest then this needs to match the rest of the
		// source syllables (this probably means the user did something wrong)
		if (dst == dst_end) {
			result.source_length = source_strings.size();
			return true;
		}

		return src.empty();
	};

	if (eat_whitespace()) return result;

	// We now have a non-whitespace character at the beginning of both source
	// and destination. Check if the source starts with a romanized kana, and
	// if it does then check if the destination also has the appropriate
	// character. If it does, match them and repeat.
	while (!src.empty()) {
		// First check for a basic match of the first character of the source and dest
		auto first_src_char = ssegment_index(character, begin(src), end(src)).begin()->str();
		if (compare(first_src_char, dst->str()) == 0) {
			++dst;
			++result.destination_length;
			src.erase(0, first_src_char.size());
			if (eat_whitespace()) return result;
			continue;
		}

		// Consume the romaji prefix from src and the matching kana from dst
		auto check = [&](kana_pair const& kp) -> bool {
			if (!starts_with(&*dst->begin(), kp.kana)) return false;

			src = src.substr(strlen(kp.romaji));
			// Advance dst once per destination character covered by the kana
			for (size_t i = 0; kp.kana[i]; ) {
				i += dst->length();
				++result.destination_length;
				++dst;
			}
			return true;
		};

		bool matched = false;
		for (auto const& match : romaji_to_kana(src)) {
			if (check(match)) {
				if (eat_whitespace()) return result;
				matched = true;
				break;
			}
		}
		if (!matched) break;
	}

	// Source and dest are now non-empty and start with non-whitespace.
	// If there's only one character left in the dest, it obviously needs to
	// match all of the source syllables left.
	if (std::distance(dst, dst_end) == 1) {
		result.source_length = source_strings.size();
		++result.destination_length;
		return result;
	}

	// We couldn't match the current character, but if we can match the *next*
	// syllable then we know that everything in between must belong to the
	// current syllable. Do this by looking up to KANA_SEARCH_DISTANCE
	// characters ahead in destination and seeing if we can match them against
	// the beginning of a syllable after this syllable.
	// If a match is found, make a guess at how much source and destination
	// should be selected based on the distances it was found at.

	// The longest kanji are 'uketamawa.ru' and 'kokorozashi', each with a
	// reading consisting of five kana. This means each each character from
	// the destination can match at most five syllables from the source.
	static const int max_character_length = 5;

	// Arbitrarily chosen limit on the number of dest characters to try
	// skipping. Higher numbers probably increase false-positives.
	static const int dst_lookahead_max = 3;

	for (size_t lookahead = 0; lookahead < dst_lookahead_max; ++lookahead) {
		if (++dst == dst_end) break;

		// Transliterate this character if it's a known hiragana or katakana character
		std::vector<const char *> translit;
		auto next = std::next(dst);
		if (next != dst_end)
			boost::copy(kana_to_romaji(dst->str() + next->str()), back_inserter(translit));
		boost::copy(kana_to_romaji(dst->str()), back_inserter(translit));

		// Search for it and the transliterated version in the source
		int src_lookahead_max = (lookahead + 1) * max_character_length;
		int src_lookahead_pos = 0;
		for (auto const& syl : source_strings) {
			// Don't count blank syllables in the max search distance
			if (is_whitespace(syl)) continue;
			if (++src_lookahead_pos == 1) continue;
			if (src_lookahead_pos > src_lookahead_max) break;

			std::string lsyl = boost::to_lower_copy(syl);
			if (!(starts_with(syl, dst->str()) || util::any_of(translit, [&](const char *str) { return starts_with(lsyl, str); })))
				continue;

			// The syllable immediately after the current one matched, so
			// everything up to the match must go with the current syllable.
			if (src_lookahead_pos == 2) {
				result.destination_length += lookahead + 1;
				return result;
			}

			// The match was multiple syllables ahead, so just divide the
			// destination characters evenly between the source syllables
			result.destination_length += 1;
			result.source_length = static_cast<size_t>((src_lookahead_pos - 1.0) / (lookahead + 1.0) + .5);
			return result;
		}
	}

	// We wouldn't have gotten here if the dest was empty, so make sure at
	// least one character is selected
	result.destination_length = std::max<size_t>(result.destination_length, 1u);

	return result;
}
}
|
{"hexsha": "68f2f35240d4d6cc91543cecfb80d7e35c967166", "size": 7274, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "aegisub/libaegisub/common/karaoke_matcher.cpp", "max_stars_repo_name": "rcombs/Aegisub", "max_stars_repo_head_hexsha": "58f35cd31c7f0f5728e0a28e6a7a9fd6fce70c50", "max_stars_repo_licenses": ["ISC"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2018-02-12T02:44:57.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-12T02:44:57.000Z", "max_issues_repo_path": "aegisub/libaegisub/common/karaoke_matcher.cpp", "max_issues_repo_name": "rcombs/Aegisub", "max_issues_repo_head_hexsha": "58f35cd31c7f0f5728e0a28e6a7a9fd6fce70c50", "max_issues_repo_licenses": ["ISC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aegisub/libaegisub/common/karaoke_matcher.cpp", "max_forks_repo_name": "rcombs/Aegisub", "max_forks_repo_head_hexsha": "58f35cd31c7f0f5728e0a28e6a7a9fd6fce70c50", "max_forks_repo_licenses": ["ISC"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2018-02-12T03:46:24.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-12T14:36:07.000Z", "avg_line_length": 34.6380952381, "max_line_length": 122, "alphanum_fraction": 0.7165246082, "num_tokens": 1868}
|
from google.protobuf import text_format
import numpy as np
import caffe_pb2
def parse_caffemodel(filepath):
    '''
    parses the trained .caffemodel file

    filepath: /path/to/trained-model.caffemodel
    returns: layers
    '''
    # 'with' guarantees the file handle is closed even if reading or
    # protobuf parsing fails (the original leaked the handle)
    with open(filepath, 'rb') as f:
        contents = f.read()
    netparam = caffe_pb2.NetParameter()
    netparam.ParseFromString(contents)
    return find_layers(netparam)
def find_layers(netparam):
    """Return whichever layer collection of the NetParameter is populated.

    Old-style protobufs store layers in `layers` (V1), new-style in
    `layer` (V2); the first non-empty collection wins.
    """
    for attribute in ('layers', 'layer'):
        found = getattr(netparam, attribute)
        if len(found) > 0:
            return found
    raise Exception ("Couldn't find layers")
def main():
    # Example driver for the parser.
    # NOTE(review): 'xxx.caffemodel' is a placeholder path; this raises an
    # IOError unless such a file exists in the working directory, and the
    # parsed result is currently unused.
    param_dict = parse_caffemodel('xxx.caffemodel')

if __name__ == '__main__':
    main()
|
{"hexsha": "865e047507df4099d8f658de9c31ca92567c1bf9", "size": 741, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/caffe_converter/caffe_parse/parse_from_protobuf.py", "max_stars_repo_name": "Liuxg16/BrainMatrix", "max_stars_repo_head_hexsha": "0ec70edd4e12dd3719d20dd14d4e24438c60326f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-06-12T12:12:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-26T01:45:15.000Z", "max_issues_repo_path": "tools/caffe_converter/caffe_parse/parse_from_protobuf.py", "max_issues_repo_name": "achao2013/mxnet-quantify", "max_issues_repo_head_hexsha": "ae77c896da6db35530390e3cf8e524d553bba112", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-26T19:53:49.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-26T19:53:49.000Z", "max_forks_repo_path": "tools/caffe_converter/caffe_parse/parse_from_protobuf.py", "max_forks_repo_name": "achao2013/mxnet-quantify", "max_forks_repo_head_hexsha": "ae77c896da6db35530390e3cf8e524d553bba112", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2016-11-18T07:21:41.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-30T08:48:22.000Z", "avg_line_length": 21.1714285714, "max_line_length": 51, "alphanum_fraction": 0.6761133603, "include": true, "reason": "import numpy", "num_tokens": 179}
|
import matplotlib.path
import numpy
# CSEP Imports
import numpy as np
import pyproj
from csep.utils.time_utils import datetime_to_utc_epoch, epoch_time_to_utc_datetime
from csep.utils import plots
class Simulation:
    """
    View of CSEP Experiment. Contains minimal information required to perform evaluations of
    CSEP Forecasts
    """

    def __init__(self, filename='', min_mw=2.5, start_time=-1, sim_type='', name=''):
        # store the experiment descriptors verbatim
        (self.filename, self.min_mw, self.start_time,
         self.sim_type, self.name) = (filename, min_mw, start_time,
                                      sim_type, name)
class Event:
    """Minimal earthquake event record: id, magnitude, epicentre and
    origin time."""

    def __init__(self, id=None, magnitude=None, latitude=None, longitude=None, time=None):
        self.id = id
        self.magnitude = magnitude
        self.latitude = latitude
        self.longitude = longitude
        self.time = time

    @classmethod
    def from_dict(cls, adict):
        """Build an Event from a dict; the epoch 'time' entry is converted
        to a UTC datetime."""
        fields = {key: adict[key]
                  for key in ('id', 'magnitude', 'latitude', 'longitude')}
        return cls(time=epoch_time_to_utc_datetime(adict['time']), **fields)

    def to_dict(self):
        """Serialize to a dict, converting 'time' back to a UTC epoch."""
        return {
            'id': self.id,
            'magnitude': self.magnitude,
            'latitude': self.latitude,
            'longitude': self.longitude,
            'time': datetime_to_utc_epoch(self.time),
        }
class EvaluationResult:
    """Holds the outcome of a single forecast evaluation and supports
    round-tripping through a plain dict for serialization.
    """

    def __init__(self, test_distribution=None, name=None, observed_statistic=None, quantile=None, status="",
                 obs_catalog_repr='', sim_name=None, obs_name=None, min_mw=None):
        """
        Args:
            test_distribution (1d array-like): statistics computed from stochastic event sets
            name (str): name of the evaluation
            observed_statistic (float or int): statistic computed from the observed catalog
            quantile (tuple or float): quantile of observed statistic in the test distribution
            status (str): optional status text
            obs_catalog_repr (str): text describing the observed catalog used
            sim_name (str): name of the simulation
            obs_name (str): name of the observed catalog
            min_mw: minimum magnitude used for the evaluation
        """
        self.test_distribution = test_distribution
        self.name = name
        self.observed_statistic = observed_statistic
        self.quantile = quantile
        self.status = status
        self.obs_catalog_repr = obs_catalog_repr
        self.sim_name = sim_name
        self.obs_name = obs_name
        self.min_mw = min_mw
        # Record the concrete class name so a serialized result can be
        # mapped back to the right type on load.
        self.named_type = self.__class__.__name__

    def to_dict(self):
        """Serialize to a plain dict; numpy arrays become lists."""
        try:
            serialized_td = self.test_distribution.tolist()
        except AttributeError:
            # no .tolist() -> not a numpy array; coerce via list()
            serialized_td = list(self.test_distribution)
        return {
            'name': self.name,
            'sim_name': self.sim_name,
            'obs_name': self.obs_name,
            'obs_catalog_repr': self.obs_catalog_repr,
            'quantile': self.quantile,
            'observed_statistic': self.observed_statistic,
            'test_distribution': serialized_td,
            'status': self.status,
            'min_mw': self.min_mw,
            'type': self.named_type,
        }

    @classmethod
    def from_dict(cls, adict):
        """Create an evaluation result from a dict produced by :meth:`to_dict`.

        Args:
            adict (dict): serialized evaluation result

        Returns:
            EvaluationResult
        """
        return cls(
            test_distribution=(adict['test_distribution']),
            name=adict['name'],
            observed_statistic=adict['observed_statistic'],
            quantile=adict['quantile'],
            sim_name=adict['sim_name'],
            obs_name=adict['obs_name'],
            obs_catalog_repr=adict['obs_catalog_repr'],
            status=adict['status'],
            min_mw=adict['min_mw'],
        )

    def plot(self):
        # Subclasses provide a concrete plotting implementation.
        raise NotImplementedError("plot not implemented on EvaluationResult class.")
class CatalogNumberTestResult(EvaluationResult):
    """Result of the catalog-based number test (N-test)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def plot(self, show=False, plot_args=None):
        """Plot the number-test distribution.

        Args:
            show (bool): whether to display the figure.
            plot_args (dict): user plotting arguments; override the defaults.

        Returns:
            axes handle returned by plots.plot_number_test
        """
        plot_args = plot_args or {}
        td = self.test_distribution
        min_bin, max_bin = numpy.min(td), numpy.max(td)
        # hard-code some logic for bin size; counts are integers so unit
        # bins are used unless the range collapses
        bins = numpy.arange(min_bin, max_bin)
        if len(bins) == 1:
            bins = 3
        plot_args_defaults = {'percentile': 95,
                              'title': 'Number Test',
                              'xlabel': 'Event count in catalog',
                              'bins': bins}
        # merge user-supplied arguments over the defaults
        plot_args_defaults.update(plot_args)
        # bug fix: forward the merged dict — previously the defaults were
        # built and then discarded, passing only the raw user args
        ax = plots.plot_number_test(self, show=show, plot_args=plot_args_defaults)
        return ax
class CatalogPseudolikelihoodTestResult(EvaluationResult):
    """Result of the catalog-based pseudolikelihood test."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def plot(self, show=False, plot_args=None):
        """Plot the pseudolikelihood-test distribution.

        Args:
            show (bool): whether to display the figure.
            plot_args (dict): user plotting arguments; override the defaults.

        Returns:
            axes handle returned by plots.plot_likelihood_test
        """
        plot_args = plot_args or {}
        plot_args_defaults = {'percentile': 95,
                              'title': 'Pseudolikelihood Test',
                              'bins': 'auto'}
        # merge user-supplied arguments over the defaults
        plot_args_defaults.update(plot_args)
        # bug fix: forward the merged dict — previously the defaults were
        # built and then discarded, passing only the raw user args
        ax = plots.plot_likelihood_test(self, show=show, plot_args=plot_args_defaults)
        return ax
class CatalogMagnitudeTestResult(EvaluationResult):
    """Result of the catalog-based magnitude test (M-test)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def plot(self, show=False, plot_args=None):
        """Plot the magnitude-test distribution.

        Args:
            show (bool): whether to display the figure.
            plot_args (dict): user plotting arguments; override the defaults.

        Returns:
            axes handle returned by plots.plot_magnitude_test
        """
        plot_args = plot_args or {}
        plot_args_defaults = {'percentile': 95,
                              'title': 'Magnitude Test',
                              'bins': 'auto'}
        # merge user-supplied arguments over the defaults
        plot_args_defaults.update(plot_args)
        # bug fix: forward the merged dict — previously the defaults were
        # built and then discarded, passing only the raw user args
        ax = plots.plot_magnitude_test(self, show=show, plot_args=plot_args_defaults)
        return ax
class CatalogSpatialTestResult(EvaluationResult):
    """Result of the catalog-based spatial test (S-test)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def plot(self, show=False, plot_args=None):
        """Plot the spatial-test distribution.

        Args:
            show (bool): whether to display the figure.
            plot_args (dict): user plotting arguments; override the defaults.

        Returns:
            axes handle returned by plots.plot_spatial_test
        """
        plot_args = plot_args or {}
        plot_args_defaults = {
            'percentile': 95,
            'title': 'Spatial Test',
            'bins': 'auto'
        }
        # merge user-supplied arguments over the defaults
        plot_args_defaults.update(plot_args)
        # bug fix: forward the merged dict — previously the defaults were
        # built and then discarded, passing only the raw user args
        ax = plots.plot_spatial_test(self, show=show, plot_args=plot_args_defaults)
        return ax
class CalibrationTestResult(EvaluationResult):
    """Result of a calibration test."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def plot(self, show=False, axes=None, plot_args=None):
        """Plot the calibration test.

        Args:
            show (bool): whether to display the figure.
            axes: optional existing axes to draw into.
            plot_args (dict): user plotting arguments; override the defaults.

        Returns:
            axes handle returned by plots.plot_calibration_test
        """
        plot_args = plot_args or {}
        # set plotting defaults
        plot_args_defaults = {
            'label': self.sim_name,
            'title': self.name
        }
        # merge user-supplied arguments over the defaults
        plot_args_defaults.update(plot_args)
        # bug fix: forward the merged dict — previously the defaults were
        # built and then discarded, passing only the raw user args
        ax = plots.plot_calibration_test(self, show=show, axes=axes, plot_args=plot_args_defaults)
        return ax
class EvaluationConfiguration:
    """
    Metadata describing how an evaluation run was produced, including which
    evaluation versions and files were used.
    """

    def __init__(self, compute_time=None, catalog_file=None, forecast_file=None, n_cat=None,
                 eval_start_epoch=None, eval_end_epoch=None, git_hash=None, evaluations=None, forecast_name=None):
        """
        Args:
            compute_time (int): utc_epoch_time in millis indicating time plotting was completed
            catalog_file (str): filename of the catalog used to evaluate forecast
            forecast_file (str): filename of the forecast
            n_cat (int): number of catalogs processed
            eval_start_epoch (int): utc_epoch_time indicating start time of evaluations
            eval_end_epoch (int): utc_epoch_time indicating end time of evaluations
            git_hash (str): hash indicating commit used for evaluations
            evaluations (list): entries with 'name', 'version' and 'fnames' keys
        """
        self.compute_time = compute_time
        self.catalog_file = catalog_file
        self.forecast_file = forecast_file
        self.forecast_name = forecast_name
        self.n_cat = n_cat
        self.eval_start_epoch = eval_start_epoch
        self.eval_end_epoch = eval_end_epoch
        self.git_hash = git_hash
        self.evaluations = evaluations or []

    def to_dict(self):
        """Serialize the configuration to a plain dict."""
        return {
            'compute_time': self.compute_time,
            'forecast_file': self.forecast_file,
            'catalog_file': self.catalog_file,
            'n_cat': self.n_cat,
            'forecast_name': self.forecast_name,
            'eval_start_epoch': self.eval_start_epoch,
            'eval_end_epoch': self.eval_end_epoch,
            'git_hash': self.git_hash,
            'evaluations': self.evaluations
        }

    @classmethod
    def from_dict(cls, adict):
        """Inverse of :meth:`to_dict`."""
        return cls(
            compute_time=adict['compute_time'],
            catalog_file=adict['catalog_file'],
            forecast_file=adict['forecast_file'],
            forecast_name=adict['forecast_name'],
            n_cat=adict['n_cat'],
            eval_start_epoch=adict['eval_start_epoch'],
            eval_end_epoch=adict['eval_end_epoch'],
            git_hash=adict['git_hash'],
            evaluations=adict['evaluations'])

    def get_evaluation_version(self, name):
        """Return the stored version for evaluation ``name``, or None."""
        return next(
            (entry['version'] for entry in self.evaluations if entry['name'] == name),
            None)

    def get_fnames(self, name):
        """Return the stored filenames for evaluation ``name``, or None."""
        return next(
            (entry['fnames'] for entry in self.evaluations if entry['name'] == name),
            None)

    def update_version(self, name, version, fnames):
        """Update every entry named ``name``; append a new entry if absent."""
        matches = [entry for entry in self.evaluations if entry['name'] == name]
        for entry in matches:
            entry['version'] = version
            entry['fnames'] = fnames
        if not matches:
            self.evaluations.append({'name': name, 'version': version, 'fnames': fnames})
class Polygon:
    """
    A 2d polygon defined by a sequence of vertices.

    May contain an arbitrary number of vertices; the path is treated as not
    being closed.
    """

    def __init__(self, points):
        # instance members
        self.points = points
        self.origin = self.points[0]
        # backed by a matplotlib Path for point-in-polygon queries
        # (https://matplotlib.org/3.1.1/api/path_api.html)
        self.path = matplotlib.path.Path(self.points)

    def __str__(self):
        return str(self.origin)

    def contains(self, points):
        """ Returns a bool array which is True if the path contains the corresponding point.

        Args:
            points: 2d numpy array
        """
        candidates = np.array(points)
        # promote a single (x, y) pair to a 1-row 2d array
        if candidates.ndim == 1:
            candidates = candidates.reshape(1, -1)
        return self.path.contains_points(candidates)

    def centroid(self):
        """ return the centroid of the polygon."""
        n_vertices = len(self.points)
        x_total = 0
        y_total = 0
        for vertex in self.points:
            x_total += vertex[0]
            y_total += vertex[1]
        return x_total / n_vertices, y_total / n_vertices

    def get_xcoords(self):
        """x coordinates of all vertices as a numpy array."""
        return np.array(self.points)[:, 0]

    def get_ycoords(self):
        """y coordinates of all vertices as a numpy array."""
        return np.array(self.points)[:, 1]

    @classmethod
    def from_great_circle_radius(cls, centroid, radius, num_points=10):
        """
        Generates a polygon object from a given radius and centroid location.

        Args:
            centroid: (lon, lat)
            radius: should be in (meters)
            num_points: more points is higher resolution polygon

        Returns:
            polygon
        """
        geod = pyproj.Geod(ellps='WGS84')
        azim = np.linspace(0, 360, num_points)
        # vectors with the same length as azim for the geodesic computation
        center_lons = np.full(num_points, centroid[0], dtype=float)
        center_lats = np.full(num_points, centroid[1], dtype=float)
        radii = np.full(num_points, radius, dtype=float)
        # project each azimuth out to the requested great-circle distance
        endlon, endlat, backaz = geod.fwd(center_lons, center_lats, azim, radii)
        # class method
        return cls(np.column_stack([endlon, endlat]))
|
{"hexsha": "5224a83d64fc5abc4aa02b4c3e496339f398a982", "size": 12647, "ext": "py", "lang": "Python", "max_stars_repo_path": "csep/models.py", "max_stars_repo_name": "wsavran/csep2", "max_stars_repo_head_hexsha": "daf44c8c31badf8474794a32f89f2c44dcbe1f61", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2020-12-16T06:23:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T10:23:12.000Z", "max_issues_repo_path": "csep/models.py", "max_issues_repo_name": "wsavran/csep2", "max_issues_repo_head_hexsha": "daf44c8c31badf8474794a32f89f2c44dcbe1f61", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 132, "max_issues_repo_issues_event_min_datetime": "2020-09-18T13:33:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T17:10:54.000Z", "max_forks_repo_path": "csep/models.py", "max_forks_repo_name": "wsavran/csep2", "max_forks_repo_head_hexsha": "daf44c8c31badf8474794a32f89f2c44dcbe1f61", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-09-10T11:35:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T14:40:14.000Z", "avg_line_length": 34.7445054945, "max_line_length": 121, "alphanum_fraction": 0.6091563217, "include": true, "reason": "import numpy", "num_tokens": 2758}
|
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class BanditEnv(gym.Env):
    """
    Bandit environment base to allow agents to interact with the class n-armed bandit

    p_dist:
        A list of probabilities of the likelihood that a particular bandit will pay out
    r_dist:
        A list of either rewards (if number) or means and standard deviations (if list)
        of the payout that bandit has
    """
    def __init__(self, p_dist, r_dist):
        self.p_dist = p_dist
        self.r_dist = r_dist

        self.n_bandits = len(p_dist)
        self.action_space = spaces.Discrete(self.n_bandits)
        self.observation_space = spaces.Discrete(1)

        # Bug fix: this previously called self._seed(), but the method
        # defined on this class is named `seed`, so this class's RNG setup
        # was never reached.
        self.seed()

    def seed(self, seed=None):
        """Seed the environment RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Pull arm `action`; returns (observation, reward, done, info)."""
        assert self.action_space.contains(action)

        done = False

        # NOTE(review): the reward compares a standard-normal draw (not a
        # uniform one) against p_dist[action], and uses numpy's global RNG
        # rather than self.np_random — confirm both are intended.
        if np.random.randn(1) > self.p_dist[action]:
            reward = 1  # self.r_dist[0]
        else:
            reward = -1  # self.r_dist[1]

        return 0.0, reward, done, {}

    def reset(self):
        # Single-state observation space; the observation is always 0.
        return 0

    def render(self, mode='human', close=False):
        # No visualization for this environment.
        pass
class FourArmedBandit(BanditEnv):
    """Stochastic version of four-armed bandit where bandit four pays out with highest reward"""

    def __init__(self):
        super().__init__(p_dist=[0.2, 0.0, -0.2, -5], r_dist=[1, -1])
|
{"hexsha": "dc418914b2159798e1908a0ad2ed680f4cbeab7a", "size": 2041, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tests/bandit.py", "max_stars_repo_name": "IBM-DSE/dybm", "max_stars_repo_head_hexsha": "3484e337954c017f0a20166403a6ddba4ce274c0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-01T13:12:48.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-01T13:12:48.000Z", "max_issues_repo_path": "src/tests/bandit.py", "max_issues_repo_name": "IBM-DSE/dybm", "max_issues_repo_head_hexsha": "3484e337954c017f0a20166403a6ddba4ce274c0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tests/bandit.py", "max_forks_repo_name": "IBM-DSE/dybm", "max_forks_repo_head_hexsha": "3484e337954c017f0a20166403a6ddba4ce274c0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3472222222, "max_line_length": 96, "alphanum_fraction": 0.6653601176, "include": true, "reason": "import numpy", "num_tokens": 511}
|
# Extract "|key = value" template fields from the wiki dump and print them
# as a tab-separated key/value table.
# Fix: the file handle was previously opened and never closed; the do-block
# form of `open` closes it deterministically.
dict = Dict()
open("jawiki-country.txt", "r") do f
    for line in readlines(f)
        if ismatch(r"^\|.+\s=\s", line)
            m = match(r"^\|(.+)\s=\s(.+)", line)
            dict[m.captures[1]] = m.captures[2]
        end
    end
end

println("key\tvalue")
for k in keys(dict)
    println(k, "\t", dict[k])
end
|
{"hexsha": "65fd9e84050a29aafcf68f4f5383a6d8dca1c298", "size": 274, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "code/25.jl", "max_stars_repo_name": "nzw0301/julia-nlp100kncok", "max_stars_repo_head_hexsha": "1fd992b28c702328177f1b9fa9c261c4c116149c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-05-30T20:04:49.000Z", "max_stars_repo_stars_event_max_datetime": "2015-07-22T09:34:05.000Z", "max_issues_repo_path": "code/25.jl", "max_issues_repo_name": "nzw0301/julia-nlp100knock", "max_issues_repo_head_hexsha": "1fd992b28c702328177f1b9fa9c261c4c116149c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/25.jl", "max_forks_repo_name": "nzw0301/julia-nlp100knock", "max_forks_repo_head_hexsha": "1fd992b28c702328177f1b9fa9c261c4c116149c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2666666667, "max_line_length": 39, "alphanum_fraction": 0.5656934307, "num_tokens": 102}
|
import os
import numpy as np
from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
from ..core import ops
from paz.models import SSD300
from paz.pipelines import SingleShotInference
from paz.evaluation import evaluate
from paz.datasets import VOC
class DrawInferences(Callback):
    """Saves an image with its corresponding inferences after every epoch.

    # Arguments
        save_path: String. Directory root; images go in `save_path/images`.
        images: List of images, each fed through `pipeline` individually.
        pipeline: Paz Processor for performing inference.
        input_topic: Key under which each image is handed to the pipeline.
        label_topic: Key under which the pipeline returns the rendered image.
        verbose: If truthy, prints a message after saving each epoch's images.
    """
    def __init__(self, save_path, images, pipeline, input_topic='image',
                 label_topic='image', verbose=1):
        super(DrawInferences, self).__init__()
        self.save_path = os.path.join(save_path, 'images')
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        self.pipeline = pipeline
        self.images = images
        self.input_topic = input_topic
        self.label_topic = label_topic
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs=None):
        """Runs inference on every image and writes the results to disk."""
        epoch_directory = os.path.join(self.save_path, 'epoch_%03d' % epoch)
        for image_arg, image in enumerate(self.images.copy()):
            inferences = self.pipeline({self.input_topic: image})
            if not os.path.exists(epoch_directory):
                os.makedirs(epoch_directory)
            output_name = os.path.join(epoch_directory, 'image_%03d.png' % image_arg)
            ops.save_image(output_name, inferences[self.label_topic])
        if self.verbose:
            print('Saving predicted images in:', self.save_path)
class LearningRateScheduler(Callback):
    """ Callback for reducing learning rate at specific epochs.

    # Arguments
        learning_rate: float. Indicates the starting learning rate.
        gamma_decay: float. In an scheduled epoch the learning rate
            is multiplied by this factor.
        scheduled_epochs: List of integers. Indicates in which epochs
            the learning rate will be multiplied by the gamma decay factor.
        verbose: Integer. If bigger than 0 a message with the learning
            rate decay will be displayed during optimization.
    """
    def __init__(
            self, learning_rate, gamma_decay, scheduled_epochs, verbose=1):
        super(LearningRateScheduler, self).__init__()
        self.learning_rate = learning_rate
        self.gamma_decay = gamma_decay
        self.scheduled_epochs = scheduled_epochs
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        """Writes the scheduled learning rate into the model optimizer."""
        if not hasattr(self.model.optimizer, 'lr'):
            raise ValueError('Optimizer must have a "lr" attribute.')
        # Fix: the optimizer's current rate used to be read into this local
        # and immediately overwritten (dead store) — the schedule alone
        # determines the new rate.
        learning_rate = self.schedule(epoch)
        if not isinstance(learning_rate, (float, np.float32, np.float64)):
            raise ValueError('Learning rate should be float.')
        K.set_value(self.model.optimizer.lr, learning_rate)
        if self.verbose > 0:
            print('\nEpoch %05d: LearningRateScheduler reducing learning '
                  'rate to %s.' % (epoch + 1, learning_rate))

    def schedule(self, epoch):
        """Returns the rate for `epoch`, applying the decay once per
        scheduled epoch (mutates self.learning_rate)."""
        if epoch in self.scheduled_epochs:
            self.learning_rate = self.learning_rate * self.gamma_decay
        return self.learning_rate
class Evaluate(Callback):
    """Runs a VOC mAP evaluation of an SSD300 model every
    ``eval_per_epoch`` epochs, restoring weights from ``file_path``.

    # Arguments
        file_path: Checkpoint filename template; formatted below with the
            epoch number and the logged metrics.
        class_names: Ordered class labels; index 0 is skipped when
            reporting per-class AP (treated as background).
        data_split: Dataset split passed to the VOC data manager.
        data_name: Dataset name passed to the VOC data manager.
        dataset_path: Root path of the dataset.
        eval_per_epoch: Evaluation cadence in epochs.
    """
    def __init__(
            self, file_path, class_names, data_split,
            data_name, dataset_path, eval_per_epoch):
        super(Evaluate, self).__init__()
        self.file_path = file_path
        self.class_names = class_names
        self.data_split = data_split
        self.data_name = data_name
        self.dataset_path = dataset_path
        self.eval_per_epoch = eval_per_epoch

    def on_epoch_end(self, epoch, logs):
        # Only evaluate on every eval_per_epoch-th (1-based) epoch.
        if (epoch+1) % self.eval_per_epoch == 0:
            # Fixed inference thresholds: keep nearly all detections
            # (score 0.01) and apply NMS at IoU 0.45.
            score_thresh, nms_thresh, labels = 0.01, .45, self.class_names
            # Rebuild the model from the checkpoint written for this epoch.
            model = SSD300(
                weights_path=self.file_path.format(
                    epoch=epoch + 1,
                    **logs)
            )
            detector = SingleShotInference(model, labels, score_thresh, nms_thresh)
            # Map class name -> integer index expected by the evaluator.
            class_dict = {
                class_name: class_arg for class_arg, class_name in enumerate(self.class_names)
            }
            data_manager = VOC(self.dataset_path, self.data_split, name=self.data_name, evaluate=True)
            dataset = data_manager.load_data()
            # PASCAL VOC evaluation at IoU 0.5 with the 2007 11-point metric.
            result = evaluate(
                detector,
                dataset,
                class_dict,
                iou_thresh=0.5,
                use_07_metric=True)
            result_str = "mAP: {:.4f}\n".format(result["map"])
            # NOTE(review): `metrics` is populated below but never used in
            # the visible code (only result_str is printed) — confirm
            # whether it was meant to be written into `logs`.
            metrics = {'mAP': result["map"]}
            for arg, ap in enumerate(result["ap"]):
                if arg == 0 or np.isnan(ap):  # skip background
                    continue
                metrics[self.class_names[arg]] = ap
                result_str += "{:<16}: {:.4f}\n".format(self.class_names[arg], ap)
            print(result_str)
|
{"hexsha": "c798b82ad86a0e33b377f50e586d2a1f5e5d81c6", "size": 5358, "ext": "py", "lang": "Python", "max_stars_repo_path": "paz/optimization/callbacks.py", "max_stars_repo_name": "SushmaDG/MaskRCNN", "max_stars_repo_head_hexsha": "10f27fed31a2927b585aa1815cb5e096da540952", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-30T03:40:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T03:40:35.000Z", "max_issues_repo_path": "paz/optimization/callbacks.py", "max_issues_repo_name": "SushmaDG/MaskRCNN", "max_issues_repo_head_hexsha": "10f27fed31a2927b585aa1815cb5e096da540952", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paz/optimization/callbacks.py", "max_forks_repo_name": "SushmaDG/MaskRCNN", "max_forks_repo_head_hexsha": "10f27fed31a2927b585aa1815cb5e096da540952", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-22T01:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T01:54:31.000Z", "avg_line_length": 40.2857142857, "max_line_length": 102, "alphanum_fraction": 0.6270996641, "include": true, "reason": "import numpy", "num_tokens": 1107}
|
[STATEMENT]
lemma deadlock_free_skip:"DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>F\<^sub>D SKIP"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>F\<^sub>D SKIP
[PROOF STEP]
by(simp add:DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P_def failure_divergence_refine_def, rule fix_ind, simp_all)
|
{"llama_tokens": 157, "file": "HOL-CSP_Assertions", "length": 1}
|
"""
Brains of the operation, runs tensorflow to determine a phonetic
spelling for any made up word
"""
# Sample: https://github.com/Piasy/Udacity-DLND/blob/master/language-translation/dlnd_language_translation.ipynb
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import numpy as np
tf.reset_default_graph() #Clears the default graph stack and resets the global default graph.
sess = tf.InteractiveSession() #initializes a tensorflow session
# Load the CMU pronouncing dictionary: each line is "WORD PH1 PH2 ...".
# `data` accumulates [word, phone_list] pairs used by the rest of the
# script; the two maxima track the longest source (letters) and target
# (phones) sequences. Improvements: `with` ensures the file is closed even
# on error, `max()` replaces manual comparisons, and the redundant
# `list([...])` wrapper is dropped.
data = []
max_word_len = -1
max_phone_len = -1
with open('cmudict.dict') as data_file:
    for line in data_file:
        tok = line.split()
        word = tok[0]
        # Alternate pronunciations are written "word(2)"; strip the marker
        # so all variants share one spelling.
        if '(' in word:
            word = word[:word.index('(')]
        max_word_len = max(max_word_len, len(word))
        phones = tok[1:]
        max_phone_len = max(max_phone_len, len(phones))
        data.append([word, phones])
print("Enc:", max_word_len)
print("Dec:", max_phone_len)
print(data[0])
# Write the source (letters) and target (phones) sequence files, building
# the source vocabulary as a side effect. Character ids start at 1; id 0 is
# reserved for <PAD>, and the next free id after the last character becomes
# <UNK>. Improvement: `with` closes both files deterministically.
src__txt_to_int = {}
src__int_to_txt = {}
cnt = 1
with open('src.txt', "w") as f1, open('tar.txt', "w") as f2:
    for word, phones in data:
        for char in word:
            f1.write(char + " ")
            if char not in src__txt_to_int:
                src__txt_to_int[char] = cnt
                src__int_to_txt[cnt] = char
                cnt += 1
        f1.write("\n")
        for phone in phones:
            f2.write(phone + " ")
        f2.write("\n")
src__txt_to_int['<PAD>'] = 0
src__int_to_txt[0] = '<PAD>'
src__txt_to_int['<UNK>'] = cnt
src__int_to_txt[cnt] = '<UNK>'
print("src__txt_to_int:", src__txt_to_int)
print("src__int_to_txt:", src__int_to_txt)
# Build the target (phone) vocabulary from the CMU symbol inventory.
# Phone ids start at 1; <EOS>, <GO> and <UNK> follow the phones, and id 0
# is reserved for <PAD>. Improvement: `with` closes the file; the token is
# extracted once per line instead of rebinding the loop variable.
tar__txt_to_int = {}
tar__int_to_txt = {}
cnt = 1
with open('cmudict.symbols', 'r') as phonef:
    for line in phonef:
        symbol = line.split()[0]
        tar__txt_to_int[symbol] = cnt
        tar__int_to_txt[cnt] = symbol
        cnt += 1
tar__txt_to_int['<EOS>'] = cnt
tar__int_to_txt[cnt] = '<EOS>'
cnt += 1
tar__txt_to_int['<GO>'] = cnt
tar__int_to_txt[cnt] = '<GO>'
cnt += 1
tar__txt_to_int['<UNK>'] = cnt
tar__int_to_txt[cnt] = '<UNK>'
tar__txt_to_int['<PAD>'] = 0
tar__int_to_txt[0] = '<PAD>'
print("txt->int:", tar__txt_to_int)
print("int->txt:", tar__int_to_txt)
# Re-encode src.txt / tar.txt as integer-id sequences that tf can consume.
# src_f / tar_f keep the same sequences in memory for pickling below; every
# target sequence is terminated with the <EOS> id. Improvements: `with`
# closes all four files; the vocabulary lookup is done once per token.
src_f = []
tar_f = []
with open('src.txt', "r") as f1, open('int_src.txt', 'w') as f1ints:
    for line in f1:
        encoded = []
        for tok in line.split():
            char_id = src__txt_to_int[tok]
            f1ints.write(str(char_id) + " ")
            encoded.append(char_id)
        src_f.append(encoded)
        f1ints.write("\n")
with open('tar.txt', "r") as f2, open('int_tar.txt', 'w') as f2ints:
    for line in f2:
        encoded = []
        for tok in line.split():
            # '#' truncates the rest of the line (appears to mark a
            # trailing comment in the source data — confirm).
            if tok == '#':
                break
            phone_id = tar__txt_to_int[tok]
            f2ints.write(str(phone_id) + " ")
            encoded.append(phone_id)
        encoded.append(tar__txt_to_int['<EOS>'])
        f2ints.write(str(tar__txt_to_int['<EOS>']) + '\n')
        tar_f.append(encoded)
'''
Preprocess finished...
Split cmudict.dict into source sequence (words are letters in this sequence(makes sense!?))
and target sequences (these are the phones) in two files:
src sequences: src.txt
tar sequences: tar.txt
'''
# NOTE(review): src_path / tar_path are not used in the visible code —
# confirm whether later (unseen) code reads them.
src_path='src.txt'
tar_path='tar.txt'
'''
!!!IMPORTANT INFORMATION!!!
datapoints with multi prounciations: src1 sequence = scr2 sequence but matching tar1 sequence != tar2 sequence
How does this affect learning?
datapoints with forgin pronunications simmilar
How does this affect learning?
should we just remove this points altogether?
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

Source:
text_to_int dict: src__txt_to_int
int_to_text dict: src__int_to_txt

Target:
text_to_int dict: tar__txt_to_int
int_to_tect dict: tar__int_to_txt

Used dictionaries to map sequences in a way that tf can understand:
Source_id'd: int_src.txt
Target_id'd: int_tar.txt
'''
# Quick sanity check of the first few encoded source sequences.
print(src_f[:10])
'''
Now lets save this in a pickle file...
'''
#first get id's in a list:
# NOTE(review): src_ids / tar_ids are read back from the int files as
# string tokens but are never used below — the pickle stores src_f / tar_f
# instead. Confirm whether this re-read is dead code.
src_ids=[]
tar_ids=[]
f1 = open('int_src.txt','r')
f2 = open('int_tar.txt','r')
for line in f1:
    toks = line.split()
    src_ids.append(toks)
for line in f2:
    toks = line.split()
    tar_ids.append(toks)
f1.close()
f2.close()

import pickle
# Persist the integer sequences and both vocabulary directions so the
# training stage can reload them without re-running preprocessing.
with open('preprocess.p', 'wb') as out_file:
    pickle.dump((
        (src_f, tar_f),
        (src__txt_to_int, tar__txt_to_int),
        (src__int_to_txt, tar__int_to_txt)), out_file)
#From Sample XXXX
def model_inputs():
    """
    Create the TF placeholders that feed the seq2seq graph.

    return Tuple (input, targets, learning rate, keep probability,
        target sequence length, max target sequence length,
        source sequence length)
    """
    inputs = tf.placeholder(tf.int32, [None, None], name="input")
    targets = tf.placeholder(tf.int32, [None, None])
    learning_rate = tf.placeholder(tf.float32)
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    target_seq_len = tf.placeholder(tf.int32, [None], name="target_sequence_length")
    # Longest target sequence in the current batch, derived from the
    # per-sequence lengths above.
    max_target_len = tf.reduce_max(target_seq_len, name="max_target_len")
    source_seq_len = tf.placeholder(tf.int32, [None], name="source_sequence_length")
    return (inputs, targets, learning_rate, keep_prob,
            target_seq_len, max_target_len, source_seq_len)
#From Sample XXXX
def process_decoder_input(target_data, tar__txt_to_int, batch_size):
    """
    Shift target sequences one step right for teacher forcing: drop the
    last token of every sequence and prepend the <GO> id.

    :param target_data: Target Placehoder
    :param tar__txt_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    go_column = tf.fill([batch_size, 1], tar__txt_to_int['<GO>'])
    return tf.concat([go_column, trimmed], 1)
#From Sample XXXX
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    """
    Create the encoder: embedding -> stacked LSTM with dropout -> dynamic RNN.

    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :param source_sequence_length: a list of the lengths of each sequence in the batch
    :param source_vocab_size: vocabulary size of source data
    :param encoding_embedding_size: embedding size of source data
    :return: tuple (RNN output, RNN state)
    """
    # Embed the integer-id inputs.
    embedded = tf.contrib.layers.embed_sequence(
        rnn_inputs, source_vocab_size, encoding_embedding_size)

    def build_cell():
        # One LSTM layer wrapped with output dropout.
        cell = tf.contrib.rnn.LSTMCell(
            rnn_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)

    stacked = tf.contrib.rnn.MultiRNNCell(
        [build_cell() for _ in range(num_layers)])
    return tf.nn.dynamic_rnn(
        stacked, embedded,
        sequence_length=source_sequence_length, dtype=tf.float32)
#From Sample XXXX
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
                         target_sequence_length, max_summary_length,
                         output_layer, keep_prob):
    """
    Build the training branch of the decoder (teacher forcing).

    :param encoder_state: Encoder State
    :param dec_cell: Decoder RNN Cell
    :param dec_embed_input: Decoder embedded input
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_summary_length: The length of the longest sequence in the batch
    :param output_layer: Function to apply the output layer
    :param keep_prob: Dropout keep probability
    :return: BasicDecoderOutput containing training logits and sample_id
    """
    # Feed the ground-truth (embedded) targets at every step.
    helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=dec_embed_input,
        sequence_length=target_sequence_length,
        time_major=False)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        dec_cell, helper, encoder_state, output_layer)
    # dynamic_decode returns a tuple; element [0] is the decoder output.
    return tf.contrib.seq2seq.dynamic_decode(
        decoder,
        impute_finished=True,
        maximum_iterations=max_summary_length)[0]
#From Sample XXXX
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    """
    Create a decoding layer for inference (greedy decoding from <GO>).
    :param encoder_state: Encoder state
    :param dec_cell: Decoder RNN Cell
    :param dec_embeddings: Decoder embeddings
    :param start_of_sequence_id: GO ID
    :param end_of_sequence_id: EOS Id
    :param max_target_sequence_length: Maximum length of target sequences
    :param vocab_size: Size of decoder/target vocabulary
    :param output_layer: Function to apply the output layer
    :param batch_size: Batch size
    :param keep_prob: Dropout keep probability (unused here)
    :return: BasicDecoderOutput containing inference logits and sample_id
    """
    # NOTE(review): this variable scope wraps only the start-token constant;
    # confirm whether the helper/decoder below were meant to be inside it.
    with tf.variable_scope("decode", reuse=True):
        start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size], name='start_tokens')

    # Greedy decoding: at each step feed the embedding of the previous
    # prediction; stop when <EOS> is produced.
    inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
                                                                start_tokens,
                                                                end_of_sequence_id)
    # Basic decoder
    inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                        inference_helper,
                                                        encoder_state,
                                                        output_layer)
    # Perform dynamic decoding using the decoder; [0] selects the output
    # element of the returned tuple.
    BasicDecoderOutput = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
                                                           impute_finished=True,
                                                           maximum_iterations=max_target_sequence_length)[0]
    return BasicDecoderOutput
#From Sample XXXX
def decoding_layer(dec_input, encoder_state,
                   target_sequence_length, max_target_sequence_length,
                   rnn_size,
                   num_layers, target_vocab_to_int, target_vocab_size,
                   batch_size, keep_prob, decoding_embedding_size):
    """
    Create the decoder: shared embedding, stacked LSTM cell and dense
    projection, used by both the training and inference branches.

    :param dec_input: Decoder input
    :param encoder_state: Encoder state
    :param target_sequence_length: The lengths of each sequence in the target batch
    :param max_target_sequence_length: Maximum length of target sequences
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param target_vocab_size: Size of target vocabulary
    :param batch_size: The size of the batch
    :param keep_prob: Dropout keep probability
    :param decoding_embedding_size: Decoding embedding size
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Shared embedding matrix; the training branch looks up the shifted
    # target ids, the inference branch embeds its own predictions.
    dec_embeddings = tf.Variable(
        tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)

    def lstm_layer():
        return tf.contrib.rnn.LSTMCell(
            rnn_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [lstm_layer() for _ in range(num_layers)])
    # Dense projection from RNN output to vocabulary logits.
    output_layer = Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

    train_output = decoding_layer_train(
        encoder_state, decoder_cell, dec_embed_input, target_sequence_length,
        max_target_sequence_length, output_layer, keep_prob)
    infer_output = decoding_layer_infer(
        encoder_state, decoder_cell, dec_embeddings,
        target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'],
        max_target_sequence_length, target_vocab_size, output_layer,
        batch_size, keep_prob)
    return train_output, infer_output
#From Sample XXXX
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
                  source_sequence_length, target_sequence_length,
                  max_target_sentence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers, target_vocab_to_int):
    """Assemble the full sequence-to-sequence network from the encoder and
    decoder building blocks.

    :param input_data: Input placeholder
    :param target_data: Target placeholder
    :param keep_prob: Dropout keep probability placeholder
    :param batch_size: Batch Size
    :param source_sequence_length: Sequence Lengths of source sequences in the batch
    :param target_sequence_length: Sequence Lengths of target sequences in the batch
    :param source_vocab_size: Source vocabulary size
    :param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Decoder embedding size
    :param dec_embedding_size: Encoder embedding size
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
    """
    # Encoder: only the final state is used to seed the decoder; the
    # per-step outputs are discarded.
    _, encoder_state = encoding_layer(
        input_data, rnn_size, num_layers, keep_prob,
        source_sequence_length, source_vocab_size, enc_embedding_size)
    # presumably shifts targets / prepends <GO> — see process_decoder_input,
    # which is defined elsewhere in this file.
    decoder_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
    # decoding_layer already returns the (training, inference) output pair.
    return decoding_layer(
        decoder_input, encoder_state,
        target_sequence_length, max_target_sentence_length,
        rnn_size, num_layers, target_vocab_to_int, target_vocab_size,
        batch_size, keep_prob, dec_embedding_size)
# ---- Training hyperparameters ----
# Number of passes over the full training set.
epochs = 5
# Sequences per gradient step.
batch_size = 64
# Hidden-state size of each LSTM cell.
rnn_size = 128
# Number of stacked LSTM layers in encoder and decoder.
num_layers = 2
# Embedding sizes for the source (encoder) and target (decoder) vocabularies.
encoding_embedding_size = 30
decoding_embedding_size = 30
# Adam learning rate.
learning_rate = 0.001
# Dropout keep probability used during training (1.0 is fed at eval time).
keep_probability = 0.5
# Report train/validation accuracy every `display_step` batches.
display_step = 10
#From Sample XXXX
# Checkpoint path where the trained model is saved.
save_path = 'checkpoints/dev'
# Load the preprocessed corpus: integer-encoded texts plus vocab lookup tables.
with open('preprocess.p', mode='rb') as in_file:
    LOADED = pickle.load(in_file)
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = LOADED
# NOTE(review): named "target" but computed over source_int_text — this matches
# the notebook this was copied from, but verify it is intended.
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
    # Placeholders for data, learning rate, dropout and sequence lengths.
    input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
    #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
    input_shape = tf.shape(input_data)
    # Input sequences are reversed along the last axis before encoding.
    train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
                                                   targets,
                                                   keep_prob,
                                                   batch_size,
                                                   source_sequence_length,
                                                   target_sequence_length,
                                                   max_target_sequence_length,
                                                   len(source_vocab_to_int),
                                                   len(target_vocab_to_int),
                                                   encoding_embedding_size,
                                                   decoding_embedding_size,
                                                   rnn_size,
                                                   num_layers,
                                                   target_vocab_to_int)
    # Name the tensors so they can be looked up after restoring the graph.
    training_logits = tf.identity(train_logits.rnn_output, name='logits')
    inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
    # Mask so positions beyond each sequence's length do not count in the loss.
    masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
    with tf.name_scope("optimization"):
        # Loss function: sequence cross-entropy weighted by the mask.
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks)
        # Optimizer
        optimizer = tf.train.AdamOptimizer(lr)
        # Gradient clipping to [-1, 1]; None gradients (unused vars) dropped.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
# In[19]:
#From Sample XXXX
#XXXXXX MODIFIED FOR OUT PROBLEM XXXXXXX#
def pad_sentence_batch(sentence_batch, pad_int):
    """Right-pad every character sequence in the batch with `pad_int` so all
    sequences share the length of the longest one.

    :param sentence_batch: iterable of sequences (e.g. lists of int ids)
    :param pad_int: integer id of the <PAD> token
    :return: list of lists, all of equal length; [] for an empty batch
    """
    # Guard: max() over an empty batch would raise ValueError.
    if not sentence_batch:
        return []
    max_seq = max(len(seq) for seq in sentence_batch)
    # Copy each sequence and extend it with pad tokens up to max_seq.
    return [list(seq) + [pad_int] * (max_seq - len(seq))
            for seq in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
    """Yield aligned (source, target) batches, padded to uniform length,
    together with the per-row lengths of the padded batches.

    Only full batches are produced; a trailing partial batch is dropped.
    """
    full_batches = len(sources) // batch_size
    for index in range(full_batches):
        lo = index * batch_size
        hi = lo + batch_size
        # Pad each slice so every row in the batch has the same width.
        padded_sources = np.array(pad_sentence_batch(sources[lo:hi], source_pad_int))
        padded_targets = np.array(pad_sentence_batch(targets[lo:hi], target_pad_int))
        # Lengths are taken from the padded rows (so they are all equal),
        # matching what the *_sequence_length placeholders are fed.
        source_lengths = [len(row) for row in padded_sources]
        target_lengths = [len(row) for row in padded_targets]
        yield padded_sources, padded_targets, source_lengths, target_lengths
# In[20]:
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def get_accuracy(target, logits):
    """
    Element-wise accuracy between two 2-D arrays, right-padding the
    narrower one with zeros so both share the same width.
    """
    width = max(target.shape[1], logits.shape[1])

    def widen(arr):
        # Zero-pad columns on the right up to the common width.
        missing = width - arr.shape[1]
        if missing:
            return np.pad(arr, [(0, 0), (0, missing)], 'constant')
        return arr

    return np.mean(np.equal(widen(target), widen(logits)))
# Split data to training and validation sets: the first `batch_size`
# examples are held out for validation, the rest are used for training.
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
# Materialise one padded validation batch up front; it is reused at every
# display step during training.
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,valid_target,batch_size,source_vocab_to_int['<PAD>'],target_vocab_to_int['<PAD>']))
# Train the graph built above, periodically reporting accuracy, then save it.
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch_i in range(epochs):
        for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
                get_batches(train_source, train_target, batch_size,
                            source_vocab_to_int['<PAD>'],
                            target_vocab_to_int['<PAD>'])):
            # One optimisation step; dropout is active (keep_probability < 1).
            _, loss = sess.run(
                [train_op, cost],
                {input_data: source_batch,
                 targets: target_batch,
                 lr: learning_rate,
                 target_sequence_length: targets_lengths,
                 source_sequence_length: sources_lengths,
                 keep_prob: keep_probability})
            if batch_i % display_step == 0 and batch_i > 0:
                # Evaluation passes run with keep_prob 1.0 (dropout disabled).
                batch_train_logits = sess.run(
                    inference_logits,
                    {input_data: source_batch,
                     source_sequence_length: sources_lengths,
                     target_sequence_length: targets_lengths,
                     keep_prob: 1.0})
                batch_valid_logits = sess.run(
                    inference_logits,
                    {input_data: valid_sources_batch,
                     source_sequence_length: valid_sources_lengths,
                     target_sequence_length: valid_targets_lengths,
                     keep_prob: 1.0})
                train_acc = get_accuracy(target_batch, batch_train_logits)
                valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
                print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
                      .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
    # Save Model (checkpoint written once, after all epochs finish)
    saver = tf.train.Saver()
    saver.save(sess, save_path)
    print('Model Trained and Saved')
print('Done!')
#save model
# NOTE(review): the file handle from open(...) is never closed explicitly —
# consider a `with` block; left as-is to keep this review byte-identical.
pickle.dump(save_path, open('params.p','wb'))
|
{"hexsha": "3e39d5809667e7bd59beb544da7708298af311cf", "size": 22136, "ext": "py", "lang": "Python", "max_stars_repo_path": "TF_code/TFLanguageTranslation.py", "max_stars_repo_name": "Cory-Edgett/IntroToAI-Text-to-Speech", "max_stars_repo_head_hexsha": "8c1e05b5e83cbb8d4cc24f7d81445cb3215e4448", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TF_code/TFLanguageTranslation.py", "max_issues_repo_name": "Cory-Edgett/IntroToAI-Text-to-Speech", "max_issues_repo_head_hexsha": "8c1e05b5e83cbb8d4cc24f7d81445cb3215e4448", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TF_code/TFLanguageTranslation.py", "max_forks_repo_name": "Cory-Edgett/IntroToAI-Text-to-Speech", "max_forks_repo_head_hexsha": "8c1e05b5e83cbb8d4cc24f7d81445cb3215e4448", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4078947368, "max_line_length": 257, "alphanum_fraction": 0.6517437658, "include": true, "reason": "import numpy", "num_tokens": 4863}
|
[STATEMENT]
lemma agree_weakening_1:
assumes "\<Gamma> \<turnstile> e, eP, eV : \<tau>" "atom y \<sharp> e" "atom y \<sharp> eP" "atom y \<sharp> eV"
shows "\<Gamma>(y $$:= \<tau>') \<turnstile> e, eP, eV : \<tau>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Gamma>(y $$:= \<tau>') \<turnstile> e, eP, eV : \<tau>
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<Gamma> \<turnstile> e, eP, eV : \<tau>
atom y \<sharp> e
atom y \<sharp> eP
atom y \<sharp> eV
goal (1 subgoal):
1. \<Gamma>(y $$:= \<tau>') \<turnstile> e, eP, eV : \<tau>
[PROOF STEP]
proof (nominal_induct \<Gamma> e eP eV \<tau> avoiding: y \<tau>' rule: agree.strong_induct)
[PROOF STATE]
proof (state)
goal (17 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> \<tau>\<^sub>1 e eP eV \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Lam x e; atom y \<sharp> Lam x eP; atom y \<sharp> Lam x eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Lam x e, Lam x eP, Lam x eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; atom y \<sharp> App e\<^sub>1 e\<^sub>2; atom y \<sharp> App eP\<^sub>1 eP\<^sub>2; atom y \<sharp> App eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> App e\<^sub>1 e\<^sub>2, App eP\<^sub>1 eP\<^sub>2, App eV\<^sub>1 eV\<^sub>2 : \<tau>\<^sub>2
5. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
6. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
10. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
A total of 17 subgoals...
[PROOF STEP]
case (a_Lam x \<Gamma> \<tau>\<^sub>1 e eP eV \<tau>\<^sub>2)
[PROOF STATE]
proof (state)
this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e, eP, eV : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e; atom ?b \<sharp> eP; atom ?b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2
atom y \<sharp> Lam x e
atom y \<sharp> Lam x eP
atom y \<sharp> Lam x eV
goal (17 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> \<tau>\<^sub>1 e eP eV \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Lam x e; atom y \<sharp> Lam x eP; atom y \<sharp> Lam x eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Lam x e, Lam x eP, Lam x eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; atom y \<sharp> App e\<^sub>1 e\<^sub>2; atom y \<sharp> App eP\<^sub>1 eP\<^sub>2; atom y \<sharp> App eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> App e\<^sub>1 e\<^sub>2, App eP\<^sub>1 eP\<^sub>2, App eV\<^sub>1 eV\<^sub>2 : \<tau>\<^sub>2
5. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
6. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
10. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
A total of 17 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e, eP, eV : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e; atom ?b \<sharp> eP; atom ?b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2
atom y \<sharp> Lam x e
atom y \<sharp> Lam x eP
atom y \<sharp> Lam x eV
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e, eP, eV : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e; atom ?b \<sharp> eP; atom ?b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2
atom y \<sharp> Lam x e
atom y \<sharp> Lam x eP
atom y \<sharp> Lam x eV
goal (1 subgoal):
1. \<Gamma>(y $$:= \<tau>') \<turnstile> Lam x e, Lam x eP, Lam x eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
[PROOF STEP]
by (force simp add: fresh_at_base fresh_fmap_update fmupd_reorder_neq)
[PROOF STATE]
proof (state)
this:
\<Gamma>(y $$:= \<tau>') \<turnstile> Lam x e, Lam x eP, Lam x eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
goal (16 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; atom y \<sharp> App e\<^sub>1 e\<^sub>2; atom y \<sharp> App eP\<^sub>1 eP\<^sub>2; atom y \<sharp> App eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> App e\<^sub>1 e\<^sub>2, App eP\<^sub>1 eP\<^sub>2, App eV\<^sub>1 eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
5. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
9. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
A total of 16 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (16 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; atom y \<sharp> App e\<^sub>1 e\<^sub>2; atom y \<sharp> App eP\<^sub>1 eP\<^sub>2; atom y \<sharp> App eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> App e\<^sub>1 e\<^sub>2, App eP\<^sub>1 eP\<^sub>2, App eV\<^sub>1 eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
5. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
9. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
A total of 16 subgoals...
[PROOF STEP]
case (a_App v\<^sub>1 v\<^sub>2 vP\<^sub>1 vP\<^sub>2 vV\<^sub>1 vV\<^sub>2 \<Gamma> \<tau>\<^sub>1 \<tau>\<^sub>2)
[PROOF STATE]
proof (state)
this:
v\<^sub>1 \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<^sub>1; atom ?b \<sharp> vP\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
v\<^sub>1 \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
\<lbrakk>atom ?b \<sharp> \<Gamma>; atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
atom y \<sharp> App v\<^sub>2 \<Gamma>
atom y \<sharp> App vP\<^sub>1 \<tau>\<^sub>1
atom y \<sharp> App vP\<^sub>2 \<tau>\<^sub>2
goal (16 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>1; atom y \<sharp> App e\<^sub>1 e\<^sub>2; atom y \<sharp> App eP\<^sub>1 eP\<^sub>2; atom y \<sharp> App eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> App e\<^sub>1 e\<^sub>2, App eP\<^sub>1 eP\<^sub>2, App eV\<^sub>1 eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
5. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
9. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
A total of 16 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v\<^sub>1 \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<^sub>1; atom ?b \<sharp> vP\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
v\<^sub>1 \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
\<lbrakk>atom ?b \<sharp> \<Gamma>; atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
atom y \<sharp> App v\<^sub>2 \<Gamma>
atom y \<sharp> App vP\<^sub>1 \<tau>\<^sub>1
atom y \<sharp> App vP\<^sub>2 \<tau>\<^sub>2
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v\<^sub>1 \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<^sub>1; atom ?b \<sharp> vP\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
v\<^sub>1 \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
\<lbrakk>atom ?b \<sharp> \<Gamma>; atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
atom y \<sharp> App v\<^sub>2 \<Gamma>
atom y \<sharp> App vP\<^sub>1 \<tau>\<^sub>1
atom y \<sharp> App vP\<^sub>2 \<tau>\<^sub>2
goal (1 subgoal):
1. v\<^sub>1(y $$:= \<tau>') \<turnstile> App v\<^sub>2 \<Gamma>, App vP\<^sub>1 \<tau>\<^sub>1, App vP\<^sub>2 \<tau>\<^sub>2 : vV\<^sub>2
[PROOF STEP]
using term.fresh(9)
[PROOF STATE]
proof (prove)
using this:
v\<^sub>1 \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<^sub>1; atom ?b \<sharp> vP\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> v\<^sub>2, vP\<^sub>1, vP\<^sub>2 : Fun vV\<^sub>1 vV\<^sub>2
v\<^sub>1 \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
\<lbrakk>atom ?b \<sharp> \<Gamma>; atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2\<rbrakk> \<Longrightarrow> v\<^sub>1(?b $$:= ?ba) \<turnstile> \<Gamma>, \<tau>\<^sub>1, \<tau>\<^sub>2 : vV\<^sub>1
atom y \<sharp> App v\<^sub>2 \<Gamma>
atom y \<sharp> App vP\<^sub>1 \<tau>\<^sub>1
atom y \<sharp> App vP\<^sub>2 \<tau>\<^sub>2
?a \<sharp> App ?term1.0 ?term2.0 = (?a \<sharp> ?term1.0 \<and> ?a \<sharp> ?term2.0)
goal (1 subgoal):
1. v\<^sub>1(y $$:= \<tau>') \<turnstile> App v\<^sub>2 \<Gamma>, App vP\<^sub>1 \<tau>\<^sub>1, App vP\<^sub>2 \<tau>\<^sub>2 : vV\<^sub>2
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
v\<^sub>1(y $$:= \<tau>') \<turnstile> App v\<^sub>2 \<Gamma>, App vP\<^sub>1 \<tau>\<^sub>1, App vP\<^sub>2 \<tau>\<^sub>2 : vV\<^sub>2
goal (15 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
8. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
A total of 15 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (15 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
8. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
A total of 15 subgoals...
[PROOF STEP]
case (a_Let x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2)
[PROOF STATE]
proof (state)
this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom x \<sharp> e\<^sub>1
atom x \<sharp> eP\<^sub>1
atom x \<sharp> eV\<^sub>1
\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<lbrakk>atom ?b \<sharp> e\<^sub>1; atom ?b \<sharp> eP\<^sub>1; atom ?b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(?b $$:= ?ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e\<^sub>2; atom ?b \<sharp> eP\<^sub>2; atom ?b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2
atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2
atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2
goal (15 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>atom x \<sharp> y; atom x \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom x \<sharp> e\<^sub>1; atom x \<sharp> eP\<^sub>1; atom x \<sharp> eV\<^sub>1; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2; atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2; atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
4. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
8. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
10. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
A total of 15 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom x \<sharp> e\<^sub>1
atom x \<sharp> eP\<^sub>1
atom x \<sharp> eV\<^sub>1
\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<lbrakk>atom ?b \<sharp> e\<^sub>1; atom ?b \<sharp> eP\<^sub>1; atom ?b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(?b $$:= ?ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e\<^sub>2; atom ?b \<sharp> eP\<^sub>2; atom ?b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2
atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2
atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom x \<sharp> e\<^sub>1
atom x \<sharp> eP\<^sub>1
atom x \<sharp> eV\<^sub>1
\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<lbrakk>atom ?b \<sharp> e\<^sub>1; atom ?b \<sharp> eP\<^sub>1; atom ?b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(?b $$:= ?ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1
\<Gamma>(x $$:= \<tau>\<^sub>1) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> e\<^sub>2; atom ?b \<sharp> eP\<^sub>2; atom ?b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= \<tau>\<^sub>1)(?b $$:= ?ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2
atom y \<sharp> Syntax.Let e\<^sub>1 x e\<^sub>2
atom y \<sharp> Syntax.Let eP\<^sub>1 x eP\<^sub>2
atom y \<sharp> Syntax.Let eV\<^sub>1 x eV\<^sub>2
goal (1 subgoal):
1. \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
[PROOF STEP]
by (auto simp add: fresh_at_base fresh_Pair fresh_fmap_update fmupd_reorder_neq[of x y]
intro!: a_Let(10) agree.a_Let[of x])
[PROOF STATE]
proof (state)
this:
\<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Let e\<^sub>1 x e\<^sub>2, Syntax.Let eP\<^sub>1 x eP\<^sub>2, Syntax.Let eV\<^sub>1 x eV\<^sub>2 : \<tau>\<^sub>2
goal (14 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
7. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
A total of 14 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (14 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
7. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
A total of 14 subgoals...
[PROOF STEP]
case (a_Rec x \<Gamma> z \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV)
[PROOF STATE]
proof (state)
this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom z \<sharp> y
atom z \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom z \<sharp> \<Gamma>
atom z \<sharp> x
\<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> Lam z e; atom ?b \<sharp> Lam z eP; atom ?b \<sharp> Lam z eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(?b $$:= ?ba) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Rec x (Lam z e)
atom y \<sharp> Rec x (Lam z eP)
atom y \<sharp> Rec x (Lam z eV)
goal (14 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>x \<Gamma> y \<tau>\<^sub>1 \<tau>\<^sub>2 e eP eV ya \<tau>'. \<lbrakk>atom x \<sharp> ya; atom x \<sharp> \<tau>'; atom y \<sharp> ya; atom y \<sharp> \<tau>'; atom x \<sharp> \<Gamma>; atom y \<sharp> \<Gamma>; atom y \<sharp> x; \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> Lam y e; atom b \<sharp> Lam y eP; atom b \<sharp> Lam y eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(b $$:= ba) \<turnstile> Lam y e, Lam y eP, Lam y eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2; atom ya \<sharp> Rec x (Lam y e); atom ya \<sharp> Rec x (Lam y eP); atom ya \<sharp> Rec x (Lam y eV)\<rbrakk> \<Longrightarrow> \<Gamma>(ya $$:= \<tau>') \<turnstile> Rec x (Lam y e), Rec x (Lam y eP), Rec x (Lam y eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
7. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
9. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
A total of 14 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom z \<sharp> y
atom z \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom z \<sharp> \<Gamma>
atom z \<sharp> x
\<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> Lam z e; atom ?b \<sharp> Lam z eP; atom ?b \<sharp> Lam z eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(?b $$:= ?ba) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Rec x (Lam z e)
atom y \<sharp> Rec x (Lam z eP)
atom y \<sharp> Rec x (Lam z eV)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
atom x \<sharp> y
atom x \<sharp> \<tau>'
atom z \<sharp> y
atom z \<sharp> \<tau>'
atom x \<sharp> \<Gamma>
atom z \<sharp> \<Gamma>
atom z \<sharp> x
\<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> Lam z e; atom ?b \<sharp> Lam z eP; atom ?b \<sharp> Lam z eV\<rbrakk> \<Longrightarrow> \<Gamma>(x $$:= Fun \<tau>\<^sub>1 \<tau>\<^sub>2)(?b $$:= ?ba) \<turnstile> Lam z e, Lam z eP, Lam z eV : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Rec x (Lam z e)
atom y \<sharp> Rec x (Lam z eP)
atom y \<sharp> Rec x (Lam z eV)
goal (1 subgoal):
1. \<Gamma>(y $$:= \<tau>') \<turnstile> Rec x (Lam z e), Rec x (Lam z eP), Rec x (Lam z eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
[PROOF STEP]
by (auto simp add: fresh_at_base fresh_Pair fresh_fmap_update fmupd_reorder_neq[of x y]
intro!: a_Rec(9) agree.a_Rec[of x])
[PROOF STATE]
proof (state)
this:
\<Gamma>(y $$:= \<tau>') \<turnstile> Rec x (Lam z e), Rec x (Lam z eP), Rec x (Lam z eV) : Fun \<tau>\<^sub>1 \<tau>\<^sub>2
goal (13 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
6. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
A total of 13 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (13 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
6. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
A total of 13 subgoals...
[PROOF STEP]
case (a_Case v v\<^sub>1 v\<^sub>2 vP vP\<^sub>1 vP\<^sub>2 vV vV\<^sub>1 vV\<^sub>2 \<Gamma> \<tau>\<^sub>1 \<tau>\<^sub>2 \<tau>)
[PROOF STATE]
proof (state)
this:
v \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>1; atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
v \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
\<lbrakk>atom ?b \<sharp> vV; atom ?b \<sharp> vV\<^sub>1; atom ?b \<sharp> vV\<^sub>2\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
v \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
\<lbrakk>atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2; atom ?b \<sharp> \<tau>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
atom y \<sharp> Case v\<^sub>1 vV \<tau>\<^sub>1
atom y \<sharp> Case v\<^sub>2 vV\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Case vP vV\<^sub>2 \<tau>
goal (13 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau> e\<^sub>2 eP\<^sub>2 eV\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2; \<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : Fun \<tau>\<^sub>1 \<tau>; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : Fun \<tau>\<^sub>2 \<tau>; atom y \<sharp> Case e e\<^sub>1 e\<^sub>2; atom y \<sharp> Case eP eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Case eV eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Case e e\<^sub>1 e\<^sub>2, Case eP eP\<^sub>1 eP\<^sub>2, Case eV eV\<^sub>1 eV\<^sub>2 : \<tau>
6. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
8. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
10. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
A total of 13 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>1; atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
v \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
\<lbrakk>atom ?b \<sharp> vV; atom ?b \<sharp> vV\<^sub>1; atom ?b \<sharp> vV\<^sub>2\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
v \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
\<lbrakk>atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2; atom ?b \<sharp> \<tau>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
atom y \<sharp> Case v\<^sub>1 vV \<tau>\<^sub>1
atom y \<sharp> Case v\<^sub>2 vV\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Case vP vV\<^sub>2 \<tau>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
\<lbrakk>atom ?b \<sharp> v\<^sub>1; atom ?b \<sharp> v\<^sub>2; atom ?b \<sharp> vP\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> v\<^sub>1, v\<^sub>2, vP : Syntax.Sum vP\<^sub>1 vP\<^sub>2
v \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
\<lbrakk>atom ?b \<sharp> vV; atom ?b \<sharp> vV\<^sub>1; atom ?b \<sharp> vV\<^sub>2\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vV, vV\<^sub>1, vV\<^sub>2 : Fun vP\<^sub>1 \<Gamma>
v \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
\<lbrakk>atom ?b \<sharp> \<tau>\<^sub>1; atom ?b \<sharp> \<tau>\<^sub>2; atom ?b \<sharp> \<tau>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> \<tau>\<^sub>1, \<tau>\<^sub>2, \<tau> : Fun vP\<^sub>2 \<Gamma>
atom y \<sharp> Case v\<^sub>1 vV \<tau>\<^sub>1
atom y \<sharp> Case v\<^sub>2 vV\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Case vP vV\<^sub>2 \<tau>
goal (1 subgoal):
1. v(y $$:= \<tau>') \<turnstile> Case v\<^sub>1 vV \<tau>\<^sub>1, Case v\<^sub>2 vV\<^sub>1 \<tau>\<^sub>2, Case vP vV\<^sub>2 \<tau> : \<Gamma>
[PROOF STEP]
by (fastforce simp: fresh_at_base)
[PROOF STATE]
proof (state)
this:
v(y $$:= \<tau>') \<turnstile> Case v\<^sub>1 vV \<tau>\<^sub>1, Case v\<^sub>2 vV\<^sub>1 \<tau>\<^sub>2, Case vP vV\<^sub>2 \<tau> : \<Gamma>
goal (12 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
A total of 12 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (12 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
A total of 12 subgoals...
[PROOF STEP]
case (a_Prj1 v vP vV \<Gamma> \<tau>\<^sub>1 \<tau>\<^sub>2)
[PROOF STATE]
proof (state)
this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj1 vP
atom y \<sharp> Prj1 vV
atom y \<sharp> Prj1 \<Gamma>
goal (12 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj1 e; atom y \<sharp> Prj1 eP; atom y \<sharp> Prj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj1 e, Prj1 eP, Prj1 eV : \<tau>\<^sub>1
7. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
9. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
A total of 12 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj1 vP
atom y \<sharp> Prj1 vV
atom y \<sharp> Prj1 \<Gamma>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj1 vP
atom y \<sharp> Prj1 vV
atom y \<sharp> Prj1 \<Gamma>
goal (1 subgoal):
1. v(y $$:= \<tau>') \<turnstile> Prj1 vP, Prj1 vV, Prj1 \<Gamma> : \<tau>\<^sub>1
[PROOF STEP]
by (fastforce simp: fresh_at_base)
[PROOF STATE]
proof (state)
this:
v(y $$:= \<tau>') \<turnstile> Prj1 vP, Prj1 vV, Prj1 \<Gamma> : \<tau>\<^sub>1
goal (11 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
7. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
9. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : AuthT \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : AuthT \<tau>; atom y \<sharp> Unauth e; atom y \<sharp> Unauth eP; atom y \<sharp> Unauth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unauth e, Unauth eP, Unauth eV : \<tau>
A total of 11 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (11 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
7. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
9. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : AuthT \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : AuthT \<tau>; atom y \<sharp> Unauth e; atom y \<sharp> Unauth eP; atom y \<sharp> Unauth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unauth e, Unauth eP, Unauth eV : \<tau>
A total of 11 subgoals...
[PROOF STEP]
case (a_Prj2 v vP vV \<Gamma> \<tau>\<^sub>1 \<tau>\<^sub>2)
[PROOF STATE]
proof (state)
this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj2 vP
atom y \<sharp> Prj2 vV
atom y \<sharp> Prj2 \<Gamma>
goal (11 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2; atom y \<sharp> Prj2 e; atom y \<sharp> Prj2 eP; atom y \<sharp> Prj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Prj2 e, Prj2 eP, Prj2 eV : \<tau>\<^sub>2
7. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
8. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
9. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
10. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : AuthT \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : AuthT \<tau>; atom y \<sharp> Unauth e; atom y \<sharp> Unauth eP; atom y \<sharp> Unauth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unauth e, Unauth eP, Unauth eV : \<tau>
A total of 11 subgoals...
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj2 vP
atom y \<sharp> Prj2 vV
atom y \<sharp> Prj2 \<Gamma>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
v \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
\<lbrakk>atom ?b \<sharp> vP; atom ?b \<sharp> vV; atom ?b \<sharp> \<Gamma>\<rbrakk> \<Longrightarrow> v(?b $$:= ?ba) \<turnstile> vP, vV, \<Gamma> : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
atom y \<sharp> Prj2 vP
atom y \<sharp> Prj2 vV
atom y \<sharp> Prj2 \<Gamma>
goal (1 subgoal):
1. v(y $$:= \<tau>') \<turnstile> Prj2 vP, Prj2 vV, Prj2 \<Gamma> : \<tau>\<^sub>2
[PROOF STEP]
by (fastforce simp: fresh_at_base)
[PROOF STATE]
proof (state)
this:
v(y $$:= \<tau>') \<turnstile> Prj2 vP, Prj2 vV, Prj2 \<Gamma> : \<tau>\<^sub>2
goal (10 subgoals):
1. \<And>\<Gamma> y \<tau>'. \<lbrakk>atom y \<sharp> Unit; atom y \<sharp> Unit; atom y \<sharp> Unit\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unit, Unit, Unit : One
2. \<And>\<Gamma> x \<tau> y \<tau>'. \<lbrakk>\<Gamma> $$ x = Some \<tau>; atom y \<sharp> Var x; atom y \<sharp> Var x; atom y \<sharp> Var x\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Var x, Var x, Var x : \<tau>
3. \<And>\<Gamma> e eP eV \<tau>\<^sub>1 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>1; atom y \<sharp> Inj1 e; atom y \<sharp> Inj1 eP; atom y \<sharp> Inj1 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj1 e, Inj1 eP, Inj1 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
4. \<And>\<Gamma> e eP eV \<tau>\<^sub>2 \<tau>\<^sub>1 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>\<^sub>2; atom y \<sharp> Inj2 e; atom y \<sharp> Inj2 eP; atom y \<sharp> Inj2 eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Inj2 e, Inj2 eP, Inj2 eV : Syntax.Sum \<tau>\<^sub>1 \<tau>\<^sub>2
5. \<And>\<Gamma> e\<^sub>1 eP\<^sub>1 eV\<^sub>1 \<tau>\<^sub>1 e\<^sub>2 eP\<^sub>2 eV\<^sub>2 \<tau>\<^sub>2 y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>1; atom b \<sharp> eP\<^sub>1; atom b \<sharp> eV\<^sub>1\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>1, eP\<^sub>1, eV\<^sub>1 : \<tau>\<^sub>1; \<Gamma> \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; \<And>b ba. \<lbrakk>atom b \<sharp> e\<^sub>2; atom b \<sharp> eP\<^sub>2; atom b \<sharp> eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e\<^sub>2, eP\<^sub>2, eV\<^sub>2 : \<tau>\<^sub>2; atom y \<sharp> Syntax.Pair e\<^sub>1 e\<^sub>2; atom y \<sharp> Syntax.Pair eP\<^sub>1 eP\<^sub>2; atom y \<sharp> Syntax.Pair eV\<^sub>1 eV\<^sub>2\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Syntax.Pair e\<^sub>1 e\<^sub>2, Syntax.Pair eP\<^sub>1 eP\<^sub>2, Syntax.Pair eV\<^sub>1 eV\<^sub>2 : Syntax.Prod \<tau>\<^sub>1 \<tau>\<^sub>2
6. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>; atom y \<sharp> Roll e; atom y \<sharp> Roll eP; atom y \<sharp> Roll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Roll e, Roll eP, Roll eV : Mu \<alpha> \<tau>
7. \<And>\<alpha> \<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>atom \<alpha> \<sharp> y; atom \<alpha> \<sharp> \<tau>'; atom \<alpha> \<sharp> \<Gamma>; \<Gamma> \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : Mu \<alpha> \<tau>; atom y \<sharp> Unroll e; atom y \<sharp> Unroll eP; atom y \<sharp> Unroll eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unroll e, Unroll eP, Unroll eV : subst_type \<tau> (Mu \<alpha> \<tau>) \<alpha>
8. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : \<tau>; atom y \<sharp> Auth e; atom y \<sharp> Auth eP; atom y \<sharp> Auth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Auth e, Auth eP, Auth eV : AuthT \<tau>
9. \<And>\<Gamma> e eP eV \<tau> y \<tau>'. \<lbrakk>\<Gamma> \<turnstile> e, eP, eV : AuthT \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> e; atom b \<sharp> eP; atom b \<sharp> eV\<rbrakk> \<Longrightarrow> \<Gamma>(b $$:= ba) \<turnstile> e, eP, eV : AuthT \<tau>; atom y \<sharp> Unauth e; atom y \<sharp> Unauth eP; atom y \<sharp> Unauth eV\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> Unauth e, Unauth eP, Unauth eV : \<tau>
10. \<And>v vP \<tau> h \<Gamma> y \<tau>'. \<lbrakk>{$$} \<turnstile> v, vP, \<lparr>vP\<rparr> : \<tau>; \<And>b ba. \<lbrakk>atom b \<sharp> v; atom b \<sharp> vP; atom b \<sharp> \<lparr>vP\<rparr>\<rbrakk> \<Longrightarrow> {$$}(b $$:= ba) \<turnstile> v, vP, \<lparr>vP\<rparr> : \<tau>; hash \<lparr>vP\<rparr> = h; value v; value vP; atom y \<sharp> v; atom y \<sharp> Hashed h vP; atom y \<sharp> Hash h\<rbrakk> \<Longrightarrow> \<Gamma>(y $$:= \<tau>') \<turnstile> v, Hashed h vP, Hash h : AuthT \<tau>
[PROOF STEP]
qed (auto simp: fresh_fmap_update)
|
{"llama_tokens": 64519, "file": "LambdaAuth_Agreement", "length": 38}
|
import time
import numpy as np
from multiprocessing.dummy import Pool as ThreadPool
from mcts import MCTS
from play import play_match
from players.uninformed_mcts_player import UninformedMCTSPlayer
from players.deep_mcts_player import DeepMCTSPlayer
# Object that coordinates AlphaZero training.
class Trainer:
    """Coordinates AlphaZero-style training: rounds of self-play data
    generation followed by neural-network parameter updates.

    Attributes:
        training_data: Replay buffer of (state, action distribution, outcome)
            rows; pruned to the newest ``buffer_size_limit`` rows if a limit
            is set.
        error_log: Mean training loss recorded after each policy iteration.
    """

    def __init__(self, game, nn, num_simulations, num_games, num_updates, buffer_size_limit, cpuct, num_threads):
        self.game = game                            # Game rules object (state transitions, scoring).
        self.nn = nn                                # Network exposing .train(data) and .latest_loss.
        self.num_simulations = num_simulations      # MCTS simulations per move.
        self.num_games = num_games                  # Self-play games per policy iteration.
        self.num_updates = num_updates              # Network updates per policy iteration.
        self.buffer_size_limit = buffer_size_limit  # Max replay-buffer rows (None = unbounded).
        self.training_data = np.zeros((0, 3))       # Empty (state, dist, outcome) buffer.
        self.cpuct = cpuct                          # MCTS exploration constant.
        self.num_threads = num_threads              # >1 enables thread-pooled self-play.
        self.error_log = []                         # Mean-loss history, one entry per iteration.

    # Does one game of self play and generates training samples.
    def self_play(self, temperature):
        """Play a single self-play game and return its training samples.

        Args:
            temperature: Exploration temperature passed to the MCTS
                action-distribution query.

        Returns:
            np.ndarray of rows [state, action-probabilities, final scores],
            one row per move played.
        """
        s = self.game.get_initial_state()
        tree = MCTS(self.game, self.nn)
        data = []
        scores = self.game.check_game_over(s)
        root = True
        alpha = 1     # Dirichlet concentration parameter for root noise.
        weight = .25  # Mixing weight of the root noise.
        while scores is None:
            # Think: expand the search tree from the current state.
            for _ in range(self.num_simulations):
                tree.simulate(s, cpuct=self.cpuct)

            # Fetch action distribution and append training example template.
            dist = tree.get_distribution(s, temperature=temperature)

            # Add Dirichlet noise at the root only, to encourage exploration.
            if root:
                noise = np.random.dirichlet(alpha * np.ones(len(dist), dtype=np.float32))
                dist[:, 1] = dist[:, 1] * (1 - weight) + noise * weight
                root = False

            data.append([s, dist[:, 1], None])  # state, prob, outcome (outcome filled in below)

            # Sample an action from the (noisy) distribution.
            # FIX: np.float was removed in NumPy 1.24; use the builtin float.
            idx = np.random.choice(len(dist), p=dist[:, 1].astype(float))
            a = tuple(dist[idx, 0])

            # Apply the action via a one-hot template over available actions.
            available = self.game.get_available_actions(s)
            template = np.zeros_like(available)
            template[a] = 1
            s = self.game.take_action(s, template)

            # Check scores
            scores = self.game.check_game_over(s)

        # Back-fill every sample with the final game outcome.
        for i, _ in enumerate(data):
            data[i][-1] = scores

        return np.array(data)

    # Performs one iteration of policy improvement.
    # Creates some number of games, then updates network parameters some number of times from that training data.
    def policy_iteration(self, verbose=False):
        """Run one policy-improvement iteration: generate ``num_games``
        self-play games, then apply ``num_updates`` network updates on the
        accumulated replay buffer. Appends the mean loss to ``error_log``.

        Args:
            verbose: If True, print timing and loss diagnostics.
        """
        temperature = 1  # Exploratory temperature for self-play move sampling.

        if verbose:
            print("SIMULATING " + str(self.num_games) + " games")
            start = time.time()
        if self.num_threads > 1:
            # Thread-pooled self-play: one job per game, all at the same temperature.
            jobs = [temperature] * self.num_games
            pool = ThreadPool(self.num_threads)
            new_data = pool.map(self.self_play, jobs)
            pool.close()
            pool.join()
            self.training_data = np.concatenate([self.training_data] + new_data, axis=0)
        else:
            for _ in range(self.num_games):  # Self-play games, sequentially.
                new_data = self.self_play(temperature)
                self.training_data = np.concatenate([self.training_data, new_data], axis=0)
        if verbose:
            print("Simulating took " + str(int(time.time() - start)) + " seconds")

        # Prune oldest training samples if a buffer size limit is set.
        if self.buffer_size_limit is not None:
            self.training_data = self.training_data[-self.buffer_size_limit:, :]

        if verbose:
            print("TRAINING")
            start = time.time()
        mean_loss = None
        count = 0
        for _ in range(self.num_updates):
            self.nn.train(self.training_data)
            new_loss = self.nn.latest_loss.item()
            if mean_loss is None:
                mean_loss = new_loss
            else:
                # BUG FIX: the running average was previously computed but
                # never assigned, so error_log only reflected the first loss.
                mean_loss = (mean_loss * count + new_loss) / (count + 1)
            count += 1
        self.error_log.append(mean_loss)
        if verbose:
            print("Training took " + str(int(time.time() - start)) + " seconds")
            print("Average train error:", mean_loss)
|
{"hexsha": "0bf4320dd5cac88814b1c524a685eaa9d7381135", "size": 4155, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "Counterfeiter/multiplayer-alphazero", "max_stars_repo_head_hexsha": "e89477975958d995772487ad77fe8859a3778a36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-11-29T03:46:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T19:59:01.000Z", "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "Counterfeiter/multiplayer-alphazero", "max_issues_repo_head_hexsha": "e89477975958d995772487ad77fe8859a3778a36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-21T19:11:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-20T17:40:46.000Z", "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "Counterfeiter/multiplayer-alphazero", "max_forks_repo_head_hexsha": "e89477975958d995772487ad77fe8859a3778a36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-11-26T05:05:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T22:08:53.000Z", "avg_line_length": 35.5128205128, "max_line_length": 113, "alphanum_fraction": 0.5959085439, "include": true, "reason": "import numpy", "num_tokens": 895}
|
import numpy as np
import pytest
import xarray as xr
from climpred import PerfectModelEnsemble
from climpred.exceptions import DatasetError
from climpred.metrics import DETERMINISTIC_PM_METRICS
# Render xarray reprs as plain text to keep pytest output readable.
xr.set_options(display_style="text")

# (comparison, dim) combinations exercised by PerfectModelEnsemble.verify.
comparison_dim_PM = [
    ("m2m", "init"),
    ("m2m", "member"),
    ("m2m", ["init", "member"]),
    ("m2e", "init"),
    ("m2e", "member"),
    ("m2e", ["init", "member"]),
    ("m2c", "init"),
    ("m2c", "member"),
    ("m2c", ["init", "member"]),
    ("e2c", "init"),
]

# Reference forecasts to benchmark initialized skill against.
references = [
    "uninitialized",
    "persistence",
    "climatology",
    ["climatology", "uninitialized", "persistence"],
]
# Human-readable ids for the parametrized reference cases above.
references_ids = [
    "uninitialized",
    "persistence",
    "climatology",
    "climatology, uninitialized, persistence",
]

# Bin edges used by the contingency and roc metrics.
category_edges = np.array([9.5, 10.0, 10.5])

# Small bootstrap iteration count to keep the tests fast.
ITERATIONS = 3
def test_perfectModelEnsemble_init(PM_ds_initialized_1d):
    """PerfectModelEnsemble can be constructed from an initialized Dataset."""
    pm = PerfectModelEnsemble(PM_ds_initialized_1d)
    # Removed stray debug print of the class object; the constructed
    # ensemble must simply be truthy.
    assert pm
def test_perfectModelEnsemble_init_da(PM_da_initialized_1d):
    """PerfectModelEnsemble also accepts a DataArray as initialized input."""
    ensemble = PerfectModelEnsemble(PM_da_initialized_1d)
    assert ensemble
def test_add_control(perfectModelEnsemble_initialized_control):
    """A control run attached to a PerfectModelEnsemble is retrievable."""
    control = perfectModelEnsemble_initialized_control.get_control()
    assert control
def test_generate_uninit(perfectModelEnsemble_initialized_control):
    """generate_uninitialized() attaches an uninitialized ensemble."""
    ensemble = perfectModelEnsemble_initialized_control.generate_uninitialized()
    assert ensemble.get_uninitialized()
def test_compute_persistence(perfectModelEnsemble_initialized_control):
    """_compute_persistence runs without error on a PerfectModelEnsemble."""
    ensemble = perfectModelEnsemble_initialized_control
    ensemble._compute_persistence(metric="acc")
def test_get_initialized(PM_ds_initialized_1d):
    """get_initialized() returns the stored initialized dataset."""
    ensemble = PerfectModelEnsemble(PM_ds_initialized_1d)
    assert ensemble.get_initialized() == ensemble._datasets["initialized"]
def test_get_uninitialized(perfectModelEnsemble_initialized_control):
    """get_uninitialized() returns the stored uninitialized dataset."""
    ensemble = perfectModelEnsemble_initialized_control.generate_uninitialized()
    assert ensemble.get_uninitialized() == ensemble._datasets["uninitialized"]
def test_get_control(perfectModelEnsemble_initialized_control):
    """get_control() returns the stored control dataset."""
    ensemble = perfectModelEnsemble_initialized_control
    assert ensemble.get_control() == ensemble._datasets["control"]
def test_inplace(PM_ds_initialized_1d, PM_ds_control_1d):
    """Methods return new objects instead of mutating the caller."""
    pm = PerfectModelEnsemble(PM_ds_initialized_1d)
    # add_control must not modify pm in place.
    pm.add_control(PM_ds_control_1d)
    assert pm != pm.add_control(PM_ds_control_1d)
    # generate_uninitialized must not modify pm in place.
    pm = pm.add_control(PM_ds_control_1d)
    pm.generate_uninitialized()
    assert pm != pm.generate_uninitialized()
    # Arbitrary xarray-style reductions must not modify pm either.
    pm.sum("init")
    assert pm != pm.sum("init")
def test_verify(perfectModelEnsemble_initialized_control):
    """verify() produces a result for a deterministic metric."""
    result = perfectModelEnsemble_initialized_control.verify(
        metric="mse", comparison="m2e", dim=["init", "member"]
    )
    assert result
def test_verify_metric_kwargs(perfectModelEnsemble_initialized_control):
    """verify() forwards metric_kwargs such as ``threshold``."""
    ensemble = perfectModelEnsemble_initialized_control
    # Work on anomalies so the probabilistic threshold is meaningful.
    anomalies = ensemble - ensemble.mean("time").mean("init")
    assert anomalies.verify(
        metric="threshold_brier_score",
        comparison="m2c",
        dim=["init", "member"],
        threshold=0.5,
    )
@pytest.mark.parametrize("reference", references, ids=references_ids)
def test_verify_reference(perfectModelEnsemble_initialized_control, reference):
    """Test that verify works with references given."""
    pm = perfectModelEnsemble_initialized_control.generate_uninitialized()
    # Expand to a tiny fake lon/lat grid so the check also covers
    # geospatial data.
    skill = (
        pm.verify(
            metric="rmse", comparison="m2e", dim=["init", "member"], reference=reference
        )
        .expand_dims(["lon", "lat"])
        .isel(lon=[0] * 2, lat=[0] * 2)
    )  # make geospatial
    # Normalize ``reference`` to a list so the length checks below work.
    if isinstance(reference, str):
        reference = [reference]
    elif reference is None:
        reference = []
    if len(reference) == 0:
        # Without references no extra "skill" dimension should appear.
        assert "skill" not in skill.dims
    else:
        # One entry per reference plus the initialized forecast itself.
        assert skill.skill.size == len(reference) + 1
    # test skills not none
    assert skill.notnull().all()
    assert "dayofyear" not in skill.coords
def test_verify_fails_expected_metric_kwargs(perfectModelEnsemble_initialized_control):
    """verify() raises when a required metric kwarg is missing."""
    ensemble = perfectModelEnsemble_initialized_control
    ensemble = ensemble - ensemble.mean("time").mean("init")
    # threshold_brier_score requires a ``threshold`` kwarg.
    with pytest.raises(ValueError) as excinfo:
        ensemble.verify(
            metric="threshold_brier_score", comparison="m2c", dim=["init", "member"]
        )
    assert str(excinfo.value) == "Please provide threshold."
def test_compute_uninitialized_metric_kwargs(perfectModelEnsemble_initialized_control):
    """_compute_uninitialized forwards metric_kwargs such as ``threshold``."""
    ensemble = perfectModelEnsemble_initialized_control
    anomalies = (ensemble - ensemble.mean("time").mean("init")).generate_uninitialized()
    assert anomalies._compute_uninitialized(
        metric="threshold_brier_score",
        comparison="m2c",
        threshold=0.5,
        dim=["init", "member"],
    )
def test_bootstrap_metric_kwargs(perfectModelEnsemble_initialized_control):
    """bootstrap() forwards metric_kwargs such as ``threshold``."""
    ensemble = perfectModelEnsemble_initialized_control
    anomalies = (ensemble - ensemble.mean("time").mean("init")).generate_uninitialized()
    assert anomalies.bootstrap(
        metric="threshold_brier_score",
        comparison="m2c",
        threshold=0.5,
        iterations=ITERATIONS,
        dim=["init", "member"],
    )
def test_calendar_matching_control(PM_da_initialized_1d, PM_ds_control_1d):
    """Adding a control with a mismatching calendar raises a ValueError."""
    ensemble = PerfectModelEnsemble(PM_da_initialized_1d)
    # Force the control onto an incompatible ("all_leap") calendar.
    PM_ds_control_1d["time"] = xr.cftime_range(
        start="1950", periods=PM_ds_control_1d.time.size, freq="MS", calendar="all_leap"
    )
    with pytest.raises(ValueError, match="does not match"):
        ensemble.add_control(PM_ds_control_1d)
def test_persistence_dim(perfectModelEnsemble_initialized_control):
    """Persistence, verify and bootstrap reduce over ``dim`` (here "lon")
    while keeping the "init" dimension."""
    pm = perfectModelEnsemble_initialized_control.expand_dims(
        "lon"
    ).generate_uninitialized()
    assert "lon" in pm.get_initialized().dims
    dim = ["lon"]
    metric = "rmse"
    comparison = "m2e"
    # Persistence reference: "lon" reduced away, "init" retained.
    actual = pm._compute_persistence(metric=metric, dim=dim)
    assert "lon" not in actual.dims
    assert "init" in actual.dims
    # verify() with references behaves the same way.
    actual = pm.verify(
        metric=metric,
        comparison=comparison,
        dim=dim,
        reference=["persistence", "uninitialized"],
    )
    assert "lon" not in actual.dims
    assert "init" in actual.dims
    pm = perfectModelEnsemble_initialized_control.expand_dims("lon")
    # fix _resample_iterations_idx doesnt work with singular dimension somewhere.
    pm = pm.isel(lon=[0, 0])
    actual = pm.bootstrap(metric=metric, comparison=comparison, dim=dim, iterations=2)
    assert "lon" not in actual.dims
    assert "init" in actual.dims
def test_HindcastEnsemble_as_PerfectModelEnsemble(hindcast_recon_1d_mm):
    """Test that initialized dataset for HindcastEnsemble can also be used for
    PerfectModelEnsemble."""
    v = "SST"
    alignment = "maximize"
    hindcast = hindcast_recon_1d_mm.isel(lead=[0, 1])
    # Hindcast verification against observations yields no NaNs.
    assert (
        not hindcast.verify(
            metric="acc", comparison="e2o", dim="init", alignment=alignment
        )[v]
        .isnull()
        .any()
    )
    # try PerfectModelEnsemble predictability
    init = hindcast.get_initialized()
    pm = PerfectModelEnsemble(init)
    # The same initialized data verifies cleanly as a PerfectModelEnsemble.
    assert (
        not pm.verify(metric="acc", comparison="m2e", dim=["member", "init"])[v]
        .isnull()
        .any()
    )
def test_verify_no_need_for_control(PM_da_initialized_1d, PM_da_control_1d):
    """Tests that no error is thrown when no control present
    when calling verify(reference=['uninitialized'])."""
    v = "tos"
    comparison = "m2e"
    pm = PerfectModelEnsemble(PM_da_initialized_1d).isel(lead=[0, 1, 2])
    # verify needs to control
    skill = pm.verify(metric="mse", comparison=comparison, dim="init")
    assert not skill[v].isnull().any()
    # control not needed for normalized metrics as normalized
    # with verif which is the verification member in PM and
    # not the control simulation.
    assert (
        not pm.verify(metric="nmse", comparison=comparison, dim="init")[v]
        .isnull()
        .any()
    )
    # The persistence reference, however, does require a control dataset.
    with pytest.raises(DatasetError) as e:
        pm.verify(
            metric="mse", comparison=comparison, dim="init", reference=["persistence"]
        )
    assert "at least one control dataset" in str(e.value)
    # unlikely case that control gets deleted after generating uninitialized
    pm = pm.add_control(PM_da_control_1d).generate_uninitialized()
    pm._datasets["control"] = {}
    assert (
        not pm._compute_uninitialized(metric="mse", comparison=comparison, dim="init")[
            v
        ]
        .isnull()
        .any()
    )
    assert (
        not pm.verify(
            metric="mse", comparison=comparison, dim="init", reference=["uninitialized"]
        )[v]
        .isnull()
        .any()
    )
def test_verify_reference_same_dims(perfectModelEnsemble_initialized_control):
    """verify() keeps dimensionality consistent across reference choices."""
    ensemble = perfectModelEnsemble_initialized_control.generate_uninitialized()
    ensemble = ensemble.isel(lead=[0, 1, 2], init=[0, 1, 2])
    kwargs = dict(metric="mse", comparison="m2e", dim="init")
    no_ref = ensemble.verify(reference=None, **kwargs)
    uninit_ref = ensemble.verify(reference="uninitialized", **kwargs)
    pers_ref = ensemble.verify(reference="persistence", **kwargs)
    clim_ref = ensemble.verify(reference="climatology", **kwargs)
    # Each single reference adds exactly one entry beside the initialized
    # skill, and exactly one extra dimension (+1 because the initialized
    # result gets squeezed when no reference is requested).
    for with_ref in (uninit_ref, pers_ref, clim_ref):
        assert with_ref.skill.size == 2
        assert len(no_ref.dims) + 1 == len(with_ref.dims)
    assert len(pers_ref.dims) == len(uninit_ref.dims)
    assert len(clim_ref.dims) == len(uninit_ref.dims)
@pytest.mark.parametrize("reference", references, ids=references_ids)
@pytest.mark.parametrize("comparison,dim", comparison_dim_PM)
@pytest.mark.parametrize("metric", DETERMINISTIC_PM_METRICS)
def test_PerfectModel_verify_bootstrap_deterministic(
    perfectModelEnsemble_initialized_control, comparison, metric, dim, reference
):
    """
    Checks that PerfectModel.verify() and PerfectModel.bootstrap() for
    deterministic metrics is not NaN.
    """
    pm = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2], init=range(6))
    if isinstance(reference, str):
        reference = [reference]
    # Some metrics need extra keyword arguments to be computable.
    if metric == "contingency":
        metric_kwargs = {
            "forecast_category_edges": category_edges,
            "observation_category_edges": category_edges,
            "score": "accuracy",
        }
    elif metric == "roc":
        metric_kwargs = {"bin_edges": category_edges}
    else:
        metric_kwargs = {}
    # acc on dim member only is ill defined
    pearson_r_containing_metrics = [
        "pearson_r",
        "spearman_r",
        "pearson_r_p_value",
        "spearman_r_p_value",
        "msess_murphy",
        "bias_slope",
        "conditional_bias",
        "std_ratio",
        "conditional_bias",
        "uacc",
    ]
    if dim == "member" and metric in pearson_r_containing_metrics:
        dim = ["init", "member"]

    # verify(): skill must not be NaN.
    actual = pm.verify(
        comparison=comparison,
        metric=metric,
        dim=dim,
        reference=reference,
        **metric_kwargs
    ).tos

    if metric in ["contingency"] or metric in pearson_r_containing_metrics:
        # less strict here with all NaNs, pearson_r yields NaNs for climatology
        if "climatology" in reference:
            actual = actual.drop_sel(skill="climatology")
        assert not actual.isnull().all()
    else:
        assert not actual.isnull().any()

    # bootstrap()
    actual = pm.bootstrap(
        comparison=comparison,
        metric=metric,
        dim=dim,
        iterations=ITERATIONS,
        reference=reference,
        **metric_kwargs
    ).tos
    # p-values are only defined relative to a reference; drop them here.
    if len(reference) > 0:
        actual = actual.drop_sel(results="p")

    if metric in ["contingency"] or metric in pearson_r_containing_metrics:
        # less strict here with all NaNs, pearson_r yields NaNs for climatology
        if "climatology" in reference:
            actual = actual.drop_sel(skill="climatology")
        assert not actual.sel(results="verify skill").isnull().all()
    else:
        assert not actual.sel(results="verify skill").isnull().any()
@pytest.mark.parametrize("metric", ("rmse", "pearson_r"))
def test_pvalue_from_bootstrapping(perfectModelEnsemble_initialized_control, metric):
    """Test that pvalue of initialized ensemble first lead is close to 0."""
    sig = 95
    pm = perfectModelEnsemble_initialized_control.isel(lead=[0, 1, 2])
    # p-value of initialized vs. uninitialized skill at the first lead.
    actual = (
        pm.bootstrap(
            metric=metric,
            iterations=ITERATIONS,
            comparison="e2c",
            sig=sig,
            dim="init",
            reference="uninitialized",
        )
        .sel(skill="uninitialized", results="p")
        .isel(lead=0)
    )
    # check that significant p-value (two-sided test at the given sig level)
    assert actual.tos.values < 2 * (1 - sig / 100)
    # lead units keep
    assert actual.lead.attrs["units"] == "years"
|
{"hexsha": "1a126c3ca58e0752bbb430a0739e980e05b4c988", "size": 14492, "ext": "py", "lang": "Python", "max_stars_repo_path": "climpred/tests/test_PerfectModelEnsemble_class.py", "max_stars_repo_name": "rom-py/climpred", "max_stars_repo_head_hexsha": "9b168992704b1b7627aa82a08edd5faecfeee7ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 104, "max_stars_repo_stars_event_min_datetime": "2020-09-17T16:46:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T16:49:44.000Z", "max_issues_repo_path": "climpred/tests/test_PerfectModelEnsemble_class.py", "max_issues_repo_name": "rom-py/climpred", "max_issues_repo_head_hexsha": "9b168992704b1b7627aa82a08edd5faecfeee7ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 303, "max_issues_repo_issues_event_min_datetime": "2020-09-17T16:05:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:59:31.000Z", "max_forks_repo_path": "climpred/tests/test_PerfectModelEnsemble_class.py", "max_forks_repo_name": "kpegion/climpred", "max_forks_repo_head_hexsha": "b3562311af253b9ee0e0cd97d196b0fd34936031", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2020-10-08T15:40:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T19:07:54.000Z", "avg_line_length": 33.8598130841, "max_line_length": 88, "alphanum_fraction": 0.6804443831, "include": true, "reason": "import numpy", "num_tokens": 3492}
|
import numpy as np
import matplotlib.pyplot as plt
# Columns of sirius_data.dat: molecule id (SM number), pKa,
# logP of one protonation state (logp0), logP of the other (logp1).
moldata = np.loadtxt('sirius_data.dat')

for i in range(len(moldata)):
    pKa = moldata[i,1]
    logp0 = moldata[i,2]
    logp1 = moldata[i,3]
    # Partition coefficients of the two protonation states.
    p0 = 10**(+logp0)
    p1 = 10**(+logp1)
    # Acid dissociation constant.
    ka = 10**(-pKa)
    # Experimental pH points of the Sirius assay.
    pHexp = [1.0,1.2,2.0,3.0,4.0,5.0,6.0,6.5,7.0,7.4,8.0,9.0,10.0,11.0,12.0]
    pH = np.array(pHexp)
    # NOTE(review): despite the name, cH = 10**(+pH) equals 1/[H+], so
    # cH*ka = 10**(pH - pKa); confirm this sign convention is intended.
    cH = 10**(pH)
    #print(-np.log10(cH))
    # Population-weighted logD:
    # D = (p0 + p1*10**(pH-pKa)) / (1 + 10**(pH-pKa))
    logD = np.log10((p0 + p1*cH*ka)/(1.0 + cH*ka))
    #logD = logp1 - 10**(pH-pKa*np.ones(len(pH)))
    #logDref = np.loadtxt('sirius_data_SM41.dat')
    logDexp = np.loadtxt('logD_sirius_SM%d.dat'%moldata[i,0])
    plt.figure()
    plt.plot(pHexp,logDexp,label='Experimental')
    plt.plot(pH,logD,label='Predicted from Eqn')
    # plt.plot(logDref[:,0],logDref[:,1],label='Sirius data')
    # plt.ylim(-6,1)
    plt.legend(fontsize=14)
    plt.xlabel('pH',fontsize=16)
    plt.ylabel('logD',fontsize=16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    # plt.show()
    plt.savefig('lipophilicity_SM%d.png'%moldata[i,0],dpi=120,bbox_inches='tight')
|
{"hexsha": "28b87cbb5466fd1fc45443bd453f24d156ca311c", "size": 1062, "ext": "py", "lang": "Python", "max_stars_repo_path": "physical_property/logD/theory/logDplot.py", "max_stars_repo_name": "samplchallenges/SAMPL7", "max_stars_repo_head_hexsha": "1feed16ed8502a3519559fbdcc23812f21c64be1", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-10-23T17:59:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T18:42:14.000Z", "max_issues_repo_path": "physical_property/logD/theory/logDplot.py", "max_issues_repo_name": "samplchallenges/SAMPL7", "max_issues_repo_head_hexsha": "1feed16ed8502a3519559fbdcc23812f21c64be1", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2019-10-16T18:42:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-05T23:28:04.000Z", "max_forks_repo_path": "physical_property/logD/theory/logDplot.py", "max_forks_repo_name": "samplchallenges/SAMPL7", "max_forks_repo_head_hexsha": "1feed16ed8502a3519559fbdcc23812f21c64be1", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2019-10-07T08:47:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T14:15:07.000Z", "avg_line_length": 28.7027027027, "max_line_length": 82, "alphanum_fraction": 0.6167608286, "include": true, "reason": "import numpy", "num_tokens": 421}
|
"""
Script for extracting 256 x 256 pixel, plaque-centered images from the 1536 x 1536 color-normalized images
"""
import csv
import glob, os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage import measure
from tqdm import tqdm
import random
# Fix the NumPy RNG so negative-example sampling is reproducible.
np.random.seed(42)

# Input: color-normalized 1536x1536 tiles.
IMG_DIR = 'data/normalized_tiles/' ##ziqis normalized 1536 tiles
# Output roots for crops, bbox visualizations, and negative examples.
SAVE_DIR = 'data/tile_seg/'
BLOBS_DIR = SAVE_DIR + 'blobs/'
IMG_BBOXES = SAVE_DIR + 'blobs_bboxes/'
NEGATIVES = SAVE_DIR + 'negatives/'
def plot_image(imagepath,
               figsize=(5,5)):
    """
    Loads an image from disk with OpenCV and displays it in RGB.

    IMAGEPATH: path to the image file
    FIGSIZE: matplotlib figure size
    """
    bgr = cv2.imread(imagepath)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=figsize)
    plt.imshow(rgb)
    plt.show()
def hsv_mask(im,
             hue=[0, 40],
             sat=[10, 255],
             val=[0, 220],
             figsize=(5,5),
             show=False):
    """
    converts image to HSV colorspace,
    applies color filters based on input range
    and returns mask (binary: 255 where all channels fall in range)
    IM: BGR image (as loaded by cv2.imread)
    HUE, SAT, VAL: [low, high] ranges for hue, saturation and value
    FIGSIZE: size of figure to plot
    SHOW: whether to show the original, mask, and masked image
    """
    # convert to HSV colorspace
    im2 = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    # define mask ranges (lower/upper bound per HSV channel)
    lower = np.array([hue[0], sat[0], val[0]])
    upper = np.array([hue[1], sat[1], val[1]])
    mask = cv2.inRange(im2, lower, upper)
    if show:
        # visualize original, mask, and the mask applied to the image
        im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        output = cv2.bitwise_and(im_rgb, im_rgb, mask = mask)
        plt.figure(figsize=(15,5))
        plt.subplot(131)
        plt.title('original')
        plt.imshow(im_rgb)
        plt.subplot(132)
        plt.title('mask ')
        plt.imshow(mask)
        plt.show()
        plt.subplot(133)
        plt.title('mask applied')
        plt.imshow(output)
        plt.show()
    return mask
def clean_mask(mask,
               mask_threshold=0.25):
    """
    function that applies opencv operations
    to reduce noise and create smoother segments
    MASK: the binary mask to smooth
    MASK_THRESHOLD: fraction of foreground above which an erosion
        is applied first
    Returns the cleaned binary mask.
    """
    # define a kernel structure (5x5 cross-shaped structuring element)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
    mask_nonzero = cv2.countNonZero(mask)
    # If the mask has a large amount of brown hue
    # (above MASK_THRESHOLD of the mask), then apply an erosion
    if mask_nonzero > mask_threshold * mask.size:
        mask = cv2.erode(mask, kernel, iterations=1)
    # Apply morphological closing, then opening operations
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    cleaned = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
    # If this doesn't result in a cleaner mask (fewer pixels)
    # then the image is relatively "dirty" and we should apply
    # opening before closing to reduce noise levels
    if cv2.countNonZero(cleaned) > mask_nonzero:
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        cleaned = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    return cleaned
def show_bboxes(im, bboxes):
    """
    draws bboxes on image object and plots original vs. annotated
    IM: BGR image
    BBOXES: iterable of (x, y, w, h) boxes, x,y = left, top
    Returns the RGB image with rectangles drawn on it.
    """
    im_rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    plt.figure(figsize=(20,10))
    plt.subplot(121)
    plt.title('original')
    plt.imshow(im_rgb)
    # draw each box as a blue 2px rectangle (in-place on im_rgb)
    for box in bboxes:
        x1 = box[0]
        y1 = box[1]
        x2 = box[0] + box[2]
        y2 = box[1] + box[3]
        cv2.rectangle(im_rgb, (x1, y1), (x2, y2), (0, 0, 255), 2)
    plt.subplot(122)
    plt.title('bounding boxes')
    plt.imshow(im_rgb)
    plt.show()
    return im_rgb
def crop_from_img(img,
                  bboxes,
                  size=(128,128),
                  show=False):
    """
    Extracts fixed-size crops centered on each bounding box.

    IMG: image array indexed as img[row(y), col(x)]
    BBOXES: iterable of (x, y, w, h) boxes, x,y = left, top
    SIZE: (width, height) of each crop
    SHOW: whether to display each crop (requires cv2/matplotlib)

    Returns (list of crops, array of crop coords as (x, y, w, h)).
    """
    cropped_images = []
    cropped_coords = []
    for bbox in bboxes:
        # get the x,y of the centerpoint of the bounding box
        box_center_x = bbox[0] + 0.5 * bbox[2]
        box_center_y = bbox[1] + 0.5 * bbox[3]
        # Clamp the center so a crop of the desired size stays inside
        # the image. BUG FIX: x spans columns (img.shape[1]) and y spans
        # rows (img.shape[0]); the axes were previously swapped, which
        # only worked because the tiles are square.
        centerpoint_x = np.clip(box_center_x,
                                a_min = size[0] / 2,
                                a_max = img.shape[1] - size[0] / 2)
        centerpoint_y = np.clip(box_center_y,
                                a_min = size[1] / 2,
                                a_max = img.shape[0] - size[1] / 2)
        # top, left of crop box
        # cast to int
        x_crop = int(centerpoint_x - 0.5 * size[0])
        y_crop = int(centerpoint_y - 0.5 * size[1])
        cropped_coords.append((x_crop, y_crop, size[0], size[1]))
        crop_img = img[y_crop:y_crop + size[1],
                       x_crop:x_crop + size[0]]
        if show:
            plt.figure()
            im2 = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)
            plt.imshow(im2)
        cropped_images.append(crop_img)
    return cropped_images, np.array(cropped_coords)
def get_negative_bboxes(mask,
                        pixel_threshold=500,
                        img_size=(128,128),
                        num_negatives=5,
                        max_guesses=1000):
    """
    Samples up to NUM_NEGATIVES crop boxes from MASK whose foreground
    pixel count stays below PIXEL_THRESHOLD (negative examples).

    Sampling is rejection-based: random top-left corners are drawn until
    enough clean crops are found or MAX_GUESSES attempts are exhausted.
    Accepted regions are painted into a copy of the mask so later draws
    cannot overlap them.

    Returns an array of (x, y, w, h) boxes.
    """
    scratch = mask.copy()
    accepted = []
    # Valid ranges for the top-left corner so the crop stays inside.
    max_x = scratch.shape[0] - img_size[0]
    max_y = scratch.shape[1] - img_size[1]
    attempts = 0
    while len(accepted) < num_negatives and attempts < max_guesses:
        # draw a random candidate corner
        x = np.random.randint(max_x)
        y = np.random.randint(max_y)
        candidate = scratch[y: y + img_size[0],
                            x: x + img_size[1]]
        if np.count_nonzero(candidate) < pixel_threshold:
            accepted.append((x, y, img_size[0], img_size[1]))
            # mark the region as used to prevent overlapping negatives
            scratch[y: y + img_size[0], x: x + img_size[1]] = 255
        attempts += 1
    return np.array(accepted)
def watershed_mask(mask,
                   img,
                   dist_thresh=0.4,
                   show=False):
    """
    Applies watershed algorithm to split touching blobs in MASK
    MASK: binary mask to split
    IMG: the image the mask was derived from
    DIST_THRESH: fraction of the max distance-transform value used to
        threshold the "sure foreground"
    SHOW: whether to show the markers and image
    Returns (marker label image, img).
    """
    kernel = np.ones((8,8),np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    # sure background area
    sure_bg = cv2.dilate(mask, kernel, iterations=3)
    # distance transform peaks identify the cores of individual blobs
    dist_transform = cv2.distanceTransform(mask,cv2.DIST_L2,5)
    ret, sure_fg = cv2.threshold(dist_transform,
                                 dist_thresh * dist_transform.max(),
                                 255, cv2.THRESH_BINARY)
    sure_fg = np.uint8(sure_fg)
    # region between sure background and sure foreground is "unknown"
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown==255] = 0
    markers = cv2.watershed(img, markers)
    if show:
        plt.figure(figsize=(20,10))
        plt.subplot(121)
        plt.imshow(markers)
        plt.subplot(122)
        plt.imshow(img)
        plt.show()
    return markers, img
def threshold_and_bound(mask,
                        img,
                        pixel_thresholds=[300,1500]):
    """
    Labels connected components in MASK, keeps blobs whose pixel count
    exceeds ``pixel_thresholds[0]``, and splits very large blobs (above
    ``pixel_thresholds[1]``) with the watershed algorithm before
    re-adding them.

    MASK: binary mask of candidate blobs
    IMG: image the mask was derived from (needed by watershed)
    PIXEL_THRESHOLDS: [min blob size, watershed-split size]

    Returns (filtered mask, bounding boxes as (left, top, w, h),
    blob centerpoints, blob pixel sizes).
    """
    # BUG FIX: the ``neighbors=8`` keyword was removed from
    # scikit-image; connectivity=2 is the equivalent 8-connectivity
    # setting for 2D images.
    labels = measure.label(mask, connectivity=2, background=0)
    new_mask = np.zeros(mask.shape, dtype='uint8')
    large_mask = np.zeros(mask.shape, dtype='uint8')
    ltwh = []
    centerpoints = []
    sizes = []
    # loop over the unique components
    for label in np.unique(labels):
        # if this is the background label, ignore it
        if label == 0:
            continue
        # otherwise, construct the label mask and count the
        # number of pixels
        labelMask = np.zeros(mask.shape, dtype="uint8")
        labelMask[labels == label] = 255
        numPixels = cv2.countNonZero(labelMask)
        # if the number of pixels in the component is sufficiently
        # large, then add it to our mask of "large blobs"
        if numPixels > pixel_thresholds[0]:
            sizes.append(numPixels)
            if numPixels > pixel_thresholds[1]:
                # very large blobs are split by watershed below
                large_mask = cv2.add(large_mask, labelMask)
                continue
            new_mask = cv2.add(new_mask, labelMask)
            y, x = np.where(labels == label)
            left, top = np.min(x), np.min(y)
            width, height = (np.max(x) - left), (np.max(y) - top)
            centerpoint = left + 0.5 * width, top + 0.5 * height
            ltwh.append((left, top, width, height))
            centerpoints.append(centerpoint)
    # split the very large blobs and add the resulting segments
    watershed = watershed_mask(large_mask, img)
    for label in np.unique(watershed[0]):
        # labels -1 (boundaries) and 1 (background) are ignored
        if label == -1 or label == 1:
            continue
        # otherwise, construct the label mask and count the
        # number of pixels
        labelMask = np.zeros(mask.shape, dtype="uint8")
        labelMask[watershed[0] == label] = 255
        numPixels = cv2.countNonZero(labelMask)
        if numPixels > pixel_thresholds[0]:
            sizes.append(numPixels)
            new_mask = cv2.add(new_mask, labelMask)
            y, x = np.where(watershed[0] == label)
            left, top = np.min(x), np.min(y)
            width, height = (np.max(x) - left), (np.max(y) - top)
            centerpoint = left + 0.5 * width, top + 0.5 * height
            ltwh.append((left, top, width, height))
            centerpoints.append(centerpoint)
    return new_mask, np.array(ltwh), np.array(centerpoints), np.array(sizes)
def draw_bboxes(cropped_imgs,
                bboxes,
                orig_size=(1536,1536),
                size=(256,256)):
    """
    Draws each blob's bounding box onto its crop, translating the box
    from full-tile coordinates into crop-local coordinates (recomputing
    the same clamped crop origin as crop_from_img).
    CROPPED_IMGS: the cropped images
    BBOXES: blob bounding boxes in full-tile coordinates (x, y, w, h)
    ORIG_SIZE: size of the full tile
    SIZE: size of each crop
    Returns the list of annotated copies.
    """
    drawn = []
    for cropped_img, bbox in zip(cropped_imgs, bboxes):
        # work on a copy so the original crop is left untouched
        cropped_img = cropped_img.copy()
        # recalculate coordinates based on crop
        # get the x,y of the centerpoint of the bounding box
        box_center_x = bbox[0] + 0.5 * bbox[2]
        box_center_y = bbox[1] + 0.5 * bbox[3]
        # ensure that the bounding box of the desired size
        # stays within the shape of the image
        centerpoint_x = np.clip(box_center_x,
                                a_min = size[0] / 2,
                                a_max = orig_size[0] - size[0] / 2)
        centerpoint_y = np.clip(box_center_y,
                                a_min = size[1] / 2,
                                a_max = orig_size[1] - size[1] / 2)
        # cast to int
        x_crop = int(centerpoint_x - 0.5 * size[0])
        y_crop = int(centerpoint_y - 0.5 * size[1])
        # blob box corners relative to the crop origin
        x1 = bbox[0] - x_crop
        y1 = bbox[1] - y_crop
        x2 = bbox[0] + bbox[2] - x_crop
        y2 = bbox[1] + bbox[3] - y_crop
        cv2.rectangle(cropped_img, (x1, y1), (x2, y2),(0, 0, 255), 1)
        drawn.append(cropped_img)
    return drawn
def crop_imageset(imagepath_list,
                  blobs_dir,
                  negatives_dir,
                  blobs_bboxes,
                  negatives=True,
                  rescale_factor=2,
                  rescale_dims=(768, 768),
                  hue=[0,40],
                  sat=[0,255],
                  val=[0,255],
                  thresholds=[100, 1600],
                  negative_details=SAVE_DIR+'negative_details.csv',
                  image_details=SAVE_DIR+'image_details.csv'):
    """
    Main integration code that crops our images to 256 x 256 pixel, plaque-centered images

    Detection runs on a downscaled copy (RESCALE_DIMS); coordinates and
    sizes are scaled back up by RESCALE_FACTOR before cropping from the
    full-resolution tile. Crops, annotated crops, and (optionally)
    negative examples are written to BLOBS_DIR / BLOBS_BBOXES /
    NEGATIVES_DIR; per-crop rows are appended to the detail CSVs.

    NOTE(review): the return values come from the final loop iteration
    only, i.e. they describe the last tile processed.
    """
    for imagepath in tqdm(imagepath_list):
        img = cv2.imread(imagepath)
        # detect on a smaller copy for speed
        img_resized = cv2.resize(img, rescale_dims)
        # apply hue mask
        hue_mask = hsv_mask(img_resized,
                            hue=hue,
                            sat=sat,
                            val=val,
                            show=False)
        # clean up mask
        cleaned_mask = clean_mask(hue_mask)
        filtered, bboxes, centerpoints, sizes = threshold_and_bound(cleaned_mask, img_resized,
                                                                    pixel_thresholds=thresholds)
        if negatives:
            # sample blob-free regions as negative training examples
            negative_bboxes = get_negative_bboxes(cleaned_mask)
            negative_bboxes = rescale_factor * negative_bboxes # rescale to full size
            #show_bboxes(img, negative_bboxes)
            cropped_negatives, negative_coords = crop_from_img(img,
                                                               negative_bboxes,
                                                               size=(256,256))
            save_cropped(cropped_negatives,
                         imagepath,
                         negatives_dir)
            # negatives have no blob, so record size 0
            negative_sizes = np.zeros(len(negative_coords))
            write_details_to_csv(negative_details,
                                 negative_coords,
                                 negative_bboxes,
                                 imagepath,
                                 negative_sizes)
        # save information
        # scale detections back to full-resolution coordinates;
        # areas scale with the square of the factor
        bboxes = rescale_factor * bboxes
        centerpoints = rescale_factor * centerpoints
        sizes = rescale_factor ** 2 * sizes
        cropped_images, cropped_coords = crop_from_img(img,
                                                       bboxes,
                                                       size=(256,256))
        save_cropped(cropped_images, imagepath, blobs_dir)
        drawn_bboxes = draw_bboxes(cropped_images, bboxes)
        save_cropped(drawn_bboxes, imagepath, blobs_bboxes)
        write_details_to_csv(image_details,
                             cropped_coords,
                             bboxes,
                             imagepath,
                             sizes)
    # save information
    return bboxes, centerpoints, cropped_coords, sizes
def save_cropped(cropped_images, imagepath, outdir):
    """
    Writes each crop to OUTDIR/<wsi>/ named <wsi>_<row>_<col>_<index>.jpg,
    where wsi/row/col are parsed from IMAGEPATH
    (layout: .../<wsi>/<...>/<row>/<col>.jpg).
    """
    stem, _ = os.path.splitext(imagepath)
    parts = stem.split('/')
    col, row, wsi = parts[-1], parts[-2], parts[-4]
    target_dir = os.path.join(outdir, wsi)
    for idx, image in enumerate(cropped_images):
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        name = "_".join([wsi, row, col, str(idx)]) + '.jpg'
        cv2.imwrite(target_dir + "/" + name, image)
def save_image_details(dataframe,
                       cropped_images,
                       bboxes,
                       centerpoints,
                       imagepath,
                       outdir):
    """
    Writes each cropped image to OUTDIR/<wsi>/ named
    <wsi>_<row>_<col>_<index>.jpg (wsi/row/col parsed from IMAGEPATH).

    NOTE(review): ``dataframe`` and ``centerpoints`` are currently unused
    and this function has no callers in this file; parameters are kept
    for interface compatibility.
    """
    for i, (image, bbox) in enumerate(zip(cropped_images, bboxes)):
        path = os.path.splitext(imagepath)
        split = path[0].split('/')
        col = split[-1]
        row = split[-2]
        wsi = split[-4]
        dirname = os.path.join(outdir, wsi)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # BUG FIX: str(i) — joining the raw int previously raised
        # TypeError in str.join.
        filename = "_".join([wsi, row, col, str(i)]) + '.jpg'
        cv2.imwrite(dirname + "/" + filename, image)
def write_details_to_csv(filename,
                         cropped_coords,
                         bboxes,
                         imagepath,
                         sizes):
    """
    Appends one CSV row per crop to FILENAME.

    FILENAME: CSV file to append to
    CROPPED_COORDS: (x, y, w, h) of each saved crop
    BBOXES: (x, y, w, h) of each detected blob
    IMAGEPATH: source tile path; encodes WSI name, row and column
        (layout: .../<wsi>/<...>/<row>/<col>.jpg)
    SIZES: blob pixel sizes

    Returns the list of rows written.
    """
    stem, _ = os.path.splitext(imagepath)
    parts = stem.split('/')
    col, row, source = parts[-1], parts[-2], parts[-4]
    rows = []
    for idx, (img_coords, blob_coords, size) in enumerate(
            zip(cropped_coords, bboxes, sizes)):
        name = "_".join([source, row, col, str(idx)]) + '.jpg'
        rows.append([name, source, col, row, img_coords, blob_coords, size])
    with open(filename, 'a') as handle:
        csv.writer(handle).writerows(rows)
    return rows
def init_csvs(filenames):
    """
    Initialize each CSV in FILENAMES with the standard detail header row.

    Any existing content of the files is overwritten.
    """
    data_fields = [['imagename',
                    'source',
                    'tile_column',
                    'tile_row',
                    'image coordinates (xywh)',
                    'blob coordinates (xywh)',
                    'blob size']]
    # Create CSVs for Image Details
    for filename in filenames:
        print(filename)  # progress output, kept from the original behavior
        # newline='' is required by the csv module when passing a file
        # object, otherwise blank rows appear between records on Windows.
        with open(filename, "w", newline='') as csvfile:
            csv.writer(csvfile).writerows(data_fields)
##=================================================================
##Runner Calls
##=================================================================
print(cv2.__version__)
print("IMG DIR: ", IMG_DIR, " SAVE_DIR: ", SAVE_DIR)
# One HSV threshold triple per stain: [hue range], [saturation range],
# [value range], consumed by crop_imageset below.
stains = ['4G8', '6E10', 'cw']
thresholds = {}
thresholds['4G8'] = [[0, 40], [10, 255], [0, 220]]
thresholds['6E10'] = [[10, 180], [20, 220], [0, 250]]
thresholds['cw'] = [[0, 100], [1, 255], [0, 250]]
for stain in stains:
    # Fresh per-stain detail CSVs (header row only).
    init_csvs([SAVE_DIR + 'negative_details_{}.csv'.format(stain),
               SAVE_DIR + 'image_details_{}.csv'.format(stain)])
    # Tiles are laid out as <IMG_DIR>/<wsi>/0/<row>/<col>.jpg; keep only
    # the ones whose path mentions the current stain.
    images = glob.glob(IMG_DIR + '*/0/*/*.jpg')
    images = [x for x in images if stain in x]
    # NOTE(review): the per-stain CSVs initialized above are NOT the files
    # passed below -- crop_imageset is handed the un-suffixed
    # negative_details.csv / image_details.csv, so the stain-specific files
    # appear to stay header-only. Confirm which paths are intended.
    crop_imageset(images,
                  BLOBS_DIR,
                  NEGATIVES,
                  IMG_BBOXES,
                  hue=thresholds[stain][0],
                  sat=thresholds[stain][1],
                  val=thresholds[stain][2],
                  thresholds=[200, 2500],
                  negative_details=SAVE_DIR+'negative_details.csv',
                  image_details=SAVE_DIR+'image_details.csv')
|
{"hexsha": "0f5ce36b99504eae90cb39e60097544a07ce3914", "size": 18624, "ext": "py", "lang": "Python", "max_stars_repo_path": "blob_detect.py", "max_stars_repo_name": "keiserlab/consensus-learning-paper", "max_stars_repo_head_hexsha": "2d204362569489b9ab4c861b6cb6c5b819659ada", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "blob_detect.py", "max_issues_repo_name": "keiserlab/consensus-learning-paper", "max_issues_repo_head_hexsha": "2d204362569489b9ab4c861b6cb6c5b819659ada", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "blob_detect.py", "max_forks_repo_name": "keiserlab/consensus-learning-paper", "max_forks_repo_head_hexsha": "2d204362569489b9ab4c861b6cb6c5b819659ada", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-16T19:12:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T19:12:02.000Z", "avg_line_length": 36.3040935673, "max_line_length": 106, "alphanum_fraction": 0.5463380584, "include": true, "reason": "import numpy", "num_tokens": 4494}
|
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import pandas as pd
import feather
import numpy as np
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.

# ----------------------------------------------------------------------
# Feature engineering over 200 anonymous variables: derives per-value
# frequency features (packed has_one/has_zero flags and a "not unique"
# copy of each variable), using train rows plus the "real" test rows as
# the reference pool, then persists the frames with feather.
# ----------------------------------------------------------------------
data = pd.read_csv('../input/train.csv', index_col=0)
# when using dataframe you can specify some columns that must not be used
not_used = []
cat_feat = []
target = 'target'
# likeli_feat = feather.read_dataframe('./data/likeli_10folds_1000.fth').set_index('ID_code')
# data = pd.merge(data, likeli_feat.iloc[:200000], left_index=True, right_index=True)
features = [i for i in data.columns if i != target]
print(data.shape, len(features))
data.head()
etd = pd.read_csv('../input/test.csv', index_col=0)
# etd = pd.merge(etd, likeli_feat.iloc[200000:], left_index=True, right_index=True)
etd.head()
from tqdm import tqdm
# Column-name lists for the original variables and the derived features.
orig = [f'var_{i}' for i in range(200)]
has_one = [f'var_{i}_has_one' for i in range(200)]
has_zero = [f'var_{i}_has_zero' for i in range(200)]
not_u = [f'var_{i}_not_unique' for i in range(200)]
# A test row is treated as "real" when it contains at least one value that
# occurs exactly once in the test set.
for f in tqdm(orig):
    unique_v = etd[f].value_counts()
    unique_v = unique_v.index[unique_v == 1]
    etd[f + '_u'] = etd[f].isin(unique_v)
etd['has_unique'] = etd[[f + '_u' for f in orig]].any(axis=1)
print(etd['has_unique'].sum())
real_samples = etd.loc[etd['has_unique'], orig]
# Reference pool = train rows + real test rows.
ref = pd.concat([data, real_samples], axis=0)
print(ref.shape)
# Train-side flags, computed per class: for rows of a class, "count > 1
# within the same class" acts as a leave-one-out re-occurrence check,
# while membership in the other class only needs "count > 0".
for f in tqdm(orig):
    data[f + '_has_one'] = 0
    data[f + '_has_zero'] = 0
    f_1 = data.loc[data[target] == 1, f].value_counts()
    f_1_1 = set(f_1.index[f_1 > 1])
    f_0_1 = set(f_1.index[f_1 > 0])
    f_0 = data.loc[data[target] == 0, f].value_counts()
    f_0_0 = set(f_0.index[f_0 > 1])
    f_1_0 = set(f_0.index[f_0 > 0])
    data.loc[data[target] == 1, f + '_has_one'] = data.loc[data[target] == 1, f].isin(f_1_1).astype(int)
    data.loc[data[target] == 0, f + '_has_one'] = data.loc[data[target] == 0, f].isin(f_0_1).astype(int)
    data.loc[data[target] == 1, f + '_has_zero'] = data.loc[data[target] == 1, f].isin(f_1_0).astype(int)
    data.loc[data[target] == 0, f + '_has_zero'] = data.loc[data[target] == 0, f].isin(f_0_0).astype(int)
# Pack both flags into a single code: 2*has_one + has_zero, i.e. 0..3;
# the sentinel value 4 is assigned further below to pool-unique values.
data.loc[:, has_one] = 2*data.loc[:, has_one].values + data.loc[:, has_zero].values
# Test-side flags: membership in the train value sets of each class.
for f in tqdm(orig):
    etd[f + '_has_one'] = 0
    etd[f + '_has_zero'] = 0
    f_1 = data.loc[data[target] == 1, f].unique()
    f_0 = data.loc[data[target] == 0, f].unique()
    etd.loc[:, f + '_has_one'] = etd[f].isin(f_1).astype(int)
    etd.loc[:, f + '_has_zero'] = etd[f].isin(f_0).astype(int)
etd.loc[:, has_one] = 2*etd.loc[:, has_one].values + etd.loc[:, has_zero].values
# "not unique" copy of each variable: values occurring only once in the
# reference pool are replaced by the train mean, and the packed flag for
# those rows is set to the sentinel 4.
for f in tqdm(orig):
    v = ref[f].value_counts()
    non_unique_v = v.index[v != 1]
    m_trd = data[f].isin(non_unique_v)
    data[f + '_not_unique'] = m_trd * data[f] + (~m_trd) * data[f].mean()
    m_etd = etd[f].isin(non_unique_v)
    etd[f + '_not_unique'] = m_etd * etd[f] + (~m_etd) * data[f].mean()
    data.loc[~m_trd, f + '_has_one'] = 4
    etd.loc[~m_etd, f + '_has_one'] = 4
data['var_0_has_one'].value_counts()
# Persist the engineered frames; feather preserves dtypes and loads fast.
feather.write_dataframe(data.reset_index(), './921_data.fth')
feather.write_dataframe(etd.reset_index(), './921_etd.fth')
np.save('./real_samples.index', real_samples.index.values)
|
{"hexsha": "94dc42ac7912e3712edf2d561cfb083511db9fe0", "size": 3618, "ext": "py", "lang": "Python", "max_stars_repo_path": "Create_data.py", "max_stars_repo_name": "mcvenkat/Python-Programs", "max_stars_repo_head_hexsha": "2ff66bbd5b07c8e093b11360e1dcac06740a5024", "max_stars_repo_licenses": ["CC0-1.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Create_data.py", "max_issues_repo_name": "mcvenkat/Python-Programs", "max_issues_repo_head_hexsha": "2ff66bbd5b07c8e093b11360e1dcac06740a5024", "max_issues_repo_licenses": ["CC0-1.0", "MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-05-22T14:10:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T19:13:05.000Z", "max_forks_repo_path": "Create_data.py", "max_forks_repo_name": "mcvenkat/Python-Programs", "max_forks_repo_head_hexsha": "2ff66bbd5b07c8e093b11360e1dcac06740a5024", "max_forks_repo_licenses": ["CC0-1.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8217821782, "max_line_length": 113, "alphanum_fraction": 0.6379215036, "include": true, "reason": "import numpy", "num_tokens": 1145}
|
% ---------------------------------------------------------------
% Nonuniform (jittered periodic) sampling / reconstruction demo.
% NOTE(review): appears to be a Strohmer/Tanner-style experiment --
% confirm against the accompanying write-up. Throughout the script
% `i` is the imaginary unit; loop counters are j, k, q.
% ---------------------------------------------------------------
begin=clock; % wall-clock start; elapsed time is reported at the end
omega=1;
over_r=2.4; %note that over_r should be between 3 and 4 for this program.
rate=5.3; %this is approximatly the ratio N/r, here the ratio must be above 1
n=ceil(rate*over_r);%this is the number of unions of sets
p=ceil(2*n-over_r); %this is the degree of the fine mesh, use p=n+1, or larger
L=200/n; %the signal is sampled from -L*T:L*T
%L=0.5;
step=over_r/2/omega; % coarse sampling period
%R1=round(-2/3+n/3);
%L1=R1+1-n;
%L2=round(2/3-n/3);
%R2=n+L2-1;
%I have doubled over_r and halved L so that the
%interval is the same, i.e., width is the same
%as it was for the single sample case.
rand('state',sum(100*clock))
%rand('seed',4)
h=(rand(n,1)-1/2)*step; % random per-set offsets, uniform in (-step/2, step/2)
%h=(-1/2:1/n:1/2-1/n)*step;
width=L*step;
y=-width:step/p:width; % fine sampling mesh
ysize=size(y);
ysize=ysize(2);
% y is the sampling set for signal_origin and y+h for signal_shift
signal=zeros(n,ysize);
pad=zeros(n,ysize);
signal_origin=zeros(size(y));
signal_shift=zeros(size(y));
pad_origin=zeros(size(y));
pad_shift=zeros(size(y));
%rand('seed',0)
%this sets the seed to a fixed value, 0, so that I get reproducable results
rand('state',sum(100*clock))
% Build a random test signal as a sum of R complex "boxcar" spectra.
R=100;
%the first two collumns are the amplitudes, real(first) and complex(second),
%i.e., signal_coef(1,q)+i*signal_coef(2,q), the third and fourth are the
%band limits, for example, min(signal_coef(3,q),signal_coef(4,q)) is
%the left bandwidth, the max will give the right bandwidth
signal_coef=rand(4,R);
signal_coef(1,:)=2*(signal_coef(1,:)-1/2);
signal_coef(1,:)=signal_coef(1,:)/norm(signal_coef(1,:),2)/2/pi; % normalize real amplitudes
signal_coef(2,:)=2*(signal_coef(2,:)-1/2);
signal_coef(2,:)=signal_coef(2,:)/norm(signal_coef(2,:),2)/2/pi; % normalize imaginary amplitudes
signal_coef(3,:)=2*(signal_coef(3,:)-1/2);
signal_coef(3,:)=omega*signal_coef(3,:)/max(abs(signal_coef(3,:))); % band edges scaled into [-omega,omega]
signal_coef(4,:)=2*(signal_coef(4,:)-1/2);
signal_coef(4,:)=omega*signal_coef(4,:)/max(abs(signal_coef(4,:)));
%need an axis for the dual space that has the same number of
%elements as y does.
dual_axis=2*p*omega/over_r/(2*p*L+1)*(-p*L:1:p*L);
sample_dual=zeros(size(dual_axis));
dualsize=size(dual_axis);
dualsize=dualsize(2);
% Exact Fourier-domain samples of the signal: accumulate the amplitudes
% of every boxcar whose band covers each dual-axis frequency.
for q=1:R
    for j=1:dualsize
        if dual_axis(j)>=min(signal_coef(3,q),signal_coef(4,q)) & dual_axis(j)<=max(signal_coef(3,q),signal_coef(4,q))
            sample_dual(j)=sample_dual(j)+signal_coef(1,q)+i*signal_coef(2,q);
        end
    end
end
% Closed-form time-domain samples (inverse transform of the boxcar
% spectra). tmp accumulates the limiting value used at t = 0, where the
% formula below has a removable 0/0 singularity.
tmp=0;
for q=1:R
    signal_origin=signal_origin+(signal_coef(1,q)+i*signal_coef(2,q))./(sqrt(2*pi)*2*pi*i*y).*(exp(2*pi*i*y*max(signal_coef(3,q),signal_coef(4,q)))-exp(2*pi*i*y*min(signal_coef(3,q),signal_coef(4,q))))*2*pi;
%    signal_shift=signal_shift+(signal_coef(1,q)+i*signal_coef(2,q))./(sqrt(2*pi)*2*pi*i*(y+h(2))).*(exp(2*pi*i*(y+h(2))*max(signal_coef(3,q),signal_coef(4,q)))-exp(2*pi*i*(y+h(2))*min(signal_coef(3,q),signal_coef(4,q))))*2*pi;
    tmp=tmp+(signal_coef(1,q)+i*signal_coef(2,q))*(max(signal_coef(3,q),signal_coef(4,q))-min(signal_coef(3,q),signal_coef(4,q)))/sqrt(2*pi)*2*pi;
end
% Patch the removable singularity at y == 0 with the limit value tmp.
joe=0;
for q=1:ysize
    if y(q)==0
        joe=q;
    end
end
if joe>0
    signal_origin(joe)=tmp;
end
% Same evaluation on each of the n shifted sample sets y + h(j).
for j=1:n
    tmp=0;
    for q=1:R
        signal(j,:)=signal(j,:)+(signal_coef(1,q)+i*signal_coef(2,q))./(sqrt(2*pi)*2*pi*i*(y+h(j))).*(exp(2*pi*i*(y+h(j))*max(signal_coef(3,q),signal_coef(4,q)))-exp(2*pi*i*(y+h(j))*min(signal_coef(3,q),signal_coef(4,q))))*2*pi;
        tmp=tmp+(signal_coef(1,q)+i*signal_coef(2,q))*(max(signal_coef(3,q),signal_coef(4,q))-min(signal_coef(3,q),signal_coef(4,q)))/sqrt(2*pi)*2*pi;
    end
    joe=0;
    for q=1:ysize
        if y(q)+h(j)==0
            joe=q;
        end
    end
    if joe>0
        signal(j,joe)=tmp;
    end
end
% p=n is the measure of difference between the fine and coarse mesh
% Keep only every p-th fine-mesh sample (the coarse mesh); all other
% entries of pad remain zero.
for j=1:p:ysize
    pad(:,j)=signal(:,j);
end
% Discrete Fourier transform of each padded sample sequence onto dual_axis.
pad_dual=zeros(n,ysize);
for j=1:dualsize
    for k=1:n
        pad_dual(k,j)=sum(pad(k,:).*exp(-2*pi*i*dual_axis(j)*y));
    end
end
pad_dual=pad_dual*(over_r/2/omega/p)/sqrt(2*pi);
%SHOULD THEY BE MULTIPLIED BY p SO THAT THEY MATCH sample_dual?
%this makes it so that the origin matches.
% Undo the linear phase introduced by each time offset h(j).
for j=1:n
    pad_dual(j,:)=pad_dual(j,:).*exp(-2*pi*i*h(j)*dual_axis);
end
% Number of spectral zones needed to cover the oversampled band
% (echoed to the console -- no semicolon).
kappa=max(2,min(n,floor((n+over_r+1)/(n-over_r+1))))
%b=zeros(2*kappa,1);
%B=zeros(2*kappa);
%B(1,1)=1; B(1,2)=1; B(1,3)=-1;
%B(kappa,end)=1; B(kappa,end-1)=1; B(kappa,end-2)=-1;
%
%for j=2:kappa-1
%    B(j,2*(j-1))=-1;
%    B(j,2*(j-1)+1)=1;
%    B(j,2*(j-1)+2)=1;
%    B(j,2*(j-1)+3)=-1;
%end
%b(1)=-1;
%b(kappa)=1;
%for j=1:kappa
%    B(j+kappa,2*j-1)=-1;
%    B(j+kappa,2*j)=1;
%    b(j+kappa)=n-1;
%end
%tmp1=B\b;
% Each zone is an interval of n consecutive integer indices; the zone
% start points are spread evenly across [-n, n-1].
zones=zeros(kappa,2);
for j=1:kappa
    %zones(j,1)=tmp1(2*j-1);
    %zones(j,2)=tmp1(2*j);
    zones(j,1)=round(j*(n+1)/(kappa+1)-n);
    zones(j,2)=zones(j,1)+n-1;
end
%for j=1:kappa
%    if abs(zones(j,1)-round(zones(j,1)))<=abs(zones(j,2)-round(zones(j,2)))
%        zones(j,1)=round(zones(j,1));
%        zones(j,2)=n-1+zones(j,1);
%    else
%        zones(j,2)=round(zones(j,2));
%        zones(j,1)=zones(j,2)-n+1;
%    end
%end
% A is the n x n matrix of offset exponentials (Vandermonde-like in the
% offsets h); its inverse yields the interpolating filter coefficients c
% for each spectral zone.
A=zeros(n);
for j=1:n
    for k=1:n
        A(j,k)=exp(2*pi*i*h(k)*j/step);
    end
end
A_inverse=inv(A);
c=zeros(kappa,n);
for j=1:kappa
    e=zeros(n,1);
    e(1-zones(j,1))=1; % unit vector selecting this zone's index
    % NOTE(review): this reuses the name R (previously the number of
    % boxcar spectra) as a diagonal phase matrix -- intentional but fragile.
    R=diag(exp(2*pi*i*h*(zones(j,1)-1)/step));
    c(j,:)=(inv(R)*A_inverse*e)'; %'
end
%e=zeros(n,1);
%e(1-L2)=1;
%R=diag(exp(2*pi*i*h*(L2-1)/step));
%c(2,:)=(inv(R)*A_inverse*e)'; %'
% Smooth partition of unity over the kappa zones (external helper
% function over_partitions.m -- not visible here).
partition=over_partitions(zones,omega,over_r,n,dual_axis);
%left_partition=zeros(size(dual_axis));
%right_partition=zeros(size(dual_axis));
%for j=1:length(dual_axis)
%    if dual_axis(j)<=(L1-1)/step+omega
%        left_partition(j)=0;
%    elseif (L1-1)/step+omega<dual_axis(j) & dual_axis(j)<-omega
%        left_partition(j)=rho((-dual_axis(j)-omega)/(-(L1-1)/step-2*omega));
%    elseif -omega<=dual_axis(j) & dual_axis(j)<=(L2-1)/step+omega
%        left_partition(j)=1;
%    elseif (L2-1)/step+omega<dual_axis(j) & dual_axis(j)<(R1+1)/step-omega
%        left_partition(j)=rho((dual_axis(j)-((L2-1)/step+omega))/((R1-L2+2)/step-2*omega));
%    else
%        left_partition(j)=0;
%    end
%end
%for j=1:length(dual_axis)
%    if dual_axis(j)<=(L2-1)/step+omega
%        right_partition(j)=0;
%    elseif (L2-1)/step+omega<dual_axis(j) & dual_axis(j)<(R1+1)/step-omega
%        right_partition(j)=1-rho((dual_axis(j)-((L2-1)/step+omega))/((R1-L2+2)/step-2*omega));
%    elseif (R1+1)/step-omega<=dual_axis(j) & dual_axis(j)<=omega
%        right_partition(j)=1;
%    elseif omega<dual_axis(j) & dual_axis(j)<(R2+1)/step-omega
%        right_partition(j)=rho((dual_axis(j)-omega)/((R2+1)/step-2*omega));
%    else
%        right_partition(j)=0;
%    end
%end
% Filter for sample set j: c-weighted combination of the zone partitions.
filters=zeros(n,length(dual_axis));
for j=1:n
    for k=1:kappa
        filters(j,:)=filters(j,:)+c(k,j)*partition(k,:);
    end
end
% Apply each filter to its sample spectrum and sum over the n sample
% sets to obtain the reconstructed spectrum.
filtered_duals=zeros(size(filters));
for j=1:n
    filtered_duals(j,:)=filtered_duals(j,:)+filters(j,:).*pad_dual(j,:);
end
recon_dual=zeros(size(dual_axis));
for j=1:n
    recon_dual=recon_dual+filtered_duals(j,:);
end
% Inverse DFT of the reconstructed spectrum back onto the fine time mesh.
recon_signal=zeros(size(y));
k=-p*L:1:p*L;
for j=1:dualsize
    recon_signal=recon_signal+recon_dual(j)*exp(2*pi*i*k/(2*p*L+1)*k(j));
end
recon_signal=recon_signal/(2*p*L+1)*sqrt(2*pi)*omega*p/over_r*2*p;
%hold off
% Log-scale plot of the pointwise reconstruction error.
semilogy(y,abs(recon_signal-signal_origin),'k');
%pause
%hold on
%plot(h,10^(-2),'o');
%h/step
cond(A) % condition number of the offset matrix, echoed to the console
done=clock;
time_cost=done-begin;
% NOTE(review): the hours term uses 360, not 3600 -- likely a typo in the
% seconds conversion; verify before trusting the reported runtime.
time_cost=time_cost(6)+60*time_cost(5)+360*time_cost(4)
%L
%step
%for j=1:n
%    hold off
%    plot(dual_axis,real(filters(j,:)))
%    hold on
%    plot(dual_axis,imag(filters(j,:)),'r')
%    hold off
%    pause
%end
|
{"author": "yueyuzhao", "repo": "gyrophone", "sha": "aa816eec3d7a17d9e30ab7afa0d4b79ef0a7a82e", "save_path": "github-repos/MATLAB/yueyuzhao-gyrophone", "path": "github-repos/MATLAB/yueyuzhao-gyrophone/gyrophone-aa816eec3d7a17d9e30ab7afa0d4b79ef0a7a82e/strohmer_tanner_code/arbitrary_over.m"}
|
# Best Model: valid dice: 0.83
# use loop and list to create the only coarse model
# 2018.4.2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets
from torchvision import transforms
import torchvision as tv
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # argparse settings
    import argparse
    parser = argparse.ArgumentParser(description='PROS12')
    # NOTE(review): the default batch size is 32 but the help text says 64
    # -- confirm which value is intended.
    parser.add_argument('-b', '--batch', type=int, default=32, help='input batch size for training (default: 64)')
    parser.add_argument('-e', '--epoch', type=int, default=50, help='number of epochs to train (default: 50)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)')
    parser.add_argument('--gpu', type=int, default=4, help='GPU (default: 4)')
    args = parser.parse_args()
    # HyperParameter
    epoch = args.epoch
    batch_size = args.batch
    lr = args.lr
    # Device ids 0..gpu-1, consumed by DataParallel in the training section.
    gpu_list = [item for item in range(args.gpu)]
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,),(0.5,)) # tuple for one channel
    ])
    # Project-local dataset wrapper (dataset2d.py, not visible here).
    from dataset2d import PROS12
    training_set = PROS12(train=True, transform=transform)
    testing_set = PROS12(train=False,transform=transform)
    trainloader = torch.utils.data.DataLoader(training_set, batch_size=batch_size, shuffle=True)
    testloader = torch.utils.data.DataLoader(testing_set, batch_size=batch_size, shuffle=True)
def to_var(x, volatile):
    """Wrap a tensor in an autograd Variable, moving it to the GPU first
    when CUDA is available (legacy pre-0.4 PyTorch idiom, kept for
    consistency with the rest of this file)."""
    device_tensor = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_tensor, volatile=volatile)
class DownTransition(nn.Module):
def __init__(self,inchan,outchan,layer,dilation_=1):
super(DownTransition, self).__init__()
self.dilation_ = dilation_
self.outchan = outchan
self.layer = layer
self.down = nn.Conv2d(in_channels=inchan,out_channels=self.outchan,kernel_size=3,padding=1,stride=2) # /2
self.bn = nn.BatchNorm2d(num_features=self.outchan,affine=True)
self.conv = self.make_layers()
self.relu = nn.ELU(inplace=True)
def make_layers(self):
layers = []
for i in range(self.layer):
layers.append(nn.ELU(inplace=True))
# padding = dilation
layers.append(nn.Conv2d(self.outchan,self.outchan,kernel_size=3,padding=self.dilation_,stride=1,dilation=self.dilation_))
layers.append(nn.BatchNorm2d(num_features=self.outchan,affine=True))
return nn.Sequential(*layers)
def forward(self,x):
out1 = self.down(x)
out2 = self.conv(self.bn(out1))
out2 = self.relu(torch.add(out1,out2))
return out2
class UpTransition(nn.Module):
def __init__(self,inchan,outchan,layer,last=False):
super(UpTransition, self).__init__()
self.last = last
self.outchan = outchan
self.layer = layer
self.up = nn.ConvTranspose2d(in_channels=inchan,out_channels=self.outchan,kernel_size=4,padding=1,stride=2) # *2
self.bn = nn.BatchNorm2d(num_features=self.outchan,affine=True)
self.conv = self.make_layers()
self.relu = nn.ELU(inplace=True)
if self.last is True:
self.conv1 = nn.Conv2d(self.outchan,2,kernel_size=1) # 1*1 conv
self.softmax = F.softmax
def make_layers(self):
layers = []
for i in range(self.layer):
layers.append(nn.ELU(inplace=True))
layers.append(nn.Conv2d(self.outchan,self.outchan,kernel_size=3,padding=1,stride=1))
layers.append(nn.BatchNorm2d(num_features=self.outchan,affine=True))
return nn.Sequential(*layers)
def forward(self,x):
out1 = self.up(x)
out = self.conv(self.bn(out1))
out = self.relu(torch.add(out1,out))
if self.last is True:
out = self.conv1(out)
out = out.permute(0, 2, 3, 1).contiguous()
# print('forward',out.shape)
# flatten to (N,HW,C=2)
# out = out.view(out.size(0),-1,2)
# out = self.softmax(out,dim=2)
# out = torch.max(out,dim=2)[1].float()
# print('softmax',out.shape)
# result (N,HW)
out = out.view(out.numel() // 2, 2)
out = self.softmax(out,dim=1) # default
return out
class Vnet(nn.Module):
    """2-D V-Net-style encoder/decoder segmentation network.

    inchans/outchans/down_layers/up_layers/dilations are parallel lists,
    one entry per encoder block (mirrored by a decoder block). The first
    decoder block (i == 0, applied last in forward) carries last=True and
    therefore returns flattened per-pixel softmax output of shape
    (N*H*W, 2).
    """
    # 1*512*512
    def __init__(self, inchans, outchans, down_layers, up_layers, dilations):
        super(Vnet,self).__init__()
        # Stem: 7x7 conv lifting the single input channel to 8 features.
        self.layer0 = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7, stride=1, padding=3,bias=False),
            nn.BatchNorm2d(8,affine=True),
            nn.ELU(inplace=True)
        )
        self.block_num = len(inchans)
        self.down = nn.ModuleList() # must create pytorch module list
        self.up = nn.ModuleList()
        for i in range(self.block_num):
            self.down.append(DownTransition(inchan=inchans[i], outchan=outchans[i], layer=down_layers[i], dilation_=dilations[i]))
            if i==0 :
                # Outermost decoder block also produces the final softmax map.
                self.up.append(UpTransition(inchan=outchans[i], outchan=inchans[i], layer=up_layers[i], last=True))
            else:
                self.up.append(UpTransition(inchan=outchans[i], outchan=inchans[i], layer=up_layers[i]))
        # Legacy hard-coded topology, kept for reference:
        # self.down0 = DownTransition(inchan=8,outchan=32,layer=3,dilation_=2) # 32*128^2
        # self.down1 = DownTransition(inchan=32,outchan=128,layer=3,dilation_=2) # 128*64^2
        # self.down2 = DownTransition(inchan=128,outchan=256,layer=3,dilation_=4) # 256*32^2
        # # self.down3 = DownTransition(inchan=64,outchan=128,layer=2,dilation_=4) # 128*32^2
        # # self.up3 = UpTransition(inchan=128,outchan=64,layer=2) # 64*64^2
        # self.up2 = UpTransition(inchan=256,outchan=128,layer=2) # 32*128^2
        # self.up1 = UpTransition(inchan=128,outchan=32,layer=2) # 16*256^2
        # self.up0 = UpTransition(inchan=32,outchan=8,layer=2,last=True) # 2*512^2
        # He initialization for all convs; BatchNorm starts as identity.
        # NOTE(review): nn.init.kaiming_normal is the deprecated spelling of
        # kaiming_normal_ -- confirm the pinned torch version still ships it.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self,x):
        x = self.layer0(x)
        # Encoder pass: keep every block's output for the skip connections.
        out_down = []
        out_down.append(self.down[0](x))
        for i in range(1,self.block_num):
            out_down.append(self.down[i](out_down[i-1]))
        # Decoder pass: innermost block first, then add the matching skip
        # features before each outer decoder block.
        out_up = self.up[self.block_num-1](out_down[self.block_num-1])
        for i in reversed(range(self.block_num-1)):
            out_up = self.up[i](torch.add(out_up,out_down[i]))
        return out_up
        # out_down0 = self.down0(x)
        # out_down1 = self.down1(out_down0)
        # out_down2 = self.down2(out_down1)
        # # out_down3 = self.down3(out_down2)
        # out_up2 = self.up2(out_down2)
        # # out_up2 = self.up2(torch.add(out_up3,out_down2))
        # out_up1 = self.up1(torch.add(out_up2,out_down1))
        # out_up0 = self.up0(torch.add(out_up1,out_down0))
        # return out_up0
import bioloss
if __name__ == '__main__':
    # Build the coarse segmentation model (3 encoder/decoder stages).
    coarse_vnet = Vnet(inchans=[8,32,128], outchans=[32,128,256], down_layers=[3,3,3], up_layers=[2,2,2], dilations=[2,2,4])
    if torch.cuda.is_available():
        coarse_vnet = torch.nn.DataParallel(coarse_vnet, device_ids=gpu_list).cuda()
    optimizer = torch.optim.Adam(coarse_vnet.parameters(), lr=lr, weight_decay=0.0001)
    # optimizer = torch.optim.SGD(coarse_vnet.parameters(), lr=lr, momentum=0.9, weight_decay=0.0001, nesterov=True)
    # scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[14,24,34], gamma=0.1)
    # mode='max' because the monitored quantity is the validation dice
    # coefficient, which should increase.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=6, threshold=0.04, threshold_mode='abs')
    for e in range(epoch):
        # ---- training pass ----
        coarse_vnet.train()
        total_loss = 0.0
        # scheduler.step()
        for index,(image,target) in enumerate(trainloader):
            # print ("Train Epoch[%d/%d], Iter[%d]" %(e+1, epoch, index))
            optimizer.zero_grad()
            image, target = to_var(image,volatile=False).float(), to_var(target,volatile=False).float()
            output = coarse_vnet(image)
            # Flatten the mask to (N*H*W,) to match the flattened softmax
            # output produced by the last UpTransition block.
            target = target.view(target.numel())
            # NOTE(review): bioloss.dice_loss (project-local) is both
            # backpropagated and reported as "Dice Coef" -- confirm whether
            # it returns a loss (1 - dice) or the raw coefficient.
            loss = bioloss.dice_loss(output, target)
            #loss = F.nll_loss(output, target)
            # loss.data[0] is the legacy (pre-0.4) spelling of loss.item().
            total_loss += loss.data[0]
            loss.backward()
            optimizer.step()
            # del loss
            # del output
            # if index == 0 and e%10 == 9:
            #     image = image.data.cpu().numpy().reshape(-1,512,512)
            #     target = target.data.cpu().numpy().reshape(-1,512,512)
            #     output = output.data.max(dim=1)[1].cpu().numpy().reshape(-1,512,512)
            #     for i in range(batch_size):
            #         plt.figure(figsize=(100,100))
            #         plt.subplot(1,3,1)
            #         plt.imshow(image[i],cmap="gray") # original image
            #         plt.subplot(1,3,2)
            #         plt.imshow(target[i],cmap="Set1") # ground truth
            #         plt.subplot(1,3,3)
            #         plt.imshow(output[i],cmap="Set1") # my prediction
            #         plt.show()
        print ("Epoch[%d/%d], Train Dice Coef: %.4f" %(e+1, epoch, total_loss/len(trainloader)))
        # ---- validation pass ----
        coarse_vnet.eval()
        total_loss = 0.0
        for index,(image,target) in enumerate(testloader):
            print(index)
            # print ("Valid Epoch[%d/%d], Iter[%d]" %(e+1, epoch, index))
            # volatile=True is the legacy equivalent of torch.no_grad().
            image, target = to_var(image,volatile=True), to_var(target,volatile=True).float()
            output = coarse_vnet(image)
            target = target.view(target.numel()) # (NDHW)
            # total_loss += F.nll_loss(output, target)
            loss = bioloss.dice_loss(output, target)
            total_loss += loss.data[0]
            # Explicitly free per-batch tensors to reduce GPU memory pressure.
            del image, target, loss, output
            # if e == 50 or e == 32:
            #     image = image.data.cpu().numpy().reshape(-1,512,512)
            #     target = target.data.cpu().numpy().reshape(-1,512,512)
            #     output = output.data.max(dim=1)[1].cpu().numpy().reshape(-1,512,512)
            #     if index == 0:
            #         image_save = image
            #         target_save = target
            #         output_save = output
            #     elif index == 1:
            #         image_save = np.concatenate((image_save,image),axis=0)
            #         target_save = np.concatenate((target_save,target),axis=0)
            #         output_save = np.concatenate((output_save,output),axis=0)
            #     else:
            #         image_save = np.concatenate((image_save,image),axis=0)
            #         target_save = np.concatenate((target_save,target),axis=0)
            #         output_save = np.concatenate((output_save,output),axis=0)
            #     print(image_save.shape,target_save.shape,output_save.shape)
            #     if e == 50:
            #         np.save('data/image_save_50.npy',image_save)
            #         np.save('data/target_save_50.npy',target_save)
            #         np.save('data/output_save_50.npy',output_save)
            #     else:
            #         np.save('data/image_save_32.npy',image_save)
            #         np.save('data/target_save_32.npy',target_save)
            #         np.save('data/output_save_32.npy',output_save)
            # if index == 0 and e%10 == 9:
            #     image = image.data.cpu().numpy().reshape(-1,512,512)
            #     target = target.data.cpu().numpy().reshape(-1,512,512)
            #     output = output.data.max(dim=1)[1].cpu().numpy().reshape(-1,512,512)
            #     for i in range(batch_size):
            #         plt.figure(figsize=(100,100))
            #         plt.subplot(1,3,1)
            #         plt.imshow(image[i],cmap="gray") # original image
            #         plt.subplot(1,3,2)
            #         plt.imshow(target[i],cmap="Set1") # ground truth
            #         plt.subplot(1,3,3)
            #         plt.imshow(output[i],cmap="Set1") # my prediction
            #         plt.show()
        print ("Epoch[%d/%d], Valid Dice Coef: %.4f" %(e+1, epoch, total_loss/len(testloader)))
        # Step the plateau scheduler on the mean validation dice.
        scheduler.step(total_loss/len(testloader))
        print('learning rate',optimizer.param_groups[0]['lr'])
    # print ("Epoch[%d/%d], Valid Loss: %.2f, Valid Acc: %.2f" %(e+1, epoch, total_loss, 100*accuracy/cnt))
    # print('total time cost: %s'%(str(datetime.now()-start)[:7]))
    # torch.save(coarse_vnet.state_dict(),'coarse_vnet'+str(datetime.now())[5:16]+'.pkl')
|
{"hexsha": "4f36b0d1bd91cef9de532815293aa69fbbb71ea3", "size": 11181, "ext": "py", "lang": "Python", "max_stars_repo_path": "dilation2d_v2.0.py", "max_stars_repo_name": "twni2016/Prostate-Image-Segmentation", "max_stars_repo_head_hexsha": "d68e61f3910477c6a635aa8e0f4b35304798586e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-14T04:06:32.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-02T18:16:56.000Z", "max_issues_repo_path": "dilation2d_v2.0.py", "max_issues_repo_name": "twni2016/Prostate-Image-Segmentation", "max_issues_repo_head_hexsha": "d68e61f3910477c6a635aa8e0f4b35304798586e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dilation2d_v2.0.py", "max_forks_repo_name": "twni2016/Prostate-Image-Segmentation", "max_forks_repo_head_hexsha": "d68e61f3910477c6a635aa8e0f4b35304798586e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4760479042, "max_line_length": 128, "alphanum_fraction": 0.6728378499, "include": true, "reason": "import numpy", "num_tokens": 3438}
|
import math
from typing import Optional
import cv2
import numpy as np
from paralleldomain.constants import CAMERA_MODEL_OPENCV_FISHEYE, CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_PD_FISHEYE
from paralleldomain.utilities.mask import lookup_values
class DistortionLookupTable(np.ndarray):
"""Container object for distortion lookup tables used in distortion model `pd_fisheye`.
Can be accessed like any `np.ndarray`"""
@classmethod
def from_ndarray(cls, data: np.ndarray) -> "DistortionLookupTable":
"""Takes a `np.ndarray` and creates a :obj:`DistortionLookupTable` instance from it.
Args:
data: `np.ndarray` of shape (N x 2). Array will be sorted by first column. Last two rows will be used to
extrapolate for maximum valid angle value of Pi.
Returns:
Distortion lookup table to be used with projection functions using distortion model `pd_fisheye`.
"""
data_sorted = data[np.argsort(data[:, 0])].astype(np.float)
if data_sorted[:, 0].max() < math.pi:
extrapolated_alpha = (math.pi - data_sorted[-2, 0]) / (data_sorted[-1, 0] - data_sorted[-2, 0])
extrapolated_theta_d = data_sorted[-2, 1] + extrapolated_alpha * (data_sorted[-1, 1] - data_sorted[-2, 1])
data_sorted = np.vstack([data_sorted, np.array([math.pi, extrapolated_theta_d])])
return data_sorted.view(cls)
def _project_points_3d_to_2d_pd_fisheye(
k_matrix: np.ndarray,
points_3d: np.ndarray,
distortion_lookup: DistortionLookupTable,
) -> np.ndarray:
xy_prime = points_3d[:, [0, 1]]
r = np.linalg.norm(xy_prime, axis=1)
theta = np.arctan2(r, points_3d[:, 2])
theta_d = np.interp(x=theta, xp=distortion_lookup[:, 0], fp=distortion_lookup[:, 1])
r_d = (theta_d / r).reshape(-1, 1)
xy_double_prime = r_d * xy_prime
xy_double_prime[np.isnan(xy_double_prime)] = 0.0
xy_double_prime_one = np.ones(shape=(len(xy_double_prime), 1))
uv = (k_matrix @ np.hstack([xy_double_prime, xy_double_prime_one]).T).T[:, :2]
return uv
def project_points_3d_to_2d(
k_matrix: np.ndarray,
camera_model: str,
points_3d: np.ndarray,
distortion_parameters: Optional[np.ndarray] = None,
distortion_lookup: Optional[DistortionLookupTable] = None,
) -> np.ndarray:
"""Projects an array of 3D points in Cartesian coordinates onto an image plane.
Args:
k_matrix: Camera intrinsic matrix. Definition can be found in
`OpenCV documentation <https://docs.opencv.org/4.5.3/dc/dbb/tutorial_py_calibration.html>`_.
camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
More details in :obj:`~.model.sensor.CameraModel`.
points_3d: A matrix with dimensions (nx3) containing the points.
Points must be already in the camera's coordinate system.
distortion_parameters: Array of applicable distortion parameters for
distortion models `opencv_pinhole` and `opencv_fisheye`.
distortion_lookup: Table of undistorted and distorted angles. Required for `pd_fisheye` model.
Returns:
A matrix with dimensions (nx2) containing the point projections. :obj:`dtype` is :obj:`float` and values need
to be rounded to integers by the user to receive actual pixel coordinates. Includes all points, independent
if they are on the image plane or outside. Points behind the image plane will be projected, too. If values are
not valid (e.g., for undistorted pinhole cameras), filtering needs to be applied by the calling function.
"""
k_matrix = k_matrix.reshape(3, 3).astype(np.float)
points_3d = points_3d.reshape(-1, 3).astype(np.float)
if distortion_parameters is not None:
distortion_parameters = distortion_parameters.reshape(1, -1).astype(np.float)
if camera_model == CAMERA_MODEL_OPENCV_PINHOLE:
uv, _ = cv2.projectPoints(
objectPoints=points_3d,
rvec=(0, 0, 0), # already in camera sensor coordinate system
tvec=(0, 0, 0), # already in camera sensor coordinate system
cameraMatrix=k_matrix,
distCoeffs=distortion_parameters,
)
elif camera_model == CAMERA_MODEL_OPENCV_FISHEYE:
points_3d = np.expand_dims(points_3d, -2) # cv2.fisheye.projectPoints expects dimensions (N x 1 x 3)
uv, _ = cv2.fisheye.projectPoints(
objectPoints=points_3d,
rvec=(0, 0, 0), # already in camera sensor coordinate system
tvec=(0, 0, 0), # already in camera sensor coordinate system
K=k_matrix,
D=distortion_parameters,
)
elif camera_model == CAMERA_MODEL_PD_FISHEYE:
uv = _project_points_3d_to_2d_pd_fisheye(
k_matrix=k_matrix,
points_3d=points_3d,
distortion_lookup=distortion_lookup,
)
else:
raise NotImplementedError(f'Distortion Model "{camera_model}" not implemented.')
return uv.reshape(-1, 2)
def project_points_2d_to_3d(
k_matrix: np.ndarray,
camera_model: str,
points_2d: np.ndarray,
depth: np.ndarray,
distortion_parameters: Optional[np.ndarray] = None,
distortion_lookup: Optional[DistortionLookupTable] = None,
interpolate: bool = True,
) -> np.ndarray:
"""Maps image plane coordinates to 3D points in Cartesian coordinates.
Args:
k_matrix: Camera intrinsic matrix. Definition can be found in
`OpenCV documentation <https://docs.opencv.org/4.5.3/dc/dbb/tutorial_py_calibration.html>`_.
camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
More details in :obj:`~.model.sensor.CameraModel`.
points_2d: A matrix with dimensions (nx2) containing the points.
Points must be in image coordinate system (x,y).
depth: Depth mask with the same dimensions as the image canvas.
distortion_parameters: Array of applicable distortion parameters for
distortion models `opencv_pinhole` and `opencv_fisheye`.
distortion_lookup: Table of undistorted and distorted angles. Required for `pd_fisheye` model.
interpolate: When points are not exactly on an image pixel, apply bi-linear interpolation to estimate
the corresponding depth value. Default: True.
Returns:
A matrix with dimensions (nx3) containing the point projections in 3D using the provided depth mask.
"""
k_matrix = k_matrix.reshape(3, 3).astype(np.float)
points_2d = points_2d.reshape(-1, 2).astype(np.float)
# Uncomment when OpenCV with distortion reprojection is being implemented
# if distortion_parameters is not None:
# distortion_parameters = distortion_parameters.reshape(1, -1).astype(np.float)
depth_for_points_2d = lookup_values(mask=depth, x=points_2d[:, 0], y=points_2d[:, 1], interpolate=interpolate)
if camera_model == CAMERA_MODEL_OPENCV_PINHOLE:
points_3d = (
np.linalg.inv(k_matrix) @ np.hstack([points_2d, np.ones(shape=(len(points_2d), 1))]).T
).T * depth_for_points_2d
elif camera_model == CAMERA_MODEL_PD_FISHEYE:
points_3d_distorted = (np.linalg.inv(k_matrix) @ np.hstack([points_2d, np.ones(shape=(len(points_2d), 1))]).T).T
xy_prime = points_3d_distorted[:, [0, 1]]
theta_d = np.linalg.norm(xy_prime, axis=1)
theta = np.interp(x=theta_d, xp=distortion_lookup[:, 1], fp=distortion_lookup[:, 0])
r = np.tan(theta)
xy_double_prime = (r / theta_d).reshape(-1, 1) * xy_prime
xy_double_prime[np.isnan(xy_double_prime)] = 0.0
xy_double_prime_one = np.ones(shape=(len(xy_double_prime), 1))
points_3d = np.hstack([xy_double_prime, xy_double_prime_one]) * depth_for_points_2d
else:
raise NotImplementedError(f'Distortion Model "{camera_model}" not implemented.')
return points_3d.reshape(-1, 3)
def points_2d_inside_image(
    width: int,
    height: int,
    camera_model: str,
    points_2d: np.ndarray,
    points_3d: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Returns the indices for an array of 2D image points that are inside the image canvas.

    Args:
        width: Pixel width of the image canvas.
        height: Pixel height of the image canvas.
        camera_model: One of `opencv_pinhole`, `opencv_fisheye`, `pd_fisheye`.
            More details in :obj:`~.model.sensor.CameraModel`.
        points_2d: A matrix with dimensions (nx2) containing the points that should be tested
            if inside the image canvas. Points must be in image coordinate system (x,y).
        points_3d: Optional array of size (nx3) which provides the 3D camera coordinates for each point. Required for
            camera models `opencv_pinhole` and `opencv_fisheye`.

    Returns:
        An array with dimensions (n,).

    Raises:
        ValueError: If `points_3d` is required for `camera_model` but not provided, or if
            `points_2d` and `points_3d` have different lengths.
    """
    # Pinhole/fisheye models need camera-space depth to reject points behind the image plane.
    requires_3d = camera_model in (CAMERA_MODEL_OPENCV_PINHOLE, CAMERA_MODEL_OPENCV_FISHEYE)
    if requires_3d and points_3d is None:
        raise ValueError(f"`points_3d` must be provided for camera model {camera_model}")
    # Only compare lengths when 3D points exist; previously this called len(None) and
    # raised TypeError for the `pd_fisheye` model without 3D points.
    if points_3d is not None and len(points_2d) != len(points_3d):
        raise ValueError(
            f"Mismatch in length between `points_2d` and `points_3d` with {len(points_2d)} vs. {len(points_3d)}"
        )
    return np.where(
        (points_2d[:, 0] >= 0)
        & (points_2d[:, 0] < width)
        & (points_2d[:, 1] >= 0)
        & (points_2d[:, 1] < height)
        & (points_3d[:, 2] > 0 if requires_3d else True)
    )
|
{"hexsha": "379675c566c4246e9e6312ca7b98b04df749b29e", "size": 9575, "ext": "py", "lang": "Python", "max_stars_repo_path": "paralleldomain/utilities/projection.py", "max_stars_repo_name": "parallel-domain/pd-sdk", "max_stars_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-11-17T17:23:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:51:23.000Z", "max_issues_repo_path": "paralleldomain/utilities/projection.py", "max_issues_repo_name": "parallel-domain/pd-sdk", "max_issues_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-02T17:16:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T12:47:13.000Z", "max_forks_repo_path": "paralleldomain/utilities/projection.py", "max_forks_repo_name": "parallel-domain/pd-sdk", "max_forks_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-09T07:03:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T15:53:48.000Z", "avg_line_length": 44.5348837209, "max_line_length": 120, "alphanum_fraction": 0.6807310705, "include": true, "reason": "import numpy", "num_tokens": 2490}
|
#ifndef MANUALPHASE_HPP
#define MANUALPHASE_HPP
#include <set>
#include <cmath>
#include <Eigen/Core>
#include <Eigen/Dense>
namespace raisim
{
constexpr int n_gait = 9;
template<typename T>
// Open-loop per-leg gait phase generator.
//
// Each of the four legs carries an oscillator stored as a unit vector
// q_ = (cos(theta), sin(theta)) together with its angular velocity omega_.
// step() advances a scalar clock (phase_scalar_) and rebuilds q_/omega_
// from the piecewise-linear phase profile of the active gait (gait_idx_,
// one of n_gait predefined gaits).  Gait switches requested through
// change_gait() take effect only at the start of a cycle.
//
// Leg index layout: legs (0, 3) map to q_[0], q_[1], q_[6], q_[7];
// legs (1, 2) map to q_[2]..q_[5].
template<typename T>
class ManualPhase
{
public:
    ManualPhase()
    {
        // Pre-compute each gait's cycle duration and stance duration.
        for (int i = 0; i < n_gait; ++i)
        {
            TIME[i] = UNIT[i] * unit_time_;
            STANCE_TIME[i] = DUTY_UNIT[i] * unit_time_ / (UNIT[i] / 2.0);
        }
        reset();
    }

    // Per-leg phase angle in [-pi, pi], recovered from q_ via atan2.
    Eigen::Matrix<T, Eigen::Dynamic, 1> get_raw_phase()
    {
        phase_raw_.setZero();
        for (int i = 0; i < 4; ++i)
        {
            phase_raw_[i] = std::atan2(q_[2 * i + 1], q_[2 * i]);
        }
        return phase_raw_;
    }

    // Current angular velocity of every leg oscillator.
    Eigen::Matrix<T, 4, 1> get_omega()
    {
        return omega_;
    }

    int get_hold_leg()
    {
        // not implemented in manual phase
        return -1;
    }

    bool get_transition_status()
    {
        // not implemented in manual phase
        return false;
    }

    inline void three_leg_mode(int leg)
    {
        // not implemented in manual phase
        ;
    }

    // Request a gait switch; it is applied at the next cycle boundary.
    inline void change_gait(int gait_idx)
    {
        target_gait_idx_ = gait_idx;
    }

    // Apply a pending gait switch, but only right at the start of a cycle
    // (phase_scalar_ within half a timestep of 0) so legs do not jump.
    inline void change_gait_()
    {
        if (target_gait_idx_ != gait_idx_ && std::fabs(phase_scalar_) <= dt_ / 2.0)
        {
            gait_idx_ = target_gait_idx_;
        }
    }

    inline int get_gait_index()
    {
        return gait_idx_;
    }

    inline T get_stance_time()
    {
        return STANCE_TIME[gait_idx_];
    }

    // Restore gait-0 defaults and put every oscillator at q = (0, 1).
    inline void reset()
    {
        unit_ = UNIT[0];
        time_ = TIME[0];
        duty_unity_ = DUTY_UNIT[0];
        stance_time_ = STANCE_TIME[0];
        q_ << 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0;
        q_dot_.setZero();
        // NOTE(review): q_ = (0, 1) corresponds to atan2(1, 0) = +pi/2,
        // while phase_raw_ is seeded with -pi/2; get_raw_phase()
        // recomputes it from q_, so the seed value is never observed.
        phase_raw_ << - M_PI / 2.0, - M_PI / 2.0, - M_PI / 2.0, - M_PI / 2.0;
        phase_ = {0.0, 0.0, 0.0, 0.0};
        omega_ << - M_PI / unit_time_, - M_PI / unit_time_, - M_PI / unit_time_, - M_PI / unit_time_;
        phase_scalar_ = 0.0;
    }

    // Advance the gait clock by one dt_ and refresh q_/omega_.
    inline void step()
    {
        if (phase_scalar_ > TIME[gait_idx_])
        {
            phase_scalar_ = 0.0;  // wrap around at the end of the cycle
        }
        change_gait_();
        get_q_and_q_dot();
        phase_scalar_ += dt_;
    }

    // Rebuild q_ (cos/sin of each leg's phase) and omega_ for the active
    // gait.  BUGFIX: in gaits 3, 4, 6 and 8 the "legs 1, 2" sections used
    // to overwrite omega_[0]/omega_[3] instead of writing omega_[1]/
    // omega_[2], leaving legs 1 and 2 with stale angular velocities.
    inline void get_q_and_q_dot()
    {
        if (gait_idx_ == 0)
        {
            T theta = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            for (int i = 0; i < 4; ++i)
            {
                q_[2 * i] = std::cos(theta);
                q_[2 * i + 1] = std::sin(theta);
            }
            omega_.setConstant(- M_PI / unit_time_);
        }
        if (gait_idx_ == 1)
        {
            T theta_fast = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            T theta_slow = - phase_scalar_ / unit_time_ * M_PI / 3;
            if (phase_scalar_ / unit_time_ < 3)
            {
                // 0, 3
                q_[0] = std::cos(theta_slow);
                q_[1] = std::sin(theta_slow);
                q_[6] = std::cos(theta_slow);
                q_[7] = std::sin(theta_slow);
                omega_[0] = - M_PI / unit_time_ / 3;
                omega_[3] = - M_PI / unit_time_ / 3;
                // 1, 2
                q_[2] = std::cos(theta_fast);
                q_[3] = std::sin(theta_fast);
                q_[4] = std::cos(theta_fast);
                q_[5] = std::sin(theta_fast);
                omega_[1] = - M_PI / unit_time_;
                omega_[2] = - M_PI / unit_time_;
            }
            else
            {
                // Second half of the cycle: the pairs swap roles.
                theta_fast += M_PI;
                theta_slow += M_PI;
                // 0, 3
                q_[0] = std::cos(theta_fast);
                q_[1] = std::sin(theta_fast);
                q_[6] = std::cos(theta_fast);
                q_[7] = std::sin(theta_fast);
                omega_[0] = - M_PI / unit_time_;
                omega_[3] = - M_PI / unit_time_;
                // 1, 2
                q_[2] = std::cos(theta_slow);
                q_[3] = std::sin(theta_slow);
                q_[4] = std::cos(theta_slow);
                q_[5] = std::sin(theta_slow);
                omega_[1] = - M_PI / unit_time_ / 3;
                omega_[2] = - M_PI / unit_time_ / 3;
            }
        }
        if (gait_idx_ == 2)
        {
            T theta_fast = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            T theta_slow = - phase_scalar_ / unit_time_ * M_PI / 2 - M_PI / 2;
            // 0, 3
            q_[0] = std::cos(theta_slow);
            q_[1] = std::sin(theta_slow);
            q_[6] = std::cos(theta_slow);
            q_[7] = std::sin(theta_slow);
            omega_[0] = - M_PI / unit_time_ / 2;
            omega_[3] = - M_PI / unit_time_ / 2;
            // 1, 2
            q_[2] = std::cos(theta_fast);
            q_[3] = std::sin(theta_fast);
            q_[4] = std::cos(theta_fast);
            q_[5] = std::sin(theta_fast);
            omega_[1] = - M_PI / unit_time_;
            omega_[2] = - M_PI / unit_time_;
        }
        if (gait_idx_ == 3)
        {
            T theta_fast = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            T theta_slow = - phase_scalar_ / unit_time_ * M_PI / 2 - M_PI;
            // 0, 3
            q_[0] = std::cos(theta_fast);
            q_[1] = std::sin(theta_fast);
            q_[6] = std::cos(theta_fast);
            q_[7] = std::sin(theta_fast);
            omega_[0] = - M_PI / unit_time_;
            omega_[3] = - M_PI / unit_time_;
            // 1, 2 (fixed: previously wrote omega_[0]/omega_[3] again)
            q_[2] = std::cos(theta_slow);
            q_[3] = std::sin(theta_slow);
            q_[4] = std::cos(theta_slow);
            q_[5] = std::sin(theta_slow);
            omega_[1] = - M_PI / unit_time_ / 2;
            omega_[2] = - M_PI / unit_time_ / 2;
        }
        if (gait_idx_ == 4)
        {
            T theta_fast = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            T theta_slow = - phase_scalar_ / unit_time_ * M_PI / 3.0 + M_PI;
            // 0, 3
            q_[0] = std::cos(theta_fast);
            q_[1] = std::sin(theta_fast);
            q_[6] = std::cos(theta_fast);
            q_[7] = std::sin(theta_fast);
            omega_[0] = - M_PI / unit_time_;
            omega_[3] = - M_PI / unit_time_;
            // 1, 2 (fixed: previously wrote omega_[0]/omega_[3] again)
            q_[2] = std::cos(theta_slow);
            q_[3] = std::sin(theta_slow);
            q_[4] = std::cos(theta_slow);
            q_[5] = std::sin(theta_slow);
            omega_[1] = - M_PI / unit_time_ / 3;
            omega_[2] = - M_PI / unit_time_ / 3;
        }
        if (gait_idx_ == 5)
        {
            T theta_fast = - phase_scalar_ / unit_time_ * M_PI + M_PI;
            T theta_slow = - phase_scalar_ / unit_time_ * M_PI / 3.0 + M_PI * 2.0 / 3.0;
            // 0, 3
            q_[0] = std::cos(theta_slow);
            q_[1] = std::sin(theta_slow);
            q_[6] = std::cos(theta_slow);
            q_[7] = std::sin(theta_slow);
            omega_[0] = - M_PI / unit_time_ / 3;
            omega_[3] = - M_PI / unit_time_ / 3;
            // 1, 2
            q_[2] = std::cos(theta_fast);
            q_[3] = std::sin(theta_fast);
            q_[4] = std::cos(theta_fast);
            q_[5] = std::sin(theta_fast);
            omega_[1] = - M_PI / unit_time_;
            omega_[2] = - M_PI / unit_time_;
        }
        if (gait_idx_ == 6)
        {
            T theta_1 = - phase_scalar_ / unit_time_ * M_PI / 2.0 + M_PI;
            T theta_2 = - phase_scalar_ / unit_time_ * M_PI / 2.0 + M_PI / 2.0;
            // 0, 3
            q_[0] = std::cos(theta_1);
            q_[1] = std::sin(theta_1);
            q_[6] = std::cos(theta_1);
            q_[7] = std::sin(theta_1);
            omega_[0] = - M_PI / unit_time_ / 2;
            omega_[3] = - M_PI / unit_time_ / 2;
            // 1, 2 (fixed: previously wrote omega_[0]/omega_[3] again)
            q_[2] = std::cos(theta_2);
            q_[3] = std::sin(theta_2);
            q_[4] = std::cos(theta_2);
            q_[5] = std::sin(theta_2);
            omega_[1] = - M_PI / unit_time_ / 2;
            omega_[2] = - M_PI / unit_time_ / 2;
        }
        if (gait_idx_ == 7)
        {
            if (phase_scalar_ / unit_time_ < 2.0)
            {
                T theta_1 = - phase_scalar_ / unit_time_ * M_PI + M_PI;
                // 0, 3
                q_[0] = std::cos(theta_1);
                q_[1] = std::sin(theta_1);
                q_[6] = std::cos(theta_1);
                q_[7] = std::sin(theta_1);
                omega_[0] = - M_PI / unit_time_;
                omega_[3] = - M_PI / unit_time_;
            }
            else
            {
                T theta_1 = - (phase_scalar_ / unit_time_ - 2.0) * M_PI / 2.0 + M_PI;
                // 0, 3
                q_[0] = std::cos(theta_1);
                q_[1] = std::sin(theta_1);
                q_[6] = std::cos(theta_1);
                q_[7] = std::sin(theta_1);
                omega_[0] = - M_PI / unit_time_ / 2.0;
                omega_[3] = - M_PI / unit_time_ / 2.0;
            }
            if (phase_scalar_ / unit_time_ < 3.0 || phase_scalar_ / unit_time_ >= 5.0)
            {
                T theta_2 = phase_scalar_ / unit_time_ < 3.0 ? - (phase_scalar_ / unit_time_ + 1.0) * M_PI / 2.0 + M_PI : - (phase_scalar_ / unit_time_ - 5.0) * M_PI / 2.0 + M_PI;
                // 1, 2
                q_[2] = std::cos(theta_2);
                q_[3] = std::sin(theta_2);
                q_[4] = std::cos(theta_2);
                q_[5] = std::sin(theta_2);
                omega_[1] = - M_PI / unit_time_ / 2.0;
                omega_[2] = - M_PI / unit_time_ / 2.0;
            }
            if (phase_scalar_ / unit_time_ >= 3.0 && phase_scalar_ / unit_time_ < 5.0)
            {
                T theta_2 = - (phase_scalar_ / unit_time_ - 3.0) * M_PI + M_PI;
                // 1, 2
                q_[2] = std::cos(theta_2);
                q_[3] = std::sin(theta_2);
                q_[4] = std::cos(theta_2);
                q_[5] = std::sin(theta_2);
                omega_[1] = - M_PI / unit_time_;
                omega_[2] = - M_PI / unit_time_;
            }
        }
        if (gait_idx_ == 8)
        {
            T theta_1 = phase_scalar_ / unit_time_ < 2.0 ? - (phase_scalar_ / unit_time_) * M_PI + M_PI : - (phase_scalar_ / unit_time_ - 2.0) * M_PI / 2.0 + M_PI;
            T theta_2 = phase_scalar_ / unit_time_ < 4.0 ? - (phase_scalar_ / unit_time_) * M_PI / 2.0 + M_PI : - (phase_scalar_ / unit_time_ - 4.0) * M_PI + M_PI;
            T omega_1 = phase_scalar_ / unit_time_ < 2.0 ? - M_PI / unit_time_ : - M_PI / unit_time_ / 2.0;
            T omega_2 = phase_scalar_ / unit_time_ < 4.0 ? - M_PI / unit_time_ / 2.0 : - M_PI / unit_time_;
            // 0, 3
            q_[0] = std::cos(theta_1);
            q_[1] = std::sin(theta_1);
            q_[6] = std::cos(theta_1);
            q_[7] = std::sin(theta_1);
            omega_[0] = omega_1;
            omega_[3] = omega_1;
            // 1, 2 (fixed: previously wrote omega_[0]/omega_[3] again)
            q_[2] = std::cos(theta_2);
            q_[3] = std::sin(theta_2);
            q_[4] = std::cos(theta_2);
            q_[5] = std::sin(theta_2);
            omega_[1] = omega_2;
            omega_[2] = omega_2;
        }
    }

    // Raw oscillator state: (cos, sin) pairs for each of the four legs.
    Eigen::Matrix<T, 8, 1> get_status()
    {
        return q_;
    }

    Eigen::Matrix<T, 8, 1> get_velocity()
    {
        // not implemented in manual phase
        return q_dot_;
    }

private:
    int gait_idx_ = 0;
    int prev_gait_idx_ = 0;
    int target_gait_idx_ = 0;
    T phase_scalar_ = 0.0;       // elapsed time within the current cycle [s]
    T dt_ = 0.01;                // step() timestep [s]
    T unit_time_ = 0.15;         // duration of one phase unit [s]
    // Per-gait cycle length and duty factor, in phase units.
    std::array<T, n_gait> UNIT = {2.0, 6.0, 4.0, 4.0, 6.0, 6.0, 4.0, 6.0, 6.0};
    std::array<T, n_gait> DUTY_UNIT = {1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 2.0, 3.0, 3.0};
    std::array<T, n_gait> TIME;          // cycle duration [s], filled in ctor
    std::array<T, n_gait> STANCE_TIME;   // stance duration [s], filled in ctor
    T unit_, duty_unity_, time_, stance_time_;
    Eigen::Matrix<T, 8, 1> q_;
    Eigen::Matrix<T, 8, 1> q_dot_;
    Eigen::Matrix<T, 4, 1> phase_raw_;   // -pi to +pi
    std::array<T, 4> phase_;             // 0 to 2 * pi
    Eigen::Matrix<T, 4, 1> omega_;
};
}
#endif
|
{"hexsha": "603fc96c27d46bd0f8770e75157eefc30c2d5497", "size": 13427, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/ManualPhase.hpp", "max_stars_repo_name": "ZJU-XMech/PhaseGuidedControl", "max_stars_repo_head_hexsha": "f8a35ae8e1f903e948710b50681d2aa59046150e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2021-12-15T07:37:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:45:42.000Z", "max_issues_repo_path": "src/ManualPhase.hpp", "max_issues_repo_name": "ZJU-XMech/PhaseGuidedControl", "max_issues_repo_head_hexsha": "f8a35ae8e1f903e948710b50681d2aa59046150e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ManualPhase.hpp", "max_forks_repo_name": "ZJU-XMech/PhaseGuidedControl", "max_forks_repo_head_hexsha": "f8a35ae8e1f903e948710b50681d2aa59046150e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-01-21T09:33:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T09:33:25.000Z", "avg_line_length": 32.1220095694, "max_line_length": 184, "alphanum_fraction": 0.403142921, "num_tokens": 3818}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import os
import theano
import numpy
from wordclasses import TheanoBigramOptimizer, NumpyBigramOptimizer
from theanolm.vocabulary import Vocabulary
from theanolm.vocabulary import compute_word_counts, BigramStatistics
class TestBigramOptimizer(unittest.TestCase):
    """Cross-checks the NumPy and Theano bigram class optimizers.

    Both implementations are built from the same corpus statistics and
    their internal count matrices are compared after every operation.
    """

    def setUp(self):
        # 'warn' makes Theano report missing test values without aborting.
        theano.config.compute_test_value = 'warn'

        script_path = os.path.dirname(os.path.realpath(__file__))
        sentences_path = os.path.join(script_path, 'sentences.txt')
        self.sentences_file = open(sentences_path)
        self.num_classes = 2
        word_counts = compute_word_counts([self.sentences_file])
        self.vocabulary = Vocabulary.from_word_counts(word_counts,
                                                      self.num_classes)
        # Rewind so BigramStatistics re-reads the same sentences.
        self.sentences_file.seek(0)
        self.statistics = BigramStatistics([self.sentences_file], self.vocabulary)

    def tearDown(self):
        self.sentences_file.close()

    def assert_optimizers_equal(self, numpy_optimizer, theano_optimizer):
        # All five count structures must match element-wise; the word-word
        # counts are sparse, hence the difference/.nnz comparison.
        self.assertTrue(numpy.array_equal(numpy_optimizer._word_counts, theano_optimizer._word_counts.get_value()))
        self.assertEqual((numpy_optimizer._ww_counts - theano_optimizer._ww_counts.get_value()).nnz, 0)
        self.assertTrue(numpy.array_equal(numpy_optimizer._class_counts, theano_optimizer._class_counts.get_value()))
        self.assertTrue(numpy.array_equal(numpy_optimizer._cc_counts, theano_optimizer._cc_counts.get_value()))
        self.assertTrue(numpy.array_equal(numpy_optimizer._cw_counts, theano_optimizer._cw_counts.get_value()))
        self.assertTrue(numpy.array_equal(numpy_optimizer._wc_counts, theano_optimizer._wc_counts.get_value()))

    def test_statistics(self):
        # Initial count matrices: shapes, per-word counts, and agreement
        # between the two implementations.
        num_words = 8
        theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
        numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
        self.assertEqual(theano_optimizer.vocabulary_size, num_words)
        self.assertEqual(numpy_optimizer.vocabulary_size, num_words)
        # num_classes + 3: the requested classes plus <s>, </s>, <unk>.
        self.assertEqual(theano_optimizer.num_classes, self.num_classes + 3)
        self.assertEqual(numpy_optimizer.num_classes, self.num_classes + 3)
        self.assertEqual(len(theano_optimizer._word_to_class.get_value()), num_words)
        self.assertEqual(len(numpy_optimizer._word_to_class), num_words)

        sos_word_id = self.vocabulary.word_to_id['<s>']
        a_word_id = self.vocabulary.word_to_id['a']
        b_word_id = self.vocabulary.word_to_id['b']
        c_word_id = self.vocabulary.word_to_id['c']
        d_word_id = self.vocabulary.word_to_id['d']
        e_word_id = self.vocabulary.word_to_id['e']
        unk_word_id = self.vocabulary.word_to_id['<unk>']
        eos_word_id = self.vocabulary.word_to_id['</s>']

        self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
        # Expected unigram counts from sentences.txt.
        self.assertEqual(len(numpy_optimizer._word_counts), num_words)
        self.assertEqual(numpy_optimizer._word_counts[sos_word_id], 11)
        self.assertEqual(numpy_optimizer._word_counts[a_word_id], 13)
        self.assertEqual(numpy_optimizer._word_counts[b_word_id], 8)
        self.assertEqual(numpy_optimizer._word_counts[c_word_id], 8)
        self.assertEqual(numpy_optimizer._word_counts[d_word_id], 11)
        self.assertEqual(numpy_optimizer._word_counts[e_word_id], 15)
        self.assertEqual(numpy_optimizer._word_counts[unk_word_id], 0)
        self.assertEqual(numpy_optimizer._word_counts[eos_word_id], 11)
        self.assertEqual(numpy_optimizer._ww_counts.shape[0], num_words)
        self.assertEqual(numpy_optimizer._ww_counts.shape[1], num_words)
        self.assertEqual(len(numpy_optimizer._class_counts), self.num_classes + 3)
        self.assertEqual(numpy_optimizer._cc_counts.shape[0], self.num_classes + 3)
        self.assertEqual(numpy_optimizer._cw_counts.shape[0], self.num_classes + 3)
        self.assertEqual(numpy_optimizer._cw_counts.shape[1], num_words)
        self.assertEqual(numpy_optimizer._wc_counts.shape[0], num_words)
        self.assertEqual(numpy_optimizer._wc_counts.shape[1], self.num_classes + 3)

    def test_move_and_back(self):
        # Moving a word to another class and back must restore the exact
        # original counts, and both implementations must stay in sync.
        numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
        theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)

        orig_class_counts = numpy.copy(numpy_optimizer._class_counts)
        orig_cc_counts = numpy.copy(numpy_optimizer._cc_counts)
        orig_cw_counts = numpy.copy(numpy_optimizer._cw_counts)
        orig_wc_counts = numpy.copy(numpy_optimizer._wc_counts)

        word_id = self.vocabulary.word_to_id['d']
        orig_class_id = numpy_optimizer.get_word_class(word_id)
        new_class_id = 3 if orig_class_id != 3 else 4
        numpy_optimizer._move(word_id, new_class_id)
        theano_optimizer._move(word_id, new_class_id)
        self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)

        # Exactly two class counts change (source and destination), and
        # all totals are preserved by a move.
        self.assertEqual(numpy.count_nonzero(numpy_optimizer._class_counts != orig_class_counts), 2)
        self.assertEqual(numpy.sum(numpy_optimizer._class_counts), numpy.sum(orig_class_counts))
        self.assertGreater(numpy.count_nonzero(numpy_optimizer._cc_counts != orig_cc_counts), 0)
        self.assertEqual(numpy.sum(numpy_optimizer._cc_counts), numpy.sum(orig_cc_counts))
        self.assertGreater(numpy.count_nonzero(numpy_optimizer._cw_counts != orig_cw_counts), 0)
        self.assertEqual(numpy.sum(numpy_optimizer._cw_counts), numpy.sum(orig_cw_counts))
        self.assertGreater(numpy.count_nonzero(numpy_optimizer._wc_counts != orig_wc_counts), 0)
        self.assertEqual(numpy.sum(numpy_optimizer._wc_counts), numpy.sum(orig_wc_counts))

        numpy_optimizer._move(word_id, orig_class_id)
        theano_optimizer._move(word_id, orig_class_id)
        self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
        self.assertTrue(numpy.array_equal(numpy_optimizer._class_counts, orig_class_counts))
        self.assertTrue(numpy.array_equal(numpy_optimizer._cc_counts, orig_cc_counts))
        self.assertTrue(numpy.array_equal(numpy_optimizer._cw_counts, orig_cw_counts))
        self.assertTrue(numpy.array_equal(numpy_optimizer._wc_counts, orig_wc_counts))

    def test_move_and_recompute(self):
        # An incremental _move() must produce the same counts as a full
        # recomputation of the class statistics from scratch.
        optimizer1 = NumpyBigramOptimizer(self.statistics, self.vocabulary)
        word_id = self.vocabulary.word_to_id['d']
        orig_class_id = optimizer1.get_word_class(word_id)
        new_class_id = 3 if orig_class_id != 3 else 4
        optimizer1._word_to_class[word_id] = new_class_id
        counts = optimizer1._compute_class_statistics(optimizer1._word_counts,
                                                      optimizer1._ww_counts,
                                                      optimizer1._word_to_class)
        # Reference recomputation done by hand from the word counts.
        class_counts = numpy.zeros(optimizer1.num_classes, 'int32')
        cc_counts = numpy.zeros((optimizer1.num_classes, optimizer1.num_classes), dtype='int32')
        cw_counts = numpy.zeros((optimizer1.num_classes, optimizer1.vocabulary_size), dtype='int32')
        wc_counts = numpy.zeros((optimizer1.vocabulary_size, optimizer1.num_classes), dtype='int32')
        for wid, cid in enumerate(optimizer1._word_to_class):
            class_counts[cid] += optimizer1._word_counts[wid]
        for left_wid, right_wid in zip(*optimizer1._ww_counts.nonzero()):
            count = optimizer1._ww_counts[left_wid, right_wid]
            left_cid = optimizer1._word_to_class[left_wid]
            right_cid = optimizer1._word_to_class[right_wid]
            cc_counts[left_cid,right_cid] += count
            cw_counts[left_cid,right_wid] += count
            wc_counts[left_wid,right_cid] += count
        self.assertTrue(numpy.array_equal(class_counts, counts[0]))
        self.assertTrue(numpy.array_equal(cc_counts, counts[1]))
        self.assertTrue(numpy.array_equal(cw_counts, counts[2]))
        self.assertTrue(numpy.array_equal(wc_counts, counts[3]))
        optimizer1._class_counts = counts[0]
        optimizer1._cc_counts = counts[1]
        optimizer1._cw_counts = counts[2]
        optimizer1._wc_counts = counts[3]

        optimizer2 = NumpyBigramOptimizer(self.statistics, self.vocabulary)
        orig_class_id = optimizer2.get_word_class(word_id)
        optimizer2._move(word_id, new_class_id)
        self.assertEqual(numpy.count_nonzero(optimizer1._class_counts != optimizer2._class_counts), 0)
        self.assertEqual(numpy.count_nonzero(optimizer1._cc_counts != optimizer2._cc_counts), 0)
        self.assertEqual(numpy.count_nonzero(optimizer1._cw_counts != optimizer2._cw_counts), 0)
        self.assertEqual(numpy.count_nonzero(optimizer1._wc_counts != optimizer2._wc_counts), 0)

        optimizer3 = TheanoBigramOptimizer(self.statistics, self.vocabulary)
        orig_class_id = optimizer3.get_word_class(word_id)
        optimizer3._move(word_id, new_class_id)
        self.assert_optimizers_equal(optimizer2, optimizer3)

    def test_evaluate(self):
        # _evaluate() must predict exactly the log-likelihood delta that a
        # subsequent _move() produces.
        numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
        theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
        word_id = numpy_optimizer.get_word_id('d')
        orig_class_id = numpy_optimizer.get_word_class(word_id)
        new_class_id = 1 if orig_class_id != 1 else 0

        orig_ll = numpy_optimizer.log_likelihood()
        self.assertTrue(numpy.isclose(orig_ll, theano_optimizer.log_likelihood()))
        ll_diff = numpy_optimizer._evaluate(word_id, new_class_id)
        self.assertTrue(numpy.isclose(ll_diff, theano_optimizer._evaluate(word_id, new_class_id)))

        numpy_optimizer._move(word_id, new_class_id)
        new_ll = numpy_optimizer.log_likelihood()
        self.assertFalse(numpy.isclose(orig_ll, new_ll))
        self.assertTrue(numpy.isclose(orig_ll + ll_diff, new_ll))

        theano_optimizer._move(word_id, new_class_id)
        self.assertTrue(numpy.isclose(new_ll, theano_optimizer.log_likelihood()))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "bc3af24334c7853926542558d398fc29df93fa10", "size": 10110, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/wordclasses/bigramoptimizer_test.py", "max_stars_repo_name": "vasuneralla/theanolm", "max_stars_repo_head_hexsha": "51fbd89082ca3ea5d0178d09b744cf15c0113ab6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 95, "max_stars_repo_stars_event_min_datetime": "2016-01-16T16:18:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T16:31:29.000Z", "max_issues_repo_path": "tests/wordclasses/bigramoptimizer_test.py", "max_issues_repo_name": "nd1511/theanolm", "max_issues_repo_head_hexsha": "9eda655ed63e8906234e62ab7da016e64e931afe", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2015-10-16T08:49:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-12T07:17:04.000Z", "max_forks_repo_path": "tests/wordclasses/bigramoptimizer_test.py", "max_forks_repo_name": "nd1511/theanolm", "max_forks_repo_head_hexsha": "9eda655ed63e8906234e62ab7da016e64e931afe", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 37, "max_forks_repo_forks_event_min_datetime": "2016-03-25T23:21:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-05T11:21:58.000Z", "avg_line_length": 54.9456521739, "max_line_length": 117, "alphanum_fraction": 0.7301681503, "include": true, "reason": "import numpy,import theano,from theano", "num_tokens": 2138}
|
import csv
import cv2
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras import optimizers
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
# My humble tribute to Michael Jordan and Magic Johnson,
# the best basketball players ever.
# Fixed RNG seeds make training runs reproducible.
np.random.seed(23)
tf.set_random_seed(32)
# Batch_size must be a multiple of 6 [6, 12, 18, 24...]
def generator(samples, batch_size=36):
    """Yield shuffled, augmented training batches indefinitely.

    Each driving-log row expands to 6 images: center/left/right camera
    frames plus a horizontally flipped copy of each, with steering
    corrections applied to the side cameras.

    Args:
        samples: Rows from driving_log.csv (three image paths followed
            by the steering angle).
        batch_size: Number of images per yielded batch; should be a
            multiple of 6 so each yield contains exactly that many images.

    Yields:
        Tuple (X, y): image batch and matching steering measurements.
    """
    num_samples = len(samples)
    if (batch_size > num_samples):
        batch_size = num_samples
    # Step through the log in chunks of batch_size/6 rows (each row
    # contributes 6 images).  max(1, ...) guards against a zero step —
    # range(0, n, 0) would raise ValueError when num_samples < 6.
    batch_size = max(1, int(batch_size / 6))
    print(batch_size)
    correction = 0.3
    while 1:  # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset : offset + batch_size]

            images = []
            measurements = []
            for sample in batch_samples:
                for i in range(3):
                    source_path = sample[i]
                    filename = source_path.split('/')[-1]
                    current_path = './data/IMG/' + filename
                    image = cv2.imread(current_path)
                    images.append(image)
                    # Left and right corrections differ because the car
                    # tends toward one side more than the other; this
                    # compensates for that bias in the training data.
                    measurement = float(sample[3])
                    if i == 1:
                        measurement = measurement + (0.9 * correction)
                    elif i == 2:
                        measurement = measurement - (1.1 * correction)
                    measurements.append(measurement)

            # Flipping images and steering measurements for augmentation.
            augmented_images, augmented_measurements = [], []
            for image, measurement in zip(images, measurements):
                augmented_images.append(image)
                augmented_measurements.append(measurement)
                augmented_images.append(cv2.flip(image, 1))
                augmented_measurements.append(measurement * -1.0)

            X_train = np.array(augmented_images)
            y_train = np.array(augmented_measurements)
            yield shuffle(X_train, y_train)
# Model design: NVIDIA-style end-to-end driving network (Keras 1 API) —
# normalisation -> crop -> 5 conv layers -> 4 dense layers -> steering output.
model = Sequential()
# Normalise pixel values to [-1, 1].
model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=(160, 320, 3), output_shape=(160, 320, 3)))
# Crop 70 rows of sky and 20 rows of hood from every frame.
model.add(Cropping2D(cropping=((70, 20), (0, 0))))
model.add(Convolution2D(24,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation="relu"))
model.add(Convolution2D(64,3,3, activation="relu"))
model.add(Convolution2D(64,3,3, activation="relu"))
model.add(Flatten())
model.add(Dense(120))
model.add(Dense(70))
# Light dropout to reduce overfitting of the dense layers.
model.add(Dropout(0.1))
model.add(Dense(40))
model.add(Dense(10))
# Single output: the predicted steering angle.
model.add(Dense(1))
# Read the driving log
samples = []
with open('./data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)  # skip the headers
    for sample in reader:
        samples.append(sample)

# Split the samples in train and validation sets
train_samples, validation_samples = train_test_split(samples, test_size=0.2)

# Train and valid the model using the generator function
BATCH_SIZE = 36
train_generator = generator(train_samples, batch_size=BATCH_SIZE)
validation_generator = generator(validation_samples, batch_size=BATCH_SIZE)

# Compile and train the model
model.compile(loss='mse', optimizer='adam')
# NOTE(review): this call mixes Keras 1 keywords (samples_per_epoch,
# nb_val_samples) with the Keras 2 `epochs` keyword — confirm against the
# installed Keras version.
model.fit_generator(train_generator, samples_per_epoch=len(train_samples),
    validation_data=validation_generator, nb_val_samples=len(validation_samples), epochs=2)

# Save the model
model.save('model.h5')
|
{"hexsha": "b199729abcf5d80745f35f8cd143cd3fa3f4f890", "size": 4061, "ext": "py", "lang": "Python", "max_stars_repo_path": "clone.py", "max_stars_repo_name": "Denis-Tsvetanov/sdc-behavioralCloning", "max_stars_repo_head_hexsha": "56d8d6c25cea13c1171c624192baf0c8cb215292", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clone.py", "max_issues_repo_name": "Denis-Tsvetanov/sdc-behavioralCloning", "max_issues_repo_head_hexsha": "56d8d6c25cea13c1171c624192baf0c8cb215292", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clone.py", "max_forks_repo_name": "Denis-Tsvetanov/sdc-behavioralCloning", "max_forks_repo_head_hexsha": "56d8d6c25cea13c1171c624192baf0c8cb215292", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-13T00:11:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T02:54:39.000Z", "avg_line_length": 36.9181818182, "max_line_length": 98, "alphanum_fraction": 0.6532873676, "include": true, "reason": "import numpy", "num_tokens": 907}
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 6 17:25:27 2021
@author: ShiningStone
"""
import torch
import ignite
from ignite.metrics import Loss,RunningAverage
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.contrib.handlers import ProgressBar
from ignite import handlers as igHandler
from matplotlib import pyplot as plt
import numpy as np
# fTruePredLossOutput = lambda x, y, y_pred, loss: {'true':y,'pred':y_pred,'loss':loss}
# fTruePredOutput = lambda x, y, y_pred: {'true':y,'pred':y_pred}
def fPickLossFromOutput(output):
    """Extract the loss entry from an engine output dict."""
    return output['loss']


def fPickPredTrueFromOutput(output):
    """Extract the (prediction, target) pair from an engine output dict."""
    return (output['y_pred'], output['y'])
def fPickPredTrueFromOutputT(output):
    """Extract (prediction, target) with the last two dims swapped on each tensor."""
    prediction = output['y_pred'].transpose(-1, -2)
    target = output['y'].transpose(-1, -2)
    return (prediction, target)
class TCEngineOutput:
    """Callable that packs an engine step's tensors into an output dict.

    Produces {'x', 'y', 'y_pred'} plus 'loss' when a loss is supplied.
    """

    def __call__(self, x, y, y_pred, loss=None):
        # BUGFIX: `self` was missing, so calling an instance shifted every
        # argument by one position.
        out = dict()
        out['x'] = x
        out['y'] = y
        out['y_pred'] = y_pred
        # `is not None` rather than truthiness: a loss of exactly 0 must
        # still be recorded, and tensor truthiness can raise.
        if loss is not None:
            out['loss'] = loss
        return out
def tfEngineOutput(x, y, y_pred, loss=None):
    """Pack an engine step's tensors into an output dict.

    Args:
        x: Input batch.
        y: Target batch.
        y_pred: Model prediction.
        loss: Optional loss value; included only when supplied.

    Returns:
        Dict with keys 'x', 'y', 'y_pred' and, when given, 'loss'.
    """
    out = dict()
    out['x'] = x
    out['y'] = y
    out['y_pred'] = y_pred
    # `is not None` rather than truthiness: a loss of exactly 0 must still
    # be recorded, and tensor truthiness can raise.
    if loss is not None:
        out['loss'] = loss
    return out
class CTrainer:
    def __init__(self,epoch,device,criterion,optimizer,lrScheduler = None):
        """Create a trainer shell; the ignite engines are built later via setWorker().

        Args:
            epoch: Total number of training epochs to run.
            device: Device passed to the ignite engines (e.g. 'cpu').
            criterion: Loss function; also registered as the 'loss' metric.
            optimizer: Torch optimizer instance.
            lrScheduler: Optional learning-rate scheduler.
        """
        self.curEpoch = 0
        self.nEpoch = epoch
        # Filled in by the setOptm() call below.
        self.optimizer = None
        self.lrScheduler = None
        self.criterion = None
        self.device = device
        # Created by setWorker().
        self.trainer = None
        self.evaluator = None
        self.model = None
        # Output locations, configured via setDir().
        self.tarFolder = None
        self.oLog = None
        self.metrics = dict()
        self.metricsRecord:dict= dict()
        # Best-epoch bookkeeping used for checkpointing.
        self.bestEpoch = -1
        self.bestTargetMetricValue = -1
        self.targetMetric:str = None
        self.lrRecord:list = list()
        self.fPlotsFunc:list = list()
        self.setOptm(criterion,optimizer,lrScheduler)
        self.extList:list = list()
        self.exprFlag = False
def addLr(self):
# print('cha yan')
for param_group in self.optimizer.param_groups:
param_group['lr'] += 0.0001
    def addExpr(self):
        # Register addLr so the learning rate is bumped after every epoch
        # (used for learning-rate experiments).
        self.trainer.add_event_handler(Events.EPOCH_COMPLETED,self.addLr)
def setDataLoader(self,dtldTrain,dtldTest):
self.dtldTrain = dtldTrain
self.dtldDev = dtldTest
    def setOptm(self,criterion,optimizer,lrScheduler = None):
        """Install the loss, optimizer and scheduler, registering the loss metric."""
        self.criterion = criterion
        # The criterion doubles as the evaluator's 'loss' metric.
        self.addMetrics('loss', Loss(self.criterion,output_transform=fPickPredTrueFromOutput))
        self.optimizer = optimizer
        self.lrScheduler = lrScheduler
def setDir(self,oLog,tarFolder):
self.oLog = oLog
self.tarFolder = tarFolder
    def addMetrics(self,name:str,metric:ignite.metrics.Metric):
        """Register a named ignite metric to be computed on evaluation runs."""
        assert isinstance(metric,ignite.metrics.Metric)
        self.metrics[name] = metric
def score_function(self,engine):
self.evaluator.run(self.dtldDev)
metrics = self.evaluator.state.metrics
print('a')
val = metrics['corr']
return val
    def addPlotFunc(self,func):
        """Register a callback that produces matplotlib figures from the model."""
        self.fPlotsFunc.append(func)
    def plots(self,epoch,best = False):
        """Run all registered plot callbacks and save the figures to tarFolder.

        Args:
            epoch: Epoch number used in the file name.
            best: When True, save under the '_epoch_best_' name instead.
        """
        figsList = list()
        for func in self.fPlotsFunc:
            figs = func(self.model)
            figsList += figs
        for idx, f in enumerate(figsList):
            if best:
                f.savefig(self.tarFolder + '/' + '_epoch_best_' + str(idx) + '.png')
            else:
                f.savefig(self.tarFolder + '/' + '_epoch_' + str(epoch) + '_' + str(idx) + '.png')
            # Close each figure to free matplotlib memory.
            plt.close(f)
def recordLr(self,):
for param_group in self.optimizer.param_groups:
self.lrRecord.append(param_group['lr'])
    def addEvaluatorExtensions(self,handler):
        """Queue a handler to be attached to the evaluator's COMPLETED event.

        Registration is deferred; setEvalExt() performs the actual attach.
        """
        # self.evaluator.add_event_handler(Events.COMPLETED, handler)
        self.extList.append([Events.COMPLETED, handler])
    def hookTrainingResults(self,trainer):
        """Epoch hook: save plots, evaluate on the training set, record metrics."""
        self.plots(trainer.state.epoch)
        self.evaluator.run(self.dtldTrain)
        metrics = self.evaluator.state.metrics
        self.recordLr()
        # Append this epoch's training-set value for every tracked metric.
        for i in self.metricsRecord:
            self.metricsRecord[i]['train'].append(metrics[i])
        if self.oLog:
            self.oLog('Train','Epoch:',trainer.state.epoch,'Metrics',metrics,'lr',self.lrRecord[-1],splitChar = '\t')
        else:
            print(f"Training Results - Epoch: {trainer.state.epoch} Metrics: {metrics}")
def hookValidationResults(self,trainer):
self.evaluator.run(self.dtldDev)
metrics = self.evaluator.state.metrics
targetMetric = metrics[self.targetMetric]
if isinstance(self.lrScheduler,torch.optim.lr_scheduler.ReduceLROnPlateau):
# print(metrics['corr'])
self.lrScheduler.step(metrics['corr'])
for i in self.metricsRecord:
self.metricsRecord[i]['eval'].append(metrics[i])
if targetMetric > self.bestTargetMetricValue:
self.plots(trainer.state.epoch,True)
self.bestEpoch = trainer.state.epoch
self.bestTargetMetricValue = targetMetric
# then save checkpoint
checkpoint = {
'state_dict': self.model.state_dict(),
'targetMetric': targetMetric,
}
torch.save(checkpoint,self.tarFolder + '/savedModel_feedForward_best.pt')
# if self.lrScheduler:
# print(metrics['corr'])
# self.lrScheduler.step(metrics['corr'])
if self.oLog:
self.oLog('Validation','Epoch:',trainer.state.epoch,'Metrics',metrics,splitChar = '\t')
else:
print(f"Validation Results - Epoch: {trainer.state.epoch} Metrics: {metrics}")
def setEvalExt(self):
for i in self.extList:
self.evaluator.add_event_handler(*i)
    def step(self):
        # Advance the LR scheduler one step; registered per-iteration for
        # CyclicLR in setWorker.
        self.lrScheduler.step()
    def setWorker(self,model,targetMetric,device = 'cpu'):
        '''Build the ignite trainer/evaluator engines for ``model``.

        Kept as a separate method for downward compatibility. ``targetMetric``
        names the metric (a key of self.metrics) used to track the best epoch.
        '''
        self._setRecording()
        self.targetMetric = targetMetric
        self.trainer = create_supervised_trainer(model,self.optimizer,self.criterion,device=device,output_transform=tfEngineOutput)
        self.evaluator = create_supervised_evaluator(model, metrics=self.metrics,device=device,output_transform=tfEngineOutput)
        self.model = model
        # Running average of the loss feeds the progress bar display.
        RunningAverage(output_transform=fPickLossFromOutput).attach(self.trainer, "loss")
        # CMPearsonr(output_transform=fPickPredTrueFromOutputT).attachForTrain(self.trainer, "corr")
        # Attach every registered metric except 'loss' to the trainer too.
        for i in self.metrics:
            if i != 'loss':
                self.metrics[i].attach(self.trainer,i)
        pbar = ProgressBar(persist=True,ncols = 75)
        pbar.attach(self.trainer, metric_names='all')
        # scheduler = LRScheduler(self.lrScheduler)
        # self.trainer.add_event_handler(Events.EPOCH_COMPLETED, self.reduct_step)
        # Per-epoch hooks: training metrics first, then validation/checkpoint.
        self.trainer.add_event_handler(Events.EPOCH_COMPLETED,self.hookTrainingResults)
        self.trainer.add_event_handler(Events.EPOCH_COMPLETED,self.hookValidationResults)
        # self.addExpr()
        # CyclicLR must be stepped every iteration rather than every epoch.
        if isinstance(self.lrScheduler, torch.optim.lr_scheduler.CyclicLR):
            print('CyclicLR')
            self.trainer.add_event_handler(Events.ITERATION_COMPLETED,self.step)
        # self.trainer.add_event_handler(Events.ITERATION_COMPLETED, self._hookIterationComplete)
        # handler = EarlyStopping(patience=5, score_function=self.score_function, trainer=self.trainer)
        # self.addEvaluatorExtensions(handler)
        # self.setEvalExt()
def _setRecording(self):
for i in self.metrics:
self.metricsRecord[i] = {'train':list(),'eval':list()}
    def _hookIterationComplete(self):
        # Debug helper: print the current learning rate of every param group.
        # Not registered by default (see the commented-out handler in setWorker).
        for param_group in self.optimizer.param_groups:
            print(param_group['lr'])
def train(self,model,targetMetric,device = 'cpu'):
self.setWorker(model,targetMetric,device)
self.trainer.run(self.dtldTrain, max_epochs=self.nEpoch)
return self.bestEpoch, self.bestTargetMetricValue
    def test(self,model,dtldTest,device = 'cpu'):
        # Evaluate `model` on `dtldTest` with all registered metrics (plus a
        # freshly registered 'loss' metric) and return the metrics dict.
        self.addMetrics('loss', Loss(self.criterion,output_transform=fPickPredTrueFromOutput))
        self.tester = create_supervised_evaluator(model, metrics=self.metrics,device=device,output_transform=tfEngineOutput)
        self.tester.run(dtldTest)
        metrics = self.tester.state.metrics
        return metrics
def safeAllocate(arraylike):
    """Return ``arraylike`` as a torch.Tensor.

    Tensors are passed through unchanged; anything else is assumed to be a
    numpy array and converted with ``torch.from_numpy`` (memory is shared).
    """
    if isinstance(arraylike, torch.Tensor):
        return arraylike
    return torch.from_numpy(arraylike)
def collate_fn(batch,channelFirst = True):
    """Zero-pad a batch of multi-field samples along the length axis.

    Each sample in ``batch`` is a sequence of 2-D arrays/tensors laid out as
    (channel, length) when ``channelFirst`` is True, else (length, channel).
    For every field, all samples are right-padded with zeros to the longest
    length in the batch and stacked into a 3-D tensor whose first axis is
    the batch.

    Returns a list with one padded tensor per field.
    """
    nBatch = len(batch)
    if channelFirst:
        dimIdxLen, dimIdxChannel = 1, 0
    else:
        dimIdxLen, dimIdxChannel = 0, 1
    outBatch = []
    for fieldIdx, field in enumerate(batch[0]):
        nChannel = field.shape[dimIdxChannel]
        # Longest length of this field across the batch.
        maxLen = field.shape[dimIdxLen]
        for sample in batch:
            maxLen = max(sample[fieldIdx].shape[dimIdxLen], maxLen)
        # Allocate the zero-filled output tensor: batch axis first, then
        # channel/length at the positions dictated by channelFirst.
        shape = [0, 0, 0]
        shape[0] = nBatch
        shape[1 + dimIdxLen] = maxLen
        shape[1 + dimIdxChannel] = nChannel
        padded = torch.zeros(*shape)
        # Copy each sample into the left part of its slot.
        for i, sample in enumerate(batch):
            index = [0, 0, 0]
            index[0] = i
            index[1 + dimIdxLen] = slice(sample[fieldIdx].shape[dimIdxLen])
            index[1 + dimIdxChannel] = slice(None)
            padded[tuple(index)] = safeAllocate(sample[fieldIdx])
        outBatch.append(padded)
    return outBatch
|
{"hexsha": "9198ba37b7e41ffd5ad5ac160d7fe5af5cb56524", "size": 10032, "ext": "py", "lang": "Python", "max_stars_repo_path": "StimRespFlow/DataProcessing/DeepLearning/Trainer.py", "max_stars_repo_name": "powerfulbean/StellarBrainwav", "max_stars_repo_head_hexsha": "7636c28b0dbbf7cdd5514b9699e1d3cb21883563", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-16T06:14:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T00:05:06.000Z", "max_issues_repo_path": "StimRespFlow/DataProcessing/DeepLearning/Trainer.py", "max_issues_repo_name": "powerfulbean/StellarBrainwav", "max_issues_repo_head_hexsha": "7636c28b0dbbf7cdd5514b9699e1d3cb21883563", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "StimRespFlow/DataProcessing/DeepLearning/Trainer.py", "max_forks_repo_name": "powerfulbean/StellarBrainwav", "max_forks_repo_head_hexsha": "7636c28b0dbbf7cdd5514b9699e1d3cb21883563", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.347826087, "max_line_length": 131, "alphanum_fraction": 0.6216108453, "include": true, "reason": "import numpy", "num_tokens": 2322}
|
#!/usr/bin/env python
# TODO
# this is for Suzhou Chinese data
# change vow and the conditional flow statements accordingly
import os, re, glob, shutil
from PIL import Image
import numpy as np
import argparse
import audiolabel
from operator import itemgetter
from ultratils.rawreader import RawReader
from ultratils.pysonix.scanconvert import Converter
# TODO copy eb-pca-prep ...
# except where you take RawReader method from eb-extract-frames
|
{"hexsha": "98b77983af15c2c4265b95c0470a878bad48070f", "size": 447, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/dim-reduction/eb-cache-frames.py", "max_stars_repo_name": "jenniferxkuo/ultramisc", "max_stars_repo_head_hexsha": "65bff53c97715eb9a24acc62b77460f22e0dabc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/dim-reduction/eb-cache-frames.py", "max_issues_repo_name": "jenniferxkuo/ultramisc", "max_issues_repo_head_hexsha": "65bff53c97715eb9a24acc62b77460f22e0dabc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/dim-reduction/eb-cache-frames.py", "max_forks_repo_name": "jenniferxkuo/ultramisc", "max_forks_repo_head_hexsha": "65bff53c97715eb9a24acc62b77460f22e0dabc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2941176471, "max_line_length": 63, "alphanum_fraction": 0.8031319911, "include": true, "reason": "import numpy", "num_tokens": 106}
|
(* SPDX-License-Identifier: GPL-2.0 *)
Require Import Coqlib.
Require Import Errors.
Require Import AST.
Require Import Integers.
Require Import Floats.
Require Import Op.
Require Import Events.
Require Import Globalenvs.
Require Import Smallstep.
Require Import Values.
Require Import Memory.
Require Import Maps.
Require Import ZSet.
Require Import ListLemma2.
Require Import CommonTactic.
Require Import AuxLemma.
Require Import AuxStateDataType.
Require Import RealParams.
Require Import RefinementTactic.
Require Import PrimSemantics.
Require Import LayerCalculusLemma.
Require Import TacticsForTesting.
Require Import liblayers.logic.PTreeModules.
Require Import liblayers.logic.LayerLogicImpl.
Require Import liblayers.compcertx.Stencil.
Require Import liblayers.compcertx.MakeProgram.
Require Import liblayers.compat.CompatLayers.
Require Import liblayers.compat.CompatGenSem.
Require Import RData.
Require Import Constants.
Require Import MmioCore.Layer.
Require Import HypsecCommLib.
Require Import MmioCoreAux.Layer.
Require Import MmioCore.Spec.
Require Import AbstractMachine.Spec.
Require Import MmioRaw.Spec.
Require Import MmioCoreAux.Spec.
Local Open Scope Z_scope.
Local Opaque Z.add Z.mul Z.div Z.shiftl Z.shiftr Z.land Z.lor.
Section MmioCoreProofHigh.
Local Open Scope string_scope.
Local Open Scope Z_scope.
Context `{real_params: RealParams}.
Notation HDATA := RData.
Notation LDATA := RData.
Notation HDATAOps := (cdata (cdata_ops := MmioCore_ops) HDATA).
Notation LDATAOps := (cdata (cdata_ops := MmioCoreAux_ops) HDATA).
Section WITHMEM.
Context `{Hstencil: Stencil}.
Context `{Hmem: Mem.MemoryModelX}.
Context `{Hmwd: UseMemWithData mem}.
(* Refinement relation between the abstract data of the two layers. The
   MmioCore pass does not change the abstract-state representation, so the
   relation is simply equality. *)
Record relate_RData (f:meminj) (hadt: HDATA) (ladt: LDATA) :=
  mkrelate_RData {
      id_rel: hadt = ladt
    }.
(* Match relation between abstract data and concrete memory: trivially true,
   since no abstract state is materialized in memory at this layer. *)
Inductive match_RData: stencil -> HDATA -> mem -> meminj -> Prop :=
| MATCH_RDATA: forall habd m f s, match_RData s habd m f.

Local Hint Resolve MATCH_RDATA.
(* Relation operations consumed by the simulation framework: abstract-data
   relation, memory match relation, and no new global variables. *)
Global Instance rel_ops: CompatRelOps HDATAOps LDATAOps :=
  {
    relate_AbData s f d1 d2 := relate_RData f d1 d2;
    match_AbData s d1 m f := match_RData s d1 m f;
    new_glbl := nil
  }.
(* The operations above satisfy the CompatRel laws; every obligation is
   trivial because the underlying relation is equality. *)
Global Instance rel_prf: CompatRel HDATAOps LDATAOps.
Proof.
  constructor; intros; simpl; trivial.
  constructor; inv H; trivial.
Qed.
Section FreshPrim.
(* Unfold the SMMU MMIO specification definitions automatically in the
   refinement proofs below. *)
Hint Unfold
  handle_smmu_read_spec0
  panic_spec
  handle_smmu_read_spec
  smmu_get_cbndx_spec
  __handle_smmu_read_spec
  handle_smmu_write_spec
  handle_smmu_cb_access_spec
  __handle_smmu_write_spec
  handle_smmu_write_spec0
  handle_smmu_global_access_spec
  get_smmu_cfg_hw_ttbr_spec.
(* Existence lemma: if the high-level handle_smmu_write_spec succeeds on
   habd, then the low-level handle_smmu_write_spec0 succeeds on the related
   labd, and the results remain related. The proof unfolds both specs and
   discharges every branch of the case analysis with the simulation
   tactics (extract_if / bool_rel / srewrite). *)
Lemma handle_smmu_write_spec_exists:
  forall habd habd' labd hsr fault_ipa len index f
         (Hspec: handle_smmu_write_spec hsr fault_ipa len index habd = Some habd')
         (Hrel: relate_RData f habd labd),
  exists labd', handle_smmu_write_spec0 hsr fault_ipa len index labd = Some labd' /\ relate_RData f habd' labd'.
Proof.
  intros; inv Hrel; repeat autounfold in *; repeat simpl_hyp Hspec; contra; inv Hspec; srewrite; simpl in *.
  destruct_if. eexists; split. reflexivity. constructor; destruct habd'; simpl in *; srewrite; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  eexists; split. reflexivity. constructor; destruct habd'; simpl in *; srewrite; reflexivity.
  bool_rel; srewrite. simpl.
  eexists; split. reflexivity. constructor; destruct labd; simpl in *; srewrite; reflexivity.
  extract_if. repeat simpl_hyp C12; contra; inv C12; reflexivity. rewrite Cond.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. repeat simpl_hyp C12; contra; inv C12; reflexivity. rewrite Cond.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. repeat simpl_hyp C12; contra; inv C12; reflexivity. rewrite Cond.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond0.
  srewrite; simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond1.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond0.
  srewrite; simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond1.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond0.
  srewrite; simpl.
  extract_if. apply andb_true_iff; split; bool_rel_all; somega; reflexivity. rewrite Cond1.
  eexists; split. reflexivity. constructor; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  eexists; split. reflexivity. constructor; destruct labd; simpl in *; srewrite; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  eexists; split. reflexivity. constructor; destruct habd'; simpl in *; srewrite; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  eexists; split. reflexivity. constructor; destruct habd'; simpl in *; srewrite; reflexivity.
  extract_if. bool_rel; omega. rewrite Cond. simpl.
  eexists; split. reflexivity. constructor; destruct labd; simpl in *; srewrite; reflexivity.
Qed.
(* Compatibility-simulation lemma: lifts the existence lemma above into the
   compatsim framework for the generated semantics of the primitive. *)
Lemma handle_smmu_write_spec_ref:
  compatsim (crel RData RData) (gensem handle_smmu_write_spec) handle_smmu_write_spec_low.
Proof.
  Opaque handle_smmu_write_spec.
  compatsim_simpl (@match_RData).
  exploit handle_smmu_write_spec_exists; eauto 1;
    intros (labd' & Hspec & Hrel).
  refine_split; repeat (try econstructor; eauto).
  Transparent handle_smmu_write_spec.
Qed.
(* Existence lemma for the read path. Both layers use the same spec here, so
   the proof is a direct rewrite under the equality relation. *)
Lemma handle_smmu_read_spec_exists:
  forall habd habd' labd hsr fault_ipa len f
         (Hspec: handle_smmu_read_spec hsr fault_ipa len habd = Some habd')
         (Hrel: relate_RData f habd labd),
  exists labd', handle_smmu_read_spec hsr fault_ipa len labd = Some labd' /\ relate_RData f habd' labd'.
Proof.
  intros; inv Hrel; repeat autounfold in *. simpl_hyp Hspec. rewrite Hspec.
  eexists; split. reflexivity. constructor; reflexivity.
Qed.
End FreshPrim.
End WITHMEM.
End MmioCoreProofHigh.
|
{"author": "VeriGu", "repo": "VRM-proof", "sha": "9e3c9751f31713a133a0a7e98f3d4c9600ca7bde", "save_path": "github-repos/coq/VeriGu-VRM-proof", "path": "github-repos/coq/VeriGu-VRM-proof/VRM-proof-9e3c9751f31713a133a0a7e98f3d4c9600ca7bde/sekvm/MmioCore/ProofHigh.v"}
|
/*
$Id: gspan.cpp,v 1.8 2004/05/21 09:27:17 taku-ku Exp $;
Copyright (C) 2004 Taku Kudo, All rights reserved.
This is free software with ABSOLUTELY NO WARRANTY.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA
*/
#include "gspan.h"
#include <iterator>
#include <stdlib.h>
#include <ostream>
#include <fstream>
#include <iostream>
#include <strstream>
#include <Eigen/Core>
#include <Eigen/Cholesky>
#include <Eigen/QR>
#include <float.h> // MAX
#include <iomanip> //setw
#include <deque> //iterative method
namespace GSPAN {
gSpan::gSpan(void) {
    // Start in the initial descriptor-search phase with progress reporting on.
    initial_graph_search = true;
    report = true;
    single_node_enumerated = false;
    breadth_first = false;
    // When true, MSE is surveyed at gSpan::lambda (cross-validation mode).
    cv_run = false;
    /* Memory-saving note kept from the original implementation:
       the cache no longer stores each graph; a graph is regenerated on the
       fly when it is selected. */
}
void gSpan::reset(void) {
// clear up vectors
beta.clear();
gamma.clear();
test_logical_index.clear();
sign.clear();
w.clear();
v.clear();
// initial graph search enabled.
initial_graph_search = true;
// reset caches.
for (std::vector<DFSCodeCache *>::iterator it = linearly_dependent_caches.begin(); it != linearly_dependent_caches.end(); ++it) {
(*it)->linearlydependent = false;
}
for (std::vector<DFSCodeCache *>::iterator it = X_cache_pointers.begin(); it != X_cache_pointers.end(); ++it) {
(*it)->ActiveSet = false;
}
linearly_dependent_caches.clear();
X_cache_pointers.clear();
}
std::vector<double> gSpan::compute_residual(std::vector<double> Beta, bool TestOrTrain) {
    // Residuals y - X*Beta over either the test split (TestOrTrain == true)
    // or the training split (TestOrTrain == false).
    std::vector<double> residual;
    residual.reserve(TestOrTrain ? test_set_size : train_set_size);
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        // Keep only rows belonging to the requested split.
        const bool in_split = TestOrTrain ? test_logical_index.at(i) : !test_logical_index.at(i);
        if (!in_split) continue;
        // Predicted response for row i.
        double ypi = 0;
        for (unsigned int j = 0; j < Beta.size(); j++)
            ypi += Beta.at(j) * X_cache_pointers[j]->x.at(i);
        // Residual = observed - predicted.
        residual.push_back(y.at(i) - ypi);
    }
    return residual;
}
double gSpan::compute_MSE_from_residual(std::vector<double> residual) {
    // Mean squared error of a residual vector.
    //
    // Fix: the original divided by residual.size() unconditionally, which
    // produces 0/0 (NaN) for an empty residual vector; return 0.0 instead.
    if (residual.empty()) return 0.0;
    // Sum of squared errors.
    double MSE = 0;
    for (unsigned int i = 0; i < residual.size(); i++) {
        MSE += residual.at(i) * residual.at(i);
    }
    // Average over the number of residuals.
    return MSE / residual.size();
}
std::vector<double> gSpan::complete_regression (void) {
    // Final coefficients: beta + (rho0/eta0) * gamma, i.e. one full step
    // along the current search direction.
    const double d = rho0 / eta0;
    std::vector<double> TempBeta;
    TempBeta.reserve(beta.size());
    for (unsigned int i = 0; i < beta.size(); i++)
        TempBeta.push_back(beta[i] + d * gamma[i]);
    return TempBeta;
}
double gSpan::compute_covariance_r_xtrain(FrequencyVector x) {
    // Inner product of the residual vector w with the training rows of x.
    // w holds one entry per training row, in Data_Set order.
    double covariance = 0;
    std::vector<double>::iterator wIT = w.begin();
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;  // skip test rows
        covariance += (*wIT) * x.at(i);
        ++wIT;
    }
    return covariance;
}
void gSpan::compute_v(void) {
    // v = X_train * gamma: projection of the current direction onto the
    // training rows (one entry per training row, in Data_Set order).
    v.clear();
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;  // training rows only
        double gtxi = 0;
        for (unsigned int j = 0; j < gamma.size(); j++)
            gtxi += gamma.at(j) * X_cache_pointers[j]->x.at(i);
        v.push_back(gtxi);
    }
}
double gSpan::compute_covariance_g_xtrain(FrequencyVector x) {
    // Inner product of v (= X_train * gamma, see compute_v) with the
    // training rows of x; mirrors compute_covariance_r_xtrain.
    double covariance = 0;
    std::vector<double>::iterator vIT = v.begin();
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;  // skip test rows
        covariance += (*vIT) * x.at(i);
        ++vIT;
    }
    return covariance;
}
double gSpan::compute_dt(FrequencyVector x) {
    // Step length at which feature x would enter the active set: the two
    // candidates come from the +/- sign choices of the correlation condition.
    const double wx = compute_covariance_r_xtrain(x);
    const double vx = compute_covariance_g_xtrain(x);
    const double dt1 = (rho0 - wx) / (eta0 - vx);
    const double dt2 = (rho0 + wx) / (eta0 + vx);
    // Return the smallest strictly positive candidate, or DBL_MAX when
    // neither candidate is positive.
    if (dt1 > 0 && dt1 <= dt2) return dt1;
    if (dt2 > 0) return dt2;
    return DBL_MAX;
}
// Find the smallest positive step at which an active coefficient would hit
// zero. Updates the member d2 in place (it is assumed to hold an upper
// bound on entry -- TODO confirm the caller re-initializes it) and returns
// the index of the coefficient that would be removed, or UINT_MAX if none.
unsigned int gSpan::compute_d2(void) {
    double dt;
    unsigned int d2index = UINT_MAX;
    for (unsigned int i = 0; i < beta.size(); i++) {
        // NOTE(review): gamma.at(i) == 0 yields +/-inf (or NaN) here; the
        // comparison below then rejects it, but confirm gamma entries are
        // expected to be nonzero for active coordinates.
        dt = -beta.at(i) / gamma.at(i);
        if (dt > 0 && d2 > dt) {
            d2 = dt;
            d2index = i; // saving the index in case we need to remove it.
        }
    }
    return d2index;
}
// Signum: returns -1, 0, or +1 according to the sign of val.
template <typename T> int sgn(T val) {
    if (val > T(0)) return 1;
    if (val < T(0)) return -1;
    return 0;
}
unsigned int gSpan::support (std::vector<PDFS * > PDFSs)
{
    // Count the number of distinct graphs (by graph->id) that contain at
    // least one embedding; PDFSs is assumed grouped by graph id, so a new
    // id marks the next graph.
    unsigned int size = 0;
    unsigned int oid = 0xffffffff;
    for (std::vector<PDFS * >::iterator cur = PDFSs.begin(); cur != PDFSs.end(); ++cur) {
        if ((*cur)->graph->id != oid) {
            oid = (*cur)->graph->id;
            ++size;
        }
    }
    return size;
}
double gSpan::compute_gain(FrequencyVector x) {
    // |y_train . x|: absolute correlation of the frequency vector x with
    // the response over the training rows.
    double gain_t = 0;
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;  // training rows only
        gain_t += y.at(i) * x.at(i);
    }
    return fabs(gain_t);
}
bool gSpan::gain_prune_condition(FrequencyVector x) {
    // Upper bound on the gain of any supergraph (x is an upper bound on the
    // frequency vector): bound the negative and positive parts of the
    // response separately over the training rows.
    double neg_bound = 0;
    double pos_bound = 0;
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;
        const double yi = y.at(i);
        if (yi < 0)
            neg_bound += fabs(yi) * x.at(i);
        else if (yi > 0)
            pos_bound += yi * x.at(i);
    }
    // Prune when even the bound cannot beat the best gain found so far.
    return largest_gain > std::max(neg_bound, pos_bound);
}
bool gSpan::d1_prune_condition(FrequencyVector x) {
    // Pruning bound for the step-length search (x is an upper bound on the
    // frequency vector of any supergraph). The positive and negative parts
    // of w (residual) and v (direction projection) are each bounded, in a
    // single pass over the training rows.
    double bw1 = 0, bw2 = 0;
    double bv1 = 0, bv2 = 0;
    std::vector<double>::iterator wIT = w.begin();
    std::vector<double>::iterator vIT = v.begin();
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
        if (test_logical_index.at(i)) continue;
        const double xi = x.at(i);
        if (*wIT < 0) bw1 += fabs(*wIT) * xi;
        else if (*wIT > 0) bw2 += *wIT * xi;
        if (*vIT < 0) bv1 += fabs(*vIT) * xi;
        else if (*vIT > 0) bv2 += *vIT * xi;
        ++wIT;
        ++vIT;
    }
    // RHS could be cached whenever a new d1 is computed.
    return (std::max(bw1, bw2) + d1 * std::max(bv1, bv2)) < (fabs(rho0) - d1 * fabs(eta0));
}
Eigen::MatrixXd gSpan::AppendXtoGram(Eigen::MatrixXd TempGram, FrequencyVector x) {
    // Grow the Gram matrix by one row/column for the new feature x.
    // Must be executed before x's cache is appended to X_cache_pointers.
    const unsigned int dim = X_cache_pointers.size();
    TempGram.conservativeResize(dim + 1, dim + 1);
    // Off-diagonal entries: <x_i, x> restricted to training rows.
    for (unsigned int i = 0; i < dim; i++) {
        double dot = 0;
        for (unsigned int k = 0; k < Data_Set.size(); k++) {
            if (!test_logical_index.at(k)) dot += X_cache_pointers[i]->x.at(k) * x.at(k);
        }
        TempGram(dim, i) = dot;
        TempGram(i, dim) = dot;
    }
    // Diagonal entry: <x, x> on training rows.
    double dot = 0;
    for (unsigned int k = 0; k < Data_Set.size(); k++) {
        if (!test_logical_index.at(k)) dot += x.at(k) * x.at(k);
    }
    TempGram(dim, dim) = dot;
    return TempGram;
}
// Remove one row and one column from a square Gram matrix by shifting the
// trailing rows/columns up/left and shrinking the matrix by one.
Eigen::MatrixXd gSpan::RemoveXfromGram(Eigen::MatrixXd Gram, unsigned int dimensionToRemove) {
    unsigned int Dim = Gram.rows() - 1;
    if (dimensionToRemove < Dim) {
        // Shift the rows below the removed one up by one, then shift the
        // columns to its right left by one.
        // NOTE(review): these assignments copy between overlapping blocks of
        // the same matrix; they rely on Eigen evaluating the copy in an order
        // that is safe for this shift direction -- confirm (or add .eval())
        // before modifying.
        Gram.block(dimensionToRemove, 0, Dim - dimensionToRemove, Dim + 1) = Gram.block(dimensionToRemove + 1, 0, Dim - dimensionToRemove, Dim + 1);
        Gram.block(0, dimensionToRemove, Dim , Dim - dimensionToRemove) = Gram.block(0, dimensionToRemove + 1, Dim, Dim - dimensionToRemove);
    }
    Gram.conservativeResize(Dim, Dim);
    return Gram;
}
////////////////////////////////////// new vector < unsigned int > -> sort -> find method
// This is the slowest part of the algorithm for the large graph data.
// Tried several things to optimize:
// 1. Tried edge pointer for isomorphism. didn't work well.
// 2. Tried unordered_set for unsigned int container which is the fastest container for find to avoid using sort. Not the fastest
// 3. Sort -> find is the fastest.
// here are some related discussion
// https://stackoverflow.com/questions/26431593/compare-two-integer-arrays-and-check-if-they-have-equal-values
// https://stackoverflow.com/questions/17394149/how-to-efficiently-compare-vectors-with-c
// https://stackoverflow.com/questions/6985572/which-is-the-fastest-stl-container-for-find
FrequencyVector gSpan::GetFrequencyVector(std::vector<PDFS * > PDFSs) {
    // Build the per-graph frequency vector: x[graph id] = number of
    // NON-isomorphic embeddings of the current pattern in that graph.
    // PDFSs is assumed grouped (sorted) by graph id; two embeddings are
    // considered isomorphic when they cover the same sorted set of edge ids.
    //
    // Fixes over the original implementation:
    //  * the scratch edge-id buffer was heap-allocated with new/delete and
    //    re-allocated after every unique embedding, losing its reserved
    //    capacity and leaking on an exception thrown mid-loop; a stack
    //    vector reused with clear() keeps the capacity and is exception-safe;
    //  * an empty PDFSs list dereferenced PDFSs.begin() and then indexed
    //    x.at(0xffffffff); it now simply returns the zero vector.
    FrequencyVector x(Data_Set.size(), 0);
    if (PDFSs.empty()) return x;
    unsigned int oid = 0xffffffff;
    // Number of edges in the pattern, used to reserve the scratch buffer.
    unsigned int n_edge = 0;
    for (PDFS *p = (*PDFSs.begin()); p; p = p->prev) n_edge++;
    std::vector <std::vector<unsigned int> > UniquePDFS; // unique embeddings of the current graph
    std::vector<unsigned int> edge_ids;
    edge_ids.reserve(n_edge);
    for (std::vector<PDFS *>::iterator cur = PDFSs.begin(); cur != PDFSs.end(); ++cur) {
        if (oid != (*cur)->graph->id && cur != PDFSs.begin()) {
            // Crossed into the next graph: record the count for the previous one.
            x.at(oid) = UniquePDFS.size();
            UniquePDFS.clear();
        }
        // Collect and sort this embedding's edge ids (canonical form).
        // Sort -> find was measured to be the fastest comparison strategy
        // (see the discussion links above this function).
        edge_ids.clear();
        for (PDFS *p = (*cur); p; p = p->prev) edge_ids.push_back(p->edge->id);
        std::sort(edge_ids.begin(), edge_ids.end());
        // Count only embeddings not seen before within this graph.
        if (std::find(UniquePDFS.begin(), UniquePDFS.end(), edge_ids) == UniquePDFS.end())
            UniquePDFS.push_back(edge_ids);
        oid = (*cur)->graph->id;
    }
    // Flush the count for the last graph.
    x.at(oid) = UniquePDFS.size();
    return x;
}
bool gSpan::DFS_check_condition(std::vector<PDFS*> PDFSs) {
    // Cheapest test first: is the pattern frequent enough? (support pruning
    // is currently done for the test set and training set combined)
    if (support(PDFSs) < minsup)
        return false;
    // The minimal-DFS-code check is more expensive than the support check,
    // hence it runs second.
    if (!is_min())
        return false;
    // Finally enforce the node-count upper bound. Equality is allowed
    // because the DFS exploration may still add edges within an existing
    // subgraph without increasing the number of nodes.
    return DFS_CODE.nodeCount() <= maxpat_max;
}
// This code below is an example of recursive function version.
// Depth-first pattern growth (recursive version). For the pattern whose
// embeddings are held in `projected`:
//   1. score the pattern (gain during the initial descriptor search, step
//      length dt otherwise) and prune the whole subtree via the x_upper
//      frequency bound;
//   2. on first visit, enumerate all rightmost-path one-edge extensions,
//      create a DFSCodeCache for each extension that survives the
//      support / minimal-DFS-code / size checks, and propagate frequency
//      upper bounds to all ancestors;
//   3. recurse into every surviving extension (backward first, then forward).
void gSpan::project_depth_first(Projected * const &projected)
{
    DFSCodeCache *DFS_Code_Cache = projected->DFS_Code_Cache;
    /* GGDEBUG
    if (DFS_Code_Cache->g == debug_graph) {
        std::cout << "GG";
    }
    GGDEBUG*/
    // Step 1: score this pattern and possibly prune the whole subtree.
    // (Skipped while the pattern is still below maxpat_min nodes.)
    if (!DFS_Code_Cache->maxpatmincheck) {
        if (initial_graph_search) { // initial descriptor search
            // Line 3 of algorithm 3: gain of this pattern's frequency vector.
            double gain_t = compute_gain(DFS_Code_Cache->x);
            // Line 10,11,12: keep the best-gain pattern seen so far.
            gain_t = fabs(gain_t);
            if (gain_t > largest_gain) {
                largest_gain = gain_t;
                selected_cache = DFS_Code_Cache;
            }
            // Line 13,14,15: the bound on x_upper prunes all supergraphs.
            if (gain_prune_condition(DFS_Code_Cache->x_upper)) return;
        }
        else { // regular run
            double dt = compute_dt(DFS_Code_Cache->x); // Line 9 of algorithm 2
            // A pattern may become the next entering feature only if it is
            // not already active and not known to be linearly dependent.
            if (!DFS_Code_Cache->ActiveSet && !DFS_Code_Cache->linearlydependent && dt < d1 && dt != 0) {
                d1 = dt; // Line 11
                selected_cache = DFS_Code_Cache;
            }
            if (d1_prune_condition(DFS_Code_Cache->x_upper)) return;
        }
    }
    // Step 2: enumerate next extensions if it has not been done yet; the
    // resulting child caches are kept for later passes.
    if (!DFS_Code_Cache->next_extension_enumerated) {
        // Here we enumerate supergraphs as the pattern has passed all tests.
        const RMPath &rmpath = DFS_CODE.buildRMPath();
        int minlabel = DFS_CODE[0].fromlabel;
        DFS_Code_Cache->maxtoc = DFS_CODE[rmpath[0]].to;
        EdgeList edges;
        // Enumerate all possible one edge extensions of the current substructure.
        for (unsigned int n = 0; n < projected->PDFSs.size(); ++n) {
            Graph *graph = projected->PDFSs.at(n)->graph;
            History history(*graph, projected->PDFSs.at(n));
            // backward: close a cycle back onto the rightmost path
            for (int i = (int)rmpath.size() - 1; i >= 1; --i) {
                Edge *e = get_backward(*graph, history[rmpath[i]], history[rmpath[0]], history);
                if (e)
                    DFS_Code_Cache->new_bck_DFS[DFS_CODE[rmpath[i]].from][e->elabel].push(graph, e, projected->PDFSs.at(n));
            }
            // pure forward: grow from the rightmost vertex
            if (get_forward_pure(*graph, history[rmpath[0]], minlabel, history, edges))
                for (EdgeList::iterator it = edges.begin(); it != edges.end(); ++it)
                    DFS_Code_Cache->new_fwd_DFS[DFS_Code_Cache->maxtoc][(*it)->elabel][(*graph)[(*it)->to].label].push(graph, *it, projected->PDFSs.at(n));
            // backtracked forward: grow from earlier rightmost-path vertices
            for (int i = 0; i < (int)rmpath.size(); ++i)
                if (get_forward_rmpath(*graph, history[rmpath[i]], minlabel, history, edges))
                    for (EdgeList::iterator it = edges.begin(); it != edges.end(); ++it)
                        DFS_Code_Cache->new_fwd_DFS[DFS_CODE[rmpath[i]].from][(*it)->elabel][(*graph)[(*it)->to].label].push(graph, *it, projected->PDFSs.at(n));
        }
        // create caches for next extensions. Backward
        for (Projected_iterator2 to = DFS_Code_Cache->new_bck_DFS.begin();
             to != DFS_Code_Cache->new_bck_DFS.end(); )// from each root
        {
            for (Projected_iterator1 elabel = to->second.begin();
                 elabel != to->second.end(); )
            {
                DFS_CODE.push(DFS_Code_Cache->maxtoc, to->first, -1, elabel->first, -1);
                if (DFS_check_condition(elabel->second.PDFSs)) {
                    elabel->second.DFS_Code_Cache = new DFSCodeCache; // create cache
                    elabel->second.DFS_Code_Cache->Parent_DFS_Code_Cache = DFS_Code_Cache;
                    // record graph
                    elabel->second.DFS_Code_Cache->g = GRAPH_IS_MIN; // graph is built when is_min() is called.
                    // minimum check does not stop algorithm as DFS code needs to be continued to be expanded before its size can exceed minimum.
                    elabel->second.DFS_Code_Cache->maxpatmincheck = DFS_CODE.nodeCount() < maxpat_min;
                    // Get frequency vector
                    elabel->second.DFS_Code_Cache->x = GetFrequencyVector(elabel->second.PDFSs);
                    elabel->second.DFS_Code_Cache->x_upper = elabel->second.DFS_Code_Cache->x;
                    // Update upper bound of the parent DFS_Code (and all
                    // ancestors) so their pruning bounds stay valid.
                    for (DFSCodeCache* p = DFS_Code_Cache; p; p = p->Parent_DFS_Code_Cache) {
                        for (unsigned int i = 0; i < elabel->second.DFS_Code_Cache->x_upper.size(); ++i) {
                            if (p->x_upper.at(i) < elabel->second.DFS_Code_Cache->x.at(i)) {
                                p->x_upper.at(i) = elabel->second.DFS_Code_Cache->x.at(i);
                            }
                        }
                    }
                    ++elabel;
                    ndfs += 1;
                }
                else {
                    // erase PDFSs (swap trick releases the vector's capacity)
                    for (std::vector< PDFS* >::iterator it = elabel->second.PDFSs.begin(); it != elabel->second.PDFSs.end(); ++it) delete (*it);
                    elabel->second.PDFSs.clear();
                    std::vector<PDFS*>().swap(elabel->second.PDFSs);
                    // delete map key
                    to->second.erase(elabel++);
                }
                DFS_CODE.pop();
            }
            // NOTE(review): this advances/erases `to` based on the size of the
            // OUTER map (new_bck_DFS), not of to->second -- empty inner maps
            // are therefore never erased here. Confirm whether this is intended.
            if (DFS_Code_Cache->new_bck_DFS.size() != 0) ++to;
            else DFS_Code_Cache->new_bck_DFS.erase(to++);
        }
        // create caches for next extensions. Forward
        for (Projected_iterator3 fromlabel = DFS_Code_Cache->new_fwd_DFS.begin();
             fromlabel != DFS_Code_Cache->new_fwd_DFS.end();)// from each root
        {
            for (Projected_iterator2 elabel = fromlabel->second.begin(); // given std::map<X, Y> where X is key and Y is value, std::map<X, Y>->second will give Y
                 elabel != fromlabel->second.end();)
            {
                for (Projected_iterator1 tolabel = elabel->second.begin();
                     tolabel != elabel->second.end();)
                {
                    if (DFS_CODE.size() == 0) DFS_CODE.push(0, 1, fromlabel->first, elabel->first, tolabel->first);
                    else DFS_CODE.push(fromlabel->first, DFS_Code_Cache->maxtoc + 1, -1, elabel->first, tolabel->first);
                    if (DFS_check_condition(tolabel->second.PDFSs)) {
                        tolabel->second.DFS_Code_Cache = new DFSCodeCache; // create cache
                        tolabel->second.DFS_Code_Cache->Parent_DFS_Code_Cache = DFS_Code_Cache;
                        // record graph
                        tolabel->second.DFS_Code_Cache->g = GRAPH_IS_MIN; // graph is built when is_min() is called.
                        // minimum check does not stop algorithm as DFS code needs to be continued to be expanded before its size can exceed minimum.
                        tolabel->second.DFS_Code_Cache->maxpatmincheck = DFS_CODE.nodeCount() < maxpat_min;
                        // Get frequency vector
                        tolabel->second.DFS_Code_Cache->x = GetFrequencyVector(tolabel->second.PDFSs);
                        tolabel->second.DFS_Code_Cache->x_upper = tolabel->second.DFS_Code_Cache->x;
                        // Update upper bound of the parent DFS_Code (and all
                        // ancestors) so their pruning bounds stay valid.
                        for (DFSCodeCache* p = DFS_Code_Cache; p; p = p->Parent_DFS_Code_Cache) {
                            for (unsigned int i = 0; i < tolabel->second.DFS_Code_Cache->x_upper.size(); ++i) {
                                if (p->x_upper.at(i) < tolabel->second.DFS_Code_Cache->x.at(i)) {
                                    p->x_upper.at(i) = tolabel->second.DFS_Code_Cache->x.at(i);
                                }
                            }
                        }
                        ++tolabel;
                        ndfs += 1;
                    }
                    else {
                        // erase PDFSs (swap trick releases the vector's capacity)
                        for (std::vector< PDFS* >::iterator it = tolabel->second.PDFSs.begin(); it != tolabel->second.PDFSs.end(); ++it) delete (*it);
                        tolabel->second.PDFSs.clear();
                        std::vector<PDFS*>().swap(tolabel->second.PDFSs);
                        // delete map key
                        elabel->second.erase(tolabel++);
                    }
                    DFS_CODE.pop();
                }
                // NOTE(review): advancement/erasure here checks
                // fromlabel->second.size() rather than elabel->second.size();
                // confirm empty elabel entries are meant to survive.
                if (fromlabel->second.size() != 0) ++elabel;
                else fromlabel->second.erase(elabel++);
            }
            // NOTE(review): same pattern -- checks the outer map's size.
            if (DFS_Code_Cache->new_fwd_DFS.size() != 0) ++fromlabel;
            else DFS_Code_Cache->new_fwd_DFS.erase(fromlabel++);
        }
        // clear the parent's embedding list to save memory.
        projected->PDFSs.clear();
        std::vector<PDFS*>().swap(projected->PDFSs);
        DFS_Code_Cache->next_extension_enumerated = true;
    }
    // Step 3: recurse into all extended substructures.
    // backward
    for (Projected_iterator2 to = DFS_Code_Cache->new_bck_DFS.begin(); to != DFS_Code_Cache->new_bck_DFS.end(); ++to) {
        for (Projected_iterator1 elabel = to->second.begin(); elabel != to->second.end(); ++elabel) {
            DFS_CODE.push(DFS_Code_Cache->maxtoc, to->first, -1, elabel->first, -1);
            project_depth_first(&(elabel->second));
            DFS_CODE.pop();
        }
    }
    // forward
    for (Projected_riterator3 from = DFS_Code_Cache->new_fwd_DFS.rbegin();
         from != DFS_Code_Cache->new_fwd_DFS.rend(); ++from)
    {
        for (Projected_iterator2 elabel = from->second.begin();
             elabel != from->second.end(); ++elabel)
        {
            for (Projected_iterator1 tolabel = elabel->second.begin();
                 tolabel != elabel->second.end(); ++tolabel)
            {
                if (DFS_CODE.size() == 0) DFS_CODE.push(0, 1, from->first, elabel->first, tolabel->first);
                else DFS_CODE.push(from->first, DFS_Code_Cache->maxtoc + 1, -1, elabel->first, tolabel->first);
                project_depth_first(&(tolabel->second));
                DFS_CODE.pop();
            }
        }
    }
    return;
}
// Breadth-first (FIFO) traversal of the DFS-code pattern space.
// Functional twin of project_depth_first(): each dequeued Projected is first
// pruned — by the gain bound during the initial descriptor search, or by the
// directional derivative dt during a regular LARS event — then its one-edge
// extensions are enumerated exactly once and memoised in its DFSCodeCache,
// and every surviving child Projected is appended to the work queue.
void gSpan::project_breadth_first(void)
{
 // stack for the iterative for-loop
 std::deque<Projected *> Stack;
 // initial stack
 for (std::map<int, Projected>::iterator SingleNodeIt = Single_Node_DFS_Code_Cache_Map.begin();
  SingleNodeIt != Single_Node_DFS_Code_Cache_Map.end(); ++SingleNodeIt) Stack.push_back(&(SingleNodeIt->second));
 // enter iterative loop
 while (!Stack.empty()) {
  // Pick the very first element from the stack.
  Projected *projected = Stack.front();
  Stack.pop_front();
  DFSCodeCache *DFS_Code_Cache = projected->DFS_Code_Cache;
  // Get DFS Code as well
  DFS_CODE = DFS_Code_Cache->DFS_Code;
  // compute gain or dt and prune
  if (!DFS_Code_Cache->maxpatmincheck) {
   if (initial_graph_search) { // initial descriptor search
    // Line 3 of algorithm 3
    double gain_t = compute_gain(DFS_Code_Cache->x);
    // Line 10,11,12.
    gain_t = fabs(gain_t);
    if (gain_t > largest_gain) {
     largest_gain = gain_t;
     selected_cache = DFS_Code_Cache;
    }
    // Line 13,14,15
    if (gain_prune_condition(DFS_Code_Cache->x)) continue;
   }
   else { // regular run
    double dt = compute_dt(DFS_Code_Cache->x); // Line 9 of algorithm 2
    if (!DFS_Code_Cache->ActiveSet && !DFS_Code_Cache->linearlydependent && dt < d1 && dt != 0) {
     d1 = dt; // Line 11
     selected_cache = DFS_Code_Cache;
    }
    if (d1_prune_condition(DFS_Code_Cache->x)) continue;
   }
  }
  // enumerate next extension if it has not been done.
  // (results are memoised in the cache, so a pattern revisited in a later
  // event skips this whole enumeration block)
  if (!DFS_Code_Cache->next_extension_enumerated) {
   // Here we enumerate supergraphs as it has passed all the test.
   const RMPath &rmpath = DFS_CODE.buildRMPath();
   int minlabel = DFS_CODE[0].fromlabel;
   DFS_Code_Cache->maxtoc = DFS_CODE[rmpath[0]].to;
   EdgeList edges;
   // Enumerate all possible one edge extensions of the current substructure.
   for (unsigned int n = 0; n < projected->PDFSs.size(); ++n) {
    Graph *graph = projected->PDFSs.at(n)->graph;
    History history(*graph, projected->PDFSs.at(n));
    // XXX: do we have to change something here for directed edges?
    // backward
    for (int i = (int)rmpath.size() - 1; i >= 1; --i) {
     Edge *e = get_backward(*graph, history[rmpath[i]], history[rmpath[0]], history);
     if (e)
      DFS_Code_Cache->new_bck_DFS[DFS_CODE[rmpath[i]].from][e->elabel].push(graph, e, projected->PDFSs.at(n));
    }
    // pure forward
    // FIXME: here we pass a too large e->to (== history[rmpath[0]]->to
    // into get_forward_pure, such that the assertion fails.
    //
    // The problem is:
    // history[rmpath[0]]->to > Data_Set[id].size()
    if (get_forward_pure(*graph, history[rmpath[0]], minlabel, history, edges))
     for (EdgeList::iterator it = edges.begin(); it != edges.end(); ++it)
      DFS_Code_Cache->new_fwd_DFS[DFS_Code_Cache->maxtoc][(*it)->elabel][(*graph)[(*it)->to].label].push(graph, *it, projected->PDFSs.at(n));
    // backtracked forward
    for (int i = 0; i < (int)rmpath.size(); ++i)
     if (get_forward_rmpath(*graph, history[rmpath[i]], minlabel, history, edges))
      for (EdgeList::iterator it = edges.begin(); it != edges.end(); ++it)
       DFS_Code_Cache->new_fwd_DFS[DFS_CODE[rmpath[i]].from][(*it)->elabel][(*graph)[(*it)->to].label].push(graph, *it, projected->PDFSs.at(n));
   }
   // create caches for next extensions. Backward
   // The loops below use the post-increment-on-erase idiom so map iterators
   // stay valid while failing candidates are pruned in place.
   for (Projected_iterator2 to = DFS_Code_Cache->new_bck_DFS.begin();
    to != DFS_Code_Cache->new_bck_DFS.end(); )// from each root
   {
    for (Projected_iterator1 elabel = to->second.begin();
     elabel != to->second.end(); )
    {
     DFS_CODE.push(DFS_Code_Cache->maxtoc, to->first, -1, elabel->first, -1);
     if (DFS_check_condition(elabel->second.PDFSs)) {
      elabel->second.DFS_Code_Cache = new DFSCodeCache; // create cache
      elabel->second.DFS_Code_Cache->Parent_DFS_Code_Cache = DFS_Code_Cache;
      // record graph
      elabel->second.DFS_Code_Cache->g = GRAPH_IS_MIN; // graph is built when is_min() is called.
      // minimum check does not stop algorithm as DFS code needs to be continued to be expanded before its size can exceed minimum.
      elabel->second.DFS_Code_Cache->DFS_Code = DFS_CODE;
      elabel->second.DFS_Code_Cache->maxpatmincheck = DFS_CODE.nodeCount() < maxpat_min;
      // Get frequency vector
      elabel->second.DFS_Code_Cache->x = GetFrequencyVector(elabel->second.PDFSs);
      elabel->second.DFS_Code_Cache->x_upper = elabel->second.DFS_Code_Cache->x;
      // Update upper bound of the parent DFS_Code
      for (DFSCodeCache* p = DFS_Code_Cache; p; p = p->Parent_DFS_Code_Cache) {
       for (unsigned int i = 0; i < elabel->second.DFS_Code_Cache->x_upper.size(); ++i) {
        if (p->x_upper.at(i) < elabel->second.DFS_Code_Cache->x.at(i)) {
         p->x_upper.at(i) = elabel->second.DFS_Code_Cache->x.at(i);
        }
       }
      }
      ++elabel;
      ndfs += 1;
     }
     else {
      // erase PDFSs
      for (std::vector< PDFS* >::iterator it = elabel->second.PDFSs.begin(); it != elabel->second.PDFSs.end(); ++it) delete (*it);
      elabel->second.PDFSs.clear();
      std::vector<PDFS*>().swap(elabel->second.PDFSs);
      // delete map key
      to->second.erase(elabel++);
     }
     DFS_CODE.pop();
    }
    // NOTE(review): this tests the size of the whole new_bck_DFS map (always
    // non-empty while `to` iterates it), so the erase branch cannot fire;
    // it may have been meant to test to->second.size(). Same pattern recurs
    // below — confirm intent before changing.
    if (DFS_Code_Cache->new_bck_DFS.size() != 0) ++to;
    else DFS_Code_Cache->new_bck_DFS.erase(to++);
   }
   // create caches for next extensions. Forward
   for (Projected_iterator3 fromlabel = DFS_Code_Cache->new_fwd_DFS.begin();
    fromlabel != DFS_Code_Cache->new_fwd_DFS.end();)// from each root
   {
    for (Projected_iterator2 elabel = fromlabel->second.begin(); // given std::map<X, Y> where X is key and Y is value, std::map<X, Y>->second will give Y
     elabel != fromlabel->second.end();)
    {
     for (Projected_iterator1 tolabel = elabel->second.begin();
      tolabel != elabel->second.end();)
     {
      if (DFS_CODE.size() == 0) DFS_CODE.push(0, 1, fromlabel->first, elabel->first, tolabel->first);
      else DFS_CODE.push(fromlabel->first, DFS_Code_Cache->maxtoc + 1, -1, elabel->first, tolabel->first);
      if (DFS_check_condition(tolabel->second.PDFSs)) {
       tolabel->second.DFS_Code_Cache = new DFSCodeCache; // create cache
       tolabel->second.DFS_Code_Cache->Parent_DFS_Code_Cache = DFS_Code_Cache;
       // record graph
       tolabel->second.DFS_Code_Cache->g = GRAPH_IS_MIN; // graph is built when is_min() is called.
       // minimum check does not stop algorithm as DFS code needs to be continued to be expanded before its size can exceed minimum.
       tolabel->second.DFS_Code_Cache->DFS_Code = DFS_CODE;
       tolabel->second.DFS_Code_Cache->maxpatmincheck = DFS_CODE.nodeCount() < maxpat_min;
       // Get frequency vector
       tolabel->second.DFS_Code_Cache->x = GetFrequencyVector(tolabel->second.PDFSs);
       tolabel->second.DFS_Code_Cache->x_upper = tolabel->second.DFS_Code_Cache->x;
       // Update upper bound of the parent DFS_Code
       for (DFSCodeCache* p = DFS_Code_Cache; p; p = p->Parent_DFS_Code_Cache) {
        for (unsigned int i = 0; i < tolabel->second.DFS_Code_Cache->x_upper.size(); ++i) {
         if (p->x_upper.at(i) < tolabel->second.DFS_Code_Cache->x.at(i)) {
          p->x_upper.at(i) = tolabel->second.DFS_Code_Cache->x.at(i);
         }
        }
       }
       ++tolabel;
       ndfs += 1;
      }
      else {
       // erase PDFSs
       for (std::vector< PDFS* >::iterator it = tolabel->second.PDFSs.begin(); it != tolabel->second.PDFSs.end(); ++it) delete (*it);
       tolabel->second.PDFSs.clear();
       std::vector<PDFS*>().swap(tolabel->second.PDFSs);
       // delete map key
       elabel->second.erase(tolabel++);
      }
      DFS_CODE.pop();
     }
     if (fromlabel->second.size() != 0) ++elabel;
     else fromlabel->second.erase(elabel++);
    }
    if (DFS_Code_Cache->new_fwd_DFS.size() != 0) ++fromlabel;
    else DFS_Code_Cache->new_fwd_DFS.erase(fromlabel++);
   }
   // clear to save memory.
   projected->PDFSs.clear();
   std::vector<PDFS*>().swap(projected->PDFSs);
   DFS_Code_Cache->next_extension_enumerated = true;
  }
  // Test all extended substructures.
  // (children are enqueued rather than recursed into — this is the only
  // difference from project_depth_first)
  // backward
  for (Projected_iterator2 to = DFS_Code_Cache->new_bck_DFS.begin(); to != DFS_Code_Cache->new_bck_DFS.end(); ++to) {
   for (Projected_iterator1 elabel = to->second.begin(); elabel != to->second.end(); ++elabel) {
    Stack.push_back(&(elabel->second));
   }
  }
  // forward
  for (Projected_riterator3 from = DFS_Code_Cache->new_fwd_DFS.rbegin();
   from != DFS_Code_Cache->new_fwd_DFS.rend(); ++from)
  {
   for (Projected_iterator2 elabel = from->second.begin();
    elabel != from->second.end(); ++elabel)
   {
    for (Projected_iterator1 tolabel = elabel->second.begin();
     tolabel != elabel->second.end(); ++tolabel)
    {
     Stack.push_back(&(tolabel->second));
    }
   }
  }
 }
}
// Main driver: LARS-LASSO regression where candidate descriptors (features)
// are frequent subgraphs mined on demand with gSpan.
//
// Parameters:
//  _input_data         graphs with responses (cur->y) — copied into Data_Set
//  _os                 output stream stored in `os` for reporting
//  _minsup             minimum support (stored in `minsup`)
//  _maxpat_min/_max    min/max node count of mined patterns (_max >= _min)
//  _enc                if true, print the selected graphs at the end
//  _nmaxparam          max number of descriptors; 0xffffffff means N-1
//  _minlambda          stop once Lambda drops to this value
//  _directed           edge-direction flag forwarded to Graph construction
//  _test_logical_index per-graph test-set mask (used only when cv_run)
//  lambdas             non-CV run: filled with the Lambda path;
//                      CV run: read as the Lambda grid to evaluate
//  CVMSE, CVE          CV run outputs: test MSE per lambda, per-graph errors
//
// Side effects: writes the active design matrix to "Xmat.txt" (non-CV run)
// and progress lines to std::cout when `report` is set.
void gSpan::run(InputData _input_data, std::ostream &_os,
 unsigned int _minsup, unsigned int _maxpat_min, unsigned int _maxpat_max,
 bool _enc, unsigned int _nmaxparam, double _minlambda, bool _directed,
 std::vector<bool> * _test_logical_index, std::vector<double> * lambdas, std::vector<double> * CVMSE, std::vector < std::vector<double> > * CVE)
{ /************************* Begin Initialization *************************/
 // Error check
 if (_maxpat_max < _maxpat_min) {
  std::cerr << " Maximum number of nodes cannot be lower than minimum number of nodes.";
  return;
 }
 reset();
 // Reading Input
 os = &_os; // output stream
 ID = 0; // mined subgraph index for report function
 minsup = _minsup; // minimum support
 maxpat_min = _maxpat_min; // minimum number of nodes in mined pattern
 maxpat_max = _maxpat_max; // maximum number of nodes in mined pattern
 enc = _enc; // print out selected descriptor
 directed = _directed; // edge direction specification
 unsigned int nmaxparam; // maximum number of pattern mined
 if (_nmaxparam == 0xffffffff) nmaxparam = _input_data.size()-1;
 else nmaxparam = _nmaxparam;
 double minlambda = _minlambda;
 // initialize
 Eigen::MatrixXd Gram;
 std::cout << std::scientific;
 std::vector<double>::iterator lambdasIT = lambdas->begin(); // this is only used for CV run.
 unsigned int nevent = 1;
 ilambda = 0;
 std::ofstream XmatFIO;
 XmatFIO.open("Xmat.txt");
 // Reading Data
 Data_Set = _input_data;
 y.reserve(Data_Set.size());
 for (std::vector < Graph >::iterator cur = Data_Set.begin(); cur != Data_Set.end(); ++cur) {
  y.push_back(cur->y);
 }
 // Set up response vector y.
 if (cv_run) {
  test_logical_index = *_test_logical_index;
  test_set_size = std::count(test_logical_index.begin(), test_logical_index.end(), true);
  train_set_size = std::count(test_logical_index.begin(), test_logical_index.end(), false);
 }
 else {
  // no held-out set: every graph is training data
  test_logical_index = std::vector<bool>(Data_Set.size(), false);
  test_set_size = 0;
  train_set_size = Data_Set.size();
 }
 /************************** End Initialization **************************/
 /********************** Begin Pick First Descriptor **********************/
 if (report) std::cout << " #: Event (N graph): Lambda TrainMSE TestMSE" << std::endl;
 // line 1 in Algorithm 1
 // set up beta
 beta.push_back(0.0);
 // initial descriptor search
 largest_gain = 0;
 // enumerate single node DFS code
 if (!single_node_enumerated) Enumerate_single_node_DFScode();
 single_node_enumerated = true;
 // DFS search through each vertex, and pick a optimal graph
 if (breadth_first) {
  project_breadth_first();
 }
 else {
  for (std::map<int, Projected>::iterator SingleNodeIt = Single_Node_DFS_Code_Cache_Map.begin();
   SingleNodeIt != Single_Node_DFS_Code_Cache_Map.end(); ++SingleNodeIt) project_depth_first(&(SingleNodeIt->second));
 }
 // Append first graph
 selected_cache->ActiveSet = true;
 X_cache_pointers.push_back(selected_cache);
 // compute train set MSE
 std::vector < double > residual = compute_residual(beta, false);
 double trainMSE = compute_MSE_from_residual(residual);
 // compute test set MSE
 residual = compute_residual(beta, true);
 double testMSE = compute_MSE_from_residual(residual);
 Lambda = largest_gain / train_set_size;
 // record lambdas (non-CV run), or record MSE (CV run)
 if (!cv_run) {
  lambdas->push_back(Lambda);
  XmatFIO << "Event " << nevent << "; Lambda = " << Lambda << std::endl;
  for (unsigned int i = 0; i < X_cache_pointers[0]->x.size(); i++) {
   for (unsigned int j = 0; j < X_cache_pointers.size(); j++) {
    XmatFIO << X_cache_pointers[j]->x[i] << " ";
   }
   XmatFIO << std::endl;
  }
  XmatFIO << std::endl;
 }
 else if (cv_run){
  // MSE of the intercept-only (beta = 0) model, used for lambdas above the
  // first computed Lambda
  residual = compute_residual(std::vector<double>(1, 0.0), false);
  double InitialtrainMSE = compute_MSE_from_residual(residual);
  residual = compute_residual(std::vector<double>(1, 0.0), true);
  double InitialtestMSE = compute_MSE_from_residual(residual);
  while (lambdasIT != lambdas->end() && *lambdasIT > Lambda ) {
   if (report) {
    std::cout << std::setw(4) << nevent << ": Step to Lambda (" << std::setw(4) << X_cache_pointers.size() << "): ";
    std::cout << *lambdasIT << " " << InitialtrainMSE << " ";
    if (test_set_size != 0) std::cout << InitialtestMSE << std::endl;
    else std::cout << "n/a" << std::endl;
   }
   //If lambda of interests are below computed lambda,
   CVMSE->push_back(InitialtestMSE);
   //record error
   std::vector<double>::iterator rit = residual.begin();
   for (unsigned int i = 0; i < Data_Set.size(); i++) {
    if (test_logical_index.at(i)) {
     CVE->at(ilambda).at(i) = (*rit);
     rit++;
    }
   }
   ilambda++;
   lambdasIT++;
   nevent++;
  }
 }
 // Report
 if (report) {
  std::cout << std::setw(4) << nevent << ": Adding a graph ( 1): " << Lambda << " " << trainMSE << " ";
  if (test_set_size != 0) std::cout << testMSE << std::endl;
  else std::cout << "n/a" << std::endl;
 }
 nevent++;
 // initialize Gram Matrix
 Gram.resize(1, 1);
 double num = 0;
 for (unsigned int i = 0; i < Data_Set.size(); i++) {
  // pick out training set molecules
  if (!test_logical_index.at(i)) {
   num += selected_cache->x.at(i) * selected_cache->x.at(i);
  }
 }
 Gram(0, 0) = num;
 // NOTE(review): w is recomputed identically a few lines below at the start
 // of the LarsLasso section; this assignment appears redundant — confirm
 // before removing.
 w = compute_residual(beta, false);
 // set up sign vector and gamma vector
 sign.push_back(sgn(compute_covariance_r_xtrain(selected_cache->x)));
 gamma.push_back(sign[0]/Gram(0,0));
 // turn off initial search
 initial_graph_search = false;
 /*********************** End Pick First Descriptor ***********************/
 /************************ Begin Perform LarsLasso ************************/
 // computation of w, v, rho0, and eta0. These are function of active set.
 w = compute_residual(beta, false);
 rho0 = compute_covariance_r_xtrain(X_cache_pointers[0]->x);
 compute_v();
 eta0 = compute_covariance_g_xtrain(X_cache_pointers[0]->x);
 /* GG DEBUG
 debug_graph.resize(2);
 debug_graph[0].label = 0;
 debug_graph[1].label = 0;
 debug_graph[0].push(0, 1, 1);
 debug_graph[1].push(1, 0, 1);
 debug_graph.resize(3);
 debug_graph[2].label = 0;
 debug_graph[1].push(1, 2, 1);
 debug_graph[2].push(2, 1, 1);
 debug_graph[2].push(2, 0, 3);
 debug_graph[0].push(0, 2, 3);
 //debug_graph.resize(4);
 //debug_graph[3].label = 0;
 //debug_graph[2].push(2, 3, 1);
 //debug_graph[3].push(3, 2, 1);
 GGDEBUG */
 unsigned int nlindep = 0;
 // One LARS event per iteration: search for the next descriptor, take the
 // smaller of the add-step (d1) and drop-step (d2), update the active set.
 while (X_cache_pointers.size() < nmaxparam && X_cache_pointers.size() < train_set_size && Lambda > minlambda) {
  // Line 4 algorithm 1
  d1 = DBL_MAX;
  d2 = DBL_MAX;
  ID = 0;
  selected_cache = NULL;
  /* GGDEBUG
  if (nevent == 31) {
   std::cout << "wow";
  }
  GGDEBUG*/
  // DFS search through each vertex, and pick a optimal graph
  if (breadth_first) {
   project_breadth_first();
  }
  else {
   for (std::map<int, Projected>::iterator SingleNodeIt = Single_Node_DFS_Code_Cache_Map.begin();
    SingleNodeIt != Single_Node_DFS_Code_Cache_Map.end(); ++SingleNodeIt) project_depth_first(&(SingleNodeIt->second));
  }
  /* GGDEBUG
  // This means that no graph has selected. perhaps every graph has been added as active set. terminate
  if (nevent == 31) {
   std::cout << "wow";
  }
  */
  if (selected_cache == NULL) {
   if (report) std::cout << std::setw(4) << nevent <<": Could not find a new descriptor." << std::endl;
   break;
  }
  // Checking the rank when the new descriptor is added.
  Eigen::MatrixXd TempGram = AppendXtoGram(Gram, selected_cache->x);
  Eigen::ColPivHouseholderQR<Eigen::MatrixXd> QRGram(TempGram);
  unsigned int rank = QRGram.rank(); // is there a faster method to compute rank?
  if (rank != X_cache_pointers.size() + 1) {
   // This descriptor is dropped for good.
   nlindep++;
   selected_cache->linearlydependent = true;
   linearly_dependent_caches.push_back(selected_cache);
   continue;
  }
  unsigned d2index = compute_d2(); // line 6 of algorithm 1.
  // Lambda of interest requires smaller step than d1 and d2, then the step is temporarily taken and MSE are computed.
  if (cv_run) {
   double d1lambda = d1 / train_set_size;
   double d2lambda = d2 / train_set_size;
   while (lambdasIT != lambdas->end() && (Lambda - *lambdasIT) < d1lambda && (Lambda - *lambdasIT) < d2lambda) {
    // compute a new beta
    std::vector<double> tempbeta = beta;
    for (unsigned int i = 0; i < tempbeta.size(); i++) tempbeta[i] += (Lambda - *lambdasIT)*train_set_size*gamma[i];
    // compute MSEs
    residual = compute_residual(tempbeta, false);
    double trainMSE = compute_MSE_from_residual(residual);
    residual = compute_residual(tempbeta, true);
    double testMSE = compute_MSE_from_residual(residual);
    // report
    if (report) {
     std::cout << std::setw(4) << nevent << ": Step to Lambda (" << std::setw(4) << X_cache_pointers.size() << "): ";
     std::cout << *lambdasIT << " " << trainMSE << " ";
     if (test_set_size != 0) std::cout << testMSE << std::endl;
     else std::cout << "n/a" << std::endl;
    }
    // save MSE, look at next lambda
    CVMSE->push_back(testMSE);
    //record error
    std::vector<double>::iterator rit = residual.begin();
    for (unsigned int i = 0; i < Data_Set.size(); i++) {
     if (test_logical_index.at(i)) {
      CVE->at(ilambda).at(i) = (*rit);
      rit++;
     }
    }
    ilambda++;
    lambdasIT++;
    nevent++;
   }
   if (lambdasIT == lambdas->end()) {
    break;
   }
  }
  /*
  if (nevent == 30) {
   std::cout << "wow";
  }
  */
  // line 7-8
  double d = std::min(d1, d2);
  for (unsigned int i = 0; i < beta.size(); i++) {
   beta[i] += d*gamma[i];
  }
  // line 9-10
  // Update w, v, rho0, and eta0 for next iteration and for covariance calculation.
  w = compute_residual(beta, false);
  rho0 = compute_covariance_r_xtrain(X_cache_pointers[0]->x);
  if (d1 < d2) { // append new graph.
   Gram = AppendXtoGram(Gram, selected_cache->x);
   sign.push_back(sgn(compute_covariance_r_xtrain(selected_cache->x)));
   selected_cache->ActiveSet = true;
   X_cache_pointers.push_back(selected_cache);
   beta.push_back(0.0);
   gamma.push_back(0.0);
   if (report) std::cout << std::setw(4) << nevent << ": Adding a graph (" << std::setw(4) << X_cache_pointers.size() << "): ";
  }
  else { // remove graph
   Gram = RemoveXfromGram(Gram, d2index);
   sign.erase(sign.begin() + d2index);
   X_cache_pointers[d2index]->ActiveSet = false;
   X_cache_pointers.erase(X_cache_pointers.begin() + d2index);
   beta.erase(beta.begin() + d2index);
   gamma.erase(gamma.begin() + d2index);
   if (report) std::cout << std::setw(4) << nevent << ": Removing a graph (" << std::setw(4) << X_cache_pointers.size() << "): ";
   // See if any descriptor is no-longer linearly dependent.
   for (std::vector<DFSCodeCache *>::iterator it = linearly_dependent_caches.begin();
    it != linearly_dependent_caches.end();) {
    Eigen::MatrixXd TempGram = AppendXtoGram(Gram, (*it)->x);
    Eigen::ColPivHouseholderQR<Eigen::MatrixXd> QRGram(TempGram);
    unsigned int rank = QRGram.rank();
    if (rank == X_cache_pointers.size() + 1) {
     // This descriptor is dropped for good.
     nlindep--;
     (*it)->linearlydependent = false;
     it = linearly_dependent_caches.erase(it);
     continue;
    }
    else {
     ++it;
    }
   }
  }
  // line 11 inverse
  Eigen::VectorXd TempSign(sign.size());
  for (unsigned int i = 0; i < sign.size(); i++) {
   TempSign(i) = sign.at(i);
  }
  // Cholesky inverse solve.
  Eigen::VectorXd Tempgamma(gamma.size());
  Tempgamma = Gram.llt().solve(TempSign);
  for (unsigned int i = 0; i < sign.size(); i++) {
   gamma[i] = Tempgamma(i);
  }
  // Update w, v, rho0, and eta0 for next iteration and for covariance calculation.
  compute_v();
  eta0 = compute_covariance_g_xtrain(X_cache_pointers[0]->x);
  // Report. Regression is completed with current set.
  residual = compute_residual(beta, false);
  double trainMSE = compute_MSE_from_residual(residual);
  residual = compute_residual(beta, true);
  double testMSE = compute_MSE_from_residual(residual);
  Lambda = fabs(rho0)/train_set_size;
  if (report) {
   std::cout << Lambda << " " << trainMSE << " ";
   if (test_set_size != 0) std::cout << testMSE << std::endl;
   else std::cout << "n/a" << std::endl;
  }
  if (!cv_run) {
   lambdas->push_back(Lambda);
   XmatFIO << "Event " << nevent << "; Lambda = " << Lambda << std::endl;
   for (unsigned int i = 0; i < X_cache_pointers[0]->x.size(); i++) {
    for (unsigned int j = 0; j < X_cache_pointers.size(); j++) {
     XmatFIO << X_cache_pointers[j]->x[i] << " ";
    }
    XmatFIO << std::endl;
   }
   XmatFIO << std::endl;
  }
  nevent++;
 }
 /************************* End Perform LarsLasso *************************/
 /***************************** Begin Report *****************************/
 beta = complete_regression();
 // compute MSEs
 residual = compute_residual(beta, false);
 trainMSE = compute_MSE_from_residual(residual);
 residual = compute_residual(beta, true);
 testMSE = compute_MSE_from_residual(residual);
 // compute w, v, rho0, and eta0
 w = compute_residual(beta, false);
 rho0 = compute_covariance_r_xtrain(X_cache_pointers[0]->x);
 Lambda = fabs(rho0) / train_set_size;
 if (report) {
  std::cout << std::setw(4) << nevent << ": Final step (" << std::setw(4) << X_cache_pointers.size() <<
   "): " << Lambda << " " << trainMSE << " ";
  if (test_set_size != 0) std::cout << testMSE << std::endl;
  else std::cout << "n/a" << std::endl;
  std::cout << "--------------------------------------------------------------------------------" << std::endl;
  std::cout << "Number of descriptor searched: " << ndfs << std::endl;
  std::cout << "Number of dropped Linearly dependent graph: " << nlindep << std::endl;
  std::cout << "Regression Coefficients" << std::endl;
  for (unsigned int i = 0; i < beta.size(); i++) {
   std::cout << std::setw(13) << beta[i] << std::endl;
  }
  if (cv_run) {
   std::cout << "Test Set Residual" << std::endl;
   // NOTE(review): this local `residual` shadows the outer vector and is
   // discarded; the loop below prints the outer residual, which was already
   // set to the test-set residual a few lines above. Likely dead code.
   std::vector<double> residual = compute_residual(beta, true);
  }
  for (unsigned int i = 0; i < residual.size(); i++) {
   std::cout << residual[i] << std::endl;
  }
  if (enc) {
   std::cout << "Selected Graphs" << std::endl;
   for (unsigned int i = 0; i < X_cache_pointers.size(); i++) {
    std::cout << "{";
    for (unsigned int j = 0; j < X_cache_pointers[i]->x.size(); j++) {
     std::cout << " " << X_cache_pointers[i]->x[j];
    }
    std::cout << " }" << std::endl;
    report_graph(X_cache_pointers[i]->g, X_cache_pointers[i]->x);
   }
  }
 }
 /****************************** End Report ******************************/
 return;
}
// Seed the search with all single-node patterns, which the normal gSpan
// DFS-code machinery cannot produce (it only extends edges). Builds one
// DFSCodeCache per node label with its per-graph frequency vector, then
// enumerates and caches every first-edge (two-node) extension so the
// project_* routines can start from them.
void gSpan::Enumerate_single_node_DFScode()
{
 if (1 > maxpat_max) return;
 /* Do single node handling, as the normal gspan DFS code based processing
  * cannot find subgraphs of size |subg|==1. Hence, we find frequent node
  * labels explicitly.
  */
 std::map<unsigned int, std::map<unsigned int, unsigned int> > singleVertex;
 std::map<unsigned int, unsigned int> singleVertexLabel;
 // All the nodes are organized
 for (unsigned int id = 0; id < Data_Set.size(); ++id) {// for each graph
  for (unsigned int nid = 0; nid < Data_Set[id].size(); ++nid) { // for each vertex
   if (singleVertex[id][Data_Set[id][nid].label] == 0) {
    // number of graphs it appears in
    singleVertexLabel[Data_Set[id][nid].label] += 1; // record support
   }
   singleVertex[id][Data_Set[id][nid].label] += 1; // record how much each graph has.
  }
 }
 /* All minimum support node labels are frequent 'subgraphs'.
  * singleVertexLabel[nodelabel] gives the number of graphs it appears
  * in.
  */
 // Organized nodes are processed further more and put into cache.
 for (std::map<unsigned int, unsigned int>::iterator it =
  singleVertexLabel.begin(); it != singleVertexLabel.end(); ++it)
 {
  ndfs += 1;
  // new cache
  DFSCodeCache * Single_Node_DFS_Code_Cache = new DFSCodeCache;
  Single_Node_DFS_Code_Cache_Map[(*it).first].DFS_Code_Cache = Single_Node_DFS_Code_Cache;
  // set flags
  Single_Node_DFS_Code_Cache->next_extension_enumerated = true;
  Single_Node_DFS_Code_Cache->maxtoc = 0;
  // record Max pattern check
  Single_Node_DFS_Code_Cache->maxpatmincheck = 1 < maxpat_min;
  // append graph. For book keeping.
  Graph g(directed);
  g.resize(1);
  g[0].label = (*it).first;
  Single_Node_DFS_Code_Cache->g = g;
  // append frequency vector
  // x(id) = number of occurrences of this node label in graph id
  FrequencyVector x(Data_Set.size(), 0);
  // NOTE(review): `counts` is never used below — candidate for removal.
  std::vector<unsigned int> counts(Data_Set.size());
  for (std::map<unsigned int, std::map<unsigned int, unsigned int> >::iterator it2 =
   singleVertex.begin(); it2 != singleVertex.end(); ++it2)
  {
   x.at((*it2).first) = (*it2).second[(*it).first];
  }
  Single_Node_DFS_Code_Cache->x = x;
  Single_Node_DFS_Code_Cache->x_upper = x;
 }
 // Enumerate first edge projected DFS.
 EdgeList edges; //vector of edges
 // enumerate first edges
 for (unsigned int id = 0; id < Data_Set.size(); ++id) {
  Graph &g = Data_Set[id];
  for (unsigned int from = 0; from < g.size(); ++from) {
   if (get_forward_root(g, g[from], edges)) { // forward edges are appended here, and return true if edge list is not empty.
    for (EdgeList::iterator it = edges.begin(); it != edges.end(); ++it)
     // append PDFS to the projected. Here Null is passed as an memory address for PDFS
     Single_Node_DFS_Code_Cache_Map[g[from].label].DFS_Code_Cache->new_fwd_DFS[0][(*it)->elabel][g[(*it)->to].label].push(&g, *it, 0);
   }
  }
 }
 // create a list of pointers to caches
 for (std::map<int, Projected>::iterator SingleNodeIt = Single_Node_DFS_Code_Cache_Map.begin();
  SingleNodeIt != Single_Node_DFS_Code_Cache_Map.end(); ++SingleNodeIt)
 {
  for (Projected_iterator2 elabel = SingleNodeIt->second.DFS_Code_Cache->new_fwd_DFS[0].begin(); // given std::map<X, Y> where X is key and Y is value, std::map<X, Y>->second will give Y
   elabel != SingleNodeIt->second.DFS_Code_Cache->new_fwd_DFS[0].end();)
  {
   for (Projected_iterator1 tolabel = elabel->second.begin();
    tolabel != elabel->second.end();)
   {
    DFS_CODE.push(0, 1, SingleNodeIt->first, elabel->first, tolabel->first);
    if (DFS_check_condition(tolabel->second.PDFSs)) {
     tolabel->second.DFS_Code_Cache = new DFSCodeCache; // create cache
     tolabel->second.DFS_Code_Cache->Parent_DFS_Code_Cache = SingleNodeIt->second.DFS_Code_Cache;
     // record graph
     tolabel->second.DFS_Code_Cache->g = GRAPH_IS_MIN; // graph is built when is_min() is called.
     // minimum check does not stop algorithm as DFS code needs to be continued to be expanded before its size can exceed minimum.
     tolabel->second.DFS_Code_Cache->DFS_Code = DFS_CODE;
     tolabel->second.DFS_Code_Cache->maxpatmincheck = DFS_CODE.nodeCount() < maxpat_min;
     // Get frequency vector
     tolabel->second.DFS_Code_Cache->x = GetFrequencyVector(tolabel->second.PDFSs);
     tolabel->second.DFS_Code_Cache->x_upper = tolabel->second.DFS_Code_Cache->x;
     // Update upper bound of the parent DFS_Code
     for (DFSCodeCache* p = SingleNodeIt->second.DFS_Code_Cache; p; p = p->Parent_DFS_Code_Cache) {
      for (unsigned int i = 0; i < tolabel->second.DFS_Code_Cache->x_upper.size(); ++i) {
       if (p->x_upper.at(i) < tolabel->second.DFS_Code_Cache->x.at(i)) {
        p->x_upper.at(i) = tolabel->second.DFS_Code_Cache->x.at(i);
       }
      }
     }
     ++tolabel;
     ndfs += 1;
    }
    else {
     // erase PDFSs
     for (std::vector< PDFS* >::iterator it = tolabel->second.PDFSs.begin(); it != tolabel->second.PDFSs.end(); ++it) delete (*it);
     tolabel->second.PDFSs.clear();
     std::vector<PDFS*>().swap(tolabel->second.PDFSs);
     // delete map key
     elabel->second.erase(tolabel++);
    }
    DFS_CODE.pop();
   }
   // NOTE(review): tests the size of the containing map (always non-empty
   // while `elabel` iterates it) rather than elabel->second — same pattern
   // as project_breadth_first; confirm intent.
   if (SingleNodeIt->second.DFS_Code_Cache->new_fwd_DFS[0].size() != 0) ++elabel;
   else SingleNodeIt->second.DFS_Code_Cache->new_fwd_DFS[0].erase(elabel++);
  }
 }
}
}
|
{"hexsha": "a8ea8ab9cb206a4e81f71e60b36fa9465b37f52b", "size": 48678, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "gspan.cpp", "max_stars_repo_name": "VlachosGroup/LARLASSO-gSpan", "max_stars_repo_head_hexsha": "67933e25bdb8a2ef2071d28178950cde44278b0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gspan.cpp", "max_issues_repo_name": "VlachosGroup/LARLASSO-gSpan", "max_issues_repo_head_hexsha": "67933e25bdb8a2ef2071d28178950cde44278b0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gspan.cpp", "max_forks_repo_name": "VlachosGroup/LARLASSO-gSpan", "max_forks_repo_head_hexsha": "67933e25bdb8a2ef2071d28178950cde44278b0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0174904943, "max_line_length": 186, "alphanum_fraction": 0.6511154937, "num_tokens": 14653}
|
# Monte Carlo Simulation of Berry 1994
"""
    Berry1994(R, repNum; βo, βx, α, γo, γx, γw, σc, σw, σd)

Monte Carlo study of Berry (1994): for each of `repNum` replications,
simulates `R` duopoly markets (J = 2) with logit demand, solves for the
equilibrium shares and prices with `nlsolve`, and estimates the mean-utility
regression by OLS and by IV (cost shifter `w` and the rival product's `x`
as instruments). Keyword arguments are the structural parameters: demand
(`βo`, `βx`, `α`, `σd`) and marginal cost (`γo`, `γx`, `γw`, `σc`, `σw`).

Returns a named tuple `(comparison, data, params)` where `comparison` holds
the true θ = [βo; βx; -α] with the mean and std of the OLS and IV estimates
across replications, and `data` holds all simulated arrays.
"""
function Berry1994(R, repNum;
                    βo = 5.00,
                    βx = 2.00,
                    α = 1.00,
                    γo = 1.00,
                    γx = 0.50,
                    γw = 0.25,
                    σc = 0.25,
                    σw = 0.25,
                    σd = 3.00)
    #Let's set up the remaining parameters
    J = 2
    # Let's randomly draw observable and unobservable characteristics that affect
    # demand: xₘⱼ and ξₘⱼ
    xₘⱼ = randn(R, J, repNum);
    ξₘⱼ = randn(R, J, repNum);
    # Let's randomly draw observable and unobservable characteristics that affect
    # marginal cost: wₘⱼ and ωₘⱼ
    wₘⱼ = randn(R, J, repNum);
    ωₘⱼ = randn(R, J, repNum);
    # Let's compute the marginal cost (cₘⱼ) for each market m and product/firm j
    # as Berry does:
    cₘⱼ = exp.( γo.*ones(R,J,repNum) .+ γx.*xₘⱼ + σc.*ξₘⱼ .+ γw.*wₘⱼ .+ σw.*ωₘⱼ );
    # Let's compute the equilibrium prices (pₘⱼ) and market share (sₘⱼ)
    # Fixed-point map: given candidate shares sₘ, price from the FOC markup
    # c + 1/(α(1-s)), then return the implied logit shares.
    function eq_p_sh( xₘ, ξₘ, cₘ, sₘ)
        p = cₘ .+ ( α.*(1.0.-sₘ)).^(-1);
        v = βo .+ βx.*xₘ .+ σd.*ξₘ .- α.*p;
        F = (exp.(v))./(1.0 .+ sum(exp.(v)));
        return F
    end
    s₀ = [0.5, 0.5];
    sₘⱼ = similar(xₘⱼ);
    pₘⱼ = similar(xₘⱼ);
    for k = 1:repNum
        for r = 1:R
            # Solve eq_p_sh(s) = s; mark non-converged or infeasible shares as NaN.
            sol = nlsolve(y -> eq_p_sh( xₘⱼ[r,:,k], ξₘⱼ[r,:,k], cₘⱼ[r,:,k], y)-y, s₀)
            s_eq = sol.zero
            if ( sol.f_converged == false || sum(s_eq .< 0)>0 || sum(s_eq)>1 )
                s_eq = [NaN , NaN];
            end
            p_eq = cₘⱼ[r,:,k] .+ (α.*(1.0 .- s_eq) ).^(-1);
            sₘⱼ[r,:,k] = s_eq;
            pₘⱼ[r,:,k] = p_eq;
        end
    end
    # Fail loudly if any market failed to reach a valid equilibrium.
    @assert sum(isnan.(sₘⱼ)) == 0
    # Let's calculate the mean utility of product j: (δₘⱼ)
    # δⱼ = log(sⱼ) - log(s₀), with s₀ the outside-good share 1 - s₁ - s₂.
    δₘⱼ = similar(xₘⱼ);
    for k = 1:repNum
        for r=1:R
            δₘⱼ[r,1,k] = log(sₘⱼ[r,1,k])-log(1-sₘⱼ[r,1,k]-sₘⱼ[r,2,k]);
            δₘⱼ[r,2,k] = log(sₘⱼ[r,2,k])-log(1-sₘⱼ[r,1,k]-sₘⱼ[r,2,k]);
        end
    end
    # Let's compute the OLS estimates
    θhatᵒˡˢ = zeros(3,repNum);
    for k = 1:repNum
        y = reshape(δₘⱼ[:,:,k],R*J,1);
        X = [ones(R*J,1) reshape(xₘⱼ[:,:,k],R*J,1) reshape(pₘⱼ[:,:,k],R*J,1) ];
        θhatᵒˡˢ[:,k] = inv(X'*X)*(X'*y);
    end
    # Taking the average and the standard deviation of the simulations
    m_θhatᵒˡˢ = mean(θhatᵒˡˢ,dims=2);
    sd_θhatᵒˡˢ = std(θhatᵒˡˢ,dims=2);
    # Let's compute the IV estimates
    θhatⁱᵛ = zeros(3,repNum);
    # Instrument: swap the two products so each row carries the rival's x.
    aux_xₘⱼ = similar(xₘⱼ);
    aux_xₘⱼ[:,1,:] = xₘⱼ[:,2,:];
    aux_xₘⱼ[:,2,:] = xₘⱼ[:,1,:];
    for k = 1:repNum
        y = reshape(δₘⱼ[:,:,k],R*J,1);
        X = [ones(R*J,1) reshape(xₘⱼ[:,:,k],R*J,1) reshape(pₘⱼ[:,:,k],R*J,1) ];
        Z = [ones(R*J,1) reshape(xₘⱼ[:,:,k],R*J,1) reshape(wₘⱼ[:,:,k],R*J,1) reshape(aux_xₘⱼ[:,:,k],R*J,1) ];
        # Two-stage least squares: project X on the instruments, then regress.
        Xhat = Z*inv(Z'*Z)*(Z'*X);
        θhatⁱᵛ[:,k] = inv(Xhat'*Xhat)*(Xhat'*y);
    end
    # Taking the average and the standard deviation of the simulations
    m_θhatⁱᵛ = mean(θhatⁱᵛ, dims=2);
    sd_θhatⁱᵛ = std(θhatⁱᵛ, dims=2);
    # Organizing the outputs
    θ = [βo ; βx ; -α];
    comparison = (θ = θ, m_θhatᵒˡˢ= m_θhatᵒˡˢ, sd_θhatᵒˡˢ=sd_θhatᵒˡˢ, m_θhatⁱᵛ= m_θhatⁱᵛ, sd_θhatⁱᵛ= sd_θhatⁱᵛ);
    data = ( xₘⱼ=xₘⱼ, ξₘⱼ=ξₘⱼ, wₘⱼ=wₘⱼ, ωₘⱼ=ωₘⱼ, cₘⱼ=cₘⱼ, sₘⱼ=sₘⱼ, pₘⱼ=pₘⱼ, δₘⱼ=δₘⱼ );
    params = (βo = βo, βx = βx, α = α, γo = γo, γx = γx, γw = γw, σc = σc, σw = σw, σd = σd);
    output = (comparison = comparison, data = data, params=params);
    return output
end
|
{"hexsha": "35c2424649f85ebe798b4399e88aa5153600a4a9", "size": 3237, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Berry1994.jl", "max_stars_repo_name": "leopoldomig/MetricsProject.jl", "max_stars_repo_head_hexsha": "1872643bd99739152201fb331832e2c0db98e264", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Berry1994.jl", "max_issues_repo_name": "leopoldomig/MetricsProject.jl", "max_issues_repo_head_hexsha": "1872643bd99739152201fb331832e2c0db98e264", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Berry1994.jl", "max_forks_repo_name": "leopoldomig/MetricsProject.jl", "max_forks_repo_head_hexsha": "1872643bd99739152201fb331832e2c0db98e264", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9051724138, "max_line_length": 111, "alphanum_fraction": 0.546802595, "num_tokens": 1683}
|
import tempfile
from urllib.request import urlretrieve
import cv2
import numpy as np
from vidstab import layer_overlay
from vidstab.vidstab_utils import border_frame
import imutils
# Module-level setup: download the golden/expected result image for
# test_layer_overlay_rotated into a temporary directory at import time.
tmp_dir = tempfile.TemporaryDirectory()
remote_expected_result = 'https://s3.amazonaws.com/python-vidstab/overlay_2_test.jpg'
overlay_2_test_file = '{}/overlay_2_test.jpg'.format(tmp_dir.name)
# NOTE: requires network access when the module is imported.
urlretrieve(remote_expected_result, overlay_2_test_file)
def add_random_circles(img, n=50, seed=None):
    """Draw ``n`` randomly colored, positioned and sized circles onto ``img`` in place.

    Args:
        img: BGR uint8 image of shape (H, W, 3); modified in place.
        n: number of circles to draw.
        seed: optional RNG seed for reproducible output.
    """
    # Compare against None (not truthiness) so that seed=0 is honored.
    if seed is not None:
        np.random.seed(seed)

    for _ in range(n):
        color = tuple(np.random.randint(256) for _ in range(3))
        center = (np.random.randint(img.shape[1]), np.random.randint(img.shape[0]))
        radius = np.random.randint(3, 30)
        cv2.circle(img, center, radius, color, -1)
def test_layer_overlay():
    """Overlaying content onto black keeps the content; overlaying black keeps black."""
    base = np.zeros((100, 200, 3), dtype='uint8')
    circles = base.copy()
    add_random_circles(circles)

    base, _ = border_frame(base, border_size=0, border_type='black')
    circles, _ = border_frame(circles, border_size=0, border_type='black')

    result_circles_on_black = layer_overlay(circles, base)
    result_black_on_circles = layer_overlay(base, circles)

    assert np.allclose(result_black_on_circles, base)
    assert np.allclose(result_circles_on_black, circles)
def test_layer_overlay_rotated():
    """Overlay two distinct circle frames (the second rotated 90 degrees) and
    compare one ordering against a stored expected JPEG."""
    black_frame = np.zeros((100, 200, 3), dtype='uint8')
    rand_frame_1 = black_frame.copy()
    rand_frame_2 = black_frame.copy()
    add_random_circles(rand_frame_1, seed=42)
    add_random_circles(rand_frame_2, seed=8675309)
    rand_frame_1, _ = border_frame(rand_frame_1, border_size=0, border_type='black')
    rand_frame_2, _ = border_frame(rand_frame_2, border_size=0, border_type='black')
    rand_frame_2 = imutils.rotate(rand_frame_2, 90)
    overlay_1 = layer_overlay(rand_frame_1, rand_frame_2)
    overlay_2 = layer_overlay(rand_frame_2, rand_frame_1)
    overlay_2_expected = cv2.imread(overlay_2_test_file)
    overlay_1 = overlay_1[:, :, :3]
    overlay_2 = overlay_2[:, :, :3]
    # write/read as jpg to match expected
    cv2.imwrite(overlay_2_test_file, overlay_2)
    overlay_2 = cv2.imread(overlay_2_test_file)
    # BUGFIX: the original asserted np.allclose(overlay_1, overlay_1), which is
    # vacuously true. Check the two overlay orderings at least agree in shape.
    # TODO: add a golden image for overlay_1 as well.
    assert overlay_1.shape == overlay_2.shape
    assert np.allclose(overlay_2, overlay_2_expected)
|
{"hexsha": "df0c7c1878fe1f0fc509a116b888c89678c2a48a", "size": 2304, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_layer_utils.py", "max_stars_repo_name": "supersom/python_video_stab", "max_stars_repo_head_hexsha": "b6edd5cae79c3ef8a554243c55e97d6f945c9527", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_layer_utils.py", "max_issues_repo_name": "supersom/python_video_stab", "max_issues_repo_head_hexsha": "b6edd5cae79c3ef8a554243c55e97d6f945c9527", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_layer_utils.py", "max_forks_repo_name": "supersom/python_video_stab", "max_forks_repo_head_hexsha": "b6edd5cae79c3ef8a554243c55e97d6f945c9527", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3880597015, "max_line_length": 85, "alphanum_fraction": 0.7413194444, "include": true, "reason": "import numpy", "num_tokens": 619}
|
"""
Run modisco
"""
import logging
import matplotlib.pyplot as plt
from argh.decorators import named, arg
import shutil
import pandas as pd
import os
from collections import OrderedDict
from tqdm import tqdm
from pathlib import Path
from bpnet.utils import write_pkl, render_ipynb, remove_exists, add_file_logging, create_tf_session, pd_first_cols
from bpnet.cli.contrib import ContribFile
from bpnet.cli.train import _get_gin_files, log_gin_config
from bpnet.modisco.files import ModiscoFile
from bpnet.utils import write_json, read_json
import gin
import numpy as np
import inspect
# Absolute directory of this source file -- used to locate the bundled
# notebook templates (see modisco_report below).
filename = inspect.getframeinfo(inspect.currentframe()).filename
this_path = os.path.dirname(os.path.abspath(filename))

# Library-style logger: NullHandler so importing apps control the output.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# --------------------------------------------
# load functions for the modisco directory
def load_included_samples(modisco_dir):
    """Return the boolean mask (one entry per ContribFile region) of the
    regions that were used for this modisco run."""
    mask_path = os.path.join(modisco_dir, "modisco-run.subset-contrib-file.npy")
    return np.load(mask_path)
def load_ranges(modisco_dir):
    """Load the genomic ranges of the (subset of) regions used by a modisco run.

    Args:
        modisco_dir: directory produced by `bpnet modisco-run`.

    Returns:
        pandas.DataFrame of ranges from the contribution file.
    """
    modisco_dir = Path(modisco_dir)
    mask = load_included_samples(modisco_dir)

    run_kwargs = read_json(modisco_dir / "modisco-run.kwargs.json")
    contrib = ContribFile(run_kwargs["contrib_file"], mask)
    ranges_df = contrib.get_ranges()
    contrib.close()
    return ranges_df
def load_contrib_type(modisco_kwargs):
    """Return the contribution-score type (e.g. 'profile/wn') encoded in
    ``modisco_kwargs['contrib_wildcard']``.

    Each wildcard has the form '<task>/<head>/<summary>'; everything after the
    first '/' is the contrib type. Multiple wildcards are comma-separated.

    Returns:
        The contrib type of the first wildcard (used as the default).
    """
    contrib_types = [wildcard.split("/", maxsplit=1)[1]
                     for wildcard in modisco_kwargs['contrib_wildcard'].split(",")]
    # BUGFIX: the original tested `not len(set(contrib_types))`, which is only
    # true for an empty list -- the intended warning for *multiple* distinct
    # contrib types never fired. Warn when there is more than one.
    if len(set(contrib_types)) > 1:
        contrib_wildcard = modisco_kwargs['contrib_wildcard']
        logger.warning(f"contrib_wildcard: {contrib_wildcard} contains multiple contrib_types. "
                       "Current code can by default only handle a single one.")
    # use the first one as the default
    contrib_type = contrib_types[0]
    return contrib_type
def get_nonredundant_example_idx(ranges, width=200):
    """Get non-overlapping intervals (in the central region)

    Args:
      ranges: pandas.DataFrame returned by bpnet.cli.modisco.load_ranges
      width: central region considered that should not overlap between
         any interval. If None, intervals are used as-is (no resizing and
         no merged-length filtering).

    Returns:
        Series of `example_idx` values whose intervals don't overlap.
    """
    from pybedtools import BedTool
    from bpnet.preproc import resize_interval
    # 1. resize ranges
    ranges['example_idx'] = np.arange(len(ranges))  # make sure
    r = ranges[['chrom', 'start', 'end', 'example_idx']]  # add also the strand information
    if width is not None:
        r = resize_interval(r, width, ignore_strand=True)
    bt = BedTool.from_dataframe(r)
    btm = bt.sort().merge()
    df = btm.to_dataframe()
    # BUGFIX: the original evaluated `width * 2` unconditionally, which raised
    # a TypeError for the documented width=None case.
    if width is not None:
        df = df[(df.end - df.start) < width * 2]
    r_overlaps = bt.intersect(BedTool.from_dataframe(df), wb=True).to_dataframe()
    # `name` holds example_idx; de-duplicate by the merged-interval coordinates
    # (bedtools output columns: score/strand/thickStart).
    keep_idx = r_overlaps.drop_duplicates(['score', 'strand', 'thickStart'])['name'].astype(int)

    return keep_idx
# --------------------------------------------
@gin.configurable
def modisco_run(output_path,  # specified by bpnet_modisco_run
                task_names,
                contrib_scores,
                hypothetical_contribs,
                one_hot,
                null_per_pos_scores,
                # specified by gin-config
                workflow=gin.REQUIRED,  # TfModiscoWorkflow
                report=None):  # reports to use
    """Run the TF-MoDISco workflow and write the results to an hdf5 file.

    Args:
      output_path: path of the output modisco.h5 file.
      task_names, contrib_scores, hypothetical_contribs, one_hot,
        null_per_pos_scores: inputs forwarded to the workflow.
      workflow: TfModiscoWorkflow objects
      report: path to the report ipynb
    """
    import h5py
    modisco_results = workflow(task_names=task_names,
                               contrib_scores=contrib_scores,
                               hypothetical_contribs=hypothetical_contribs,
                               one_hot=one_hot,
                               null_per_pos_scores=null_per_pos_scores)
    # save the results
    logger.info(f"Saving modisco file to {output_path}")
    # BUGFIX: open explicitly in write mode. h5py>=3 defaults to 'r', which
    # fails when creating a new results file.
    grp = h5py.File(output_path, 'w')
    modisco_results.save_hdf5(grp)
    grp.flush()
    grp.close()

    # (the duplicated nested `if report is not None:` check was removed)
    if report is not None:
        report = os.path.abspath(os.path.expanduser(report))
        if not os.path.exists(report):
            raise ValueError(f"Report file {report} doesn't exist")
        logger.info("Running the report")
        # Run the jupyter notebook
        report_path = os.path.join(os.path.dirname(output_path), os.path.basename(report))
        render_ipynb(report,
                     report_path,
                     params=dict(modisco_file=output_path,
                                 modisco_dir=os.path.dirname(output_path)))
        logger.info(f"Done rendering the report file: {report_path}")
@named("modisco-run")
@arg('contrib_file',
     help='path to the hdf5 file containing contribution scores')
@arg('output_dir',
     help='output file directory')
@arg('--null-contrib-file',
     help='Path to the null contribution scores')
@arg('--premade',
     help='pre-made config file specifying modisco hyper-paramters to use.')
@arg('--config',
     help='gin config file path(s) specifying the modisco workflow parameters.'
     ' Parameters specified here override the --premade parameters. Multiple '
     'config files can be separated by comma separation (i.e. --config=file1.gin,file2.gin)')
@arg('--override',
     help='semi-colon separated list of additional gin bindings to use')
@arg("--contrib-wildcard",
     help="Wildcard of the contribution scores to use for running modisco. For example, */profile/wn computes"
     "uses the profile contribution scores for all the tasks (*) using the wn normalization (see bpnet.heads.py)."
     "*/counts/pre-act uses the total count contribution scores for all tasks w.r.t. the pre-activation output "
     "of prediction heads. Multiple wildcards can be by comma-separating them.")
@arg('--only-task-regions',
     help='If specified, only the contribution scores from regions corresponding to the tasks specified '
     'in --contrib-wildcard will be used. For example, if dataspec.yml contained Oct4 and Sox2 peaks when '
     'generating the contrib_file and `--contrib-wildcard=Oct4/profile/wn`, then modisco will be only ran '
     'in the Oct4 peaks. If `--contrib-wildcard=Oct4/profile/wn,Sox2/profile/wn` or `--contrib-wildcard=*/profile/wn`, '
     'then peaks of both Sox2 and Oct4 will be used.')
@arg('--filter-npy',
     help='File path to the .npz file containing a boolean one-dimensional numpy array of the same length'
     'as the contrib_file. Modisco will be ran on a subset of regions in the contrib_file '
     'where this array has value=True.')
@arg('--exclude-chr',
     help='Comma-separated list of chromosomes to exclude.')
@arg('--num-workers',
     help='number of workers to use in parallel for running modisco')
@arg('--gpu',
     help='which gpu to use. Example: gpu=1')
@arg('--memfrac-gpu',
     help='what fraction of the GPU memory to use')
@arg('--overwrite',
     help='If True, the output files will be overwritten if they already exist.')
def bpnet_modisco_run(contrib_file,
                      output_dir,
                      null_contrib_file=None,
                      premade='modisco-50k',
                      config=None,
                      override='',
                      contrib_wildcard="*/profile/wn",  # on which contribution scores to run modisco
                      only_task_regions=False,
                      filter_npy=None,
                      exclude_chr="",
                      num_workers=10,
                      gpu=None,  # no need to use a gpu by default
                      memfrac_gpu=0.45,
                      overwrite=False,
                      ):
    """Run TF-MoDISco on the contribution scores stored in the contribution score file
    generated by `bpnet contrib`.
    """
    add_file_logging(output_dir, logger, 'modisco-run')
    if gpu is not None:
        logger.info(f"Using gpu: {gpu}, memory fraction: {memfrac_gpu}")
        create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)
    else:
        # Don't use any GPU's
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        os.environ['MKL_THREADING_LAYER'] = 'GNU'

    import modisco
    assert '/' in contrib_wildcard

    if filter_npy is not None:
        filter_npy = os.path.abspath(str(filter_npy))
    if config is not None:
        config = os.path.abspath(str(config))

    # setup output file paths
    output_path = os.path.abspath(os.path.join(output_dir, "modisco.h5"))
    remove_exists(output_path, overwrite=overwrite)
    output_filter_npy = os.path.abspath(os.path.join(output_dir, 'modisco-run.subset-contrib-file.npy'))
    remove_exists(output_filter_npy, overwrite=overwrite)
    kwargs_json_file = os.path.join(output_dir, "modisco-run.kwargs.json")
    remove_exists(kwargs_json_file, overwrite=overwrite)
    if config is not None:
        config_output_file = os.path.join(output_dir, 'modisco-run.input-config.gin')
        remove_exists(config_output_file, overwrite=overwrite)
        shutil.copyfile(config, config_output_file)

    # save the hyper-parameters
    write_json(dict(contrib_file=os.path.abspath(contrib_file),
                    output_dir=str(output_dir),
                    null_contrib_file=null_contrib_file,
                    config=str(config),
                    override=override,
                    contrib_wildcard=contrib_wildcard,
                    only_task_regions=only_task_regions,
                    filter_npy=str(filter_npy),
                    exclude_chr=exclude_chr,
                    num_workers=num_workers,
                    overwrite=overwrite,
                    output_filter_npy=output_filter_npy,
                    gpu=gpu,
                    memfrac_gpu=memfrac_gpu),
               kwargs_json_file)

    # setup the gin config using premade, config and override
    cli_bindings = [f'num_workers={num_workers}']
    gin.parse_config_files_and_bindings(_get_gin_files(premade, config),
                                        bindings=cli_bindings + override.split(";"),
                                        # NOTE: custom files were inserted right after
                                        # ther user's config file and before the `override`
                                        # parameters specified at the command-line
                                        skip_unknown=False)
    log_gin_config(output_dir, prefix='modisco-run.')
    # --------------------------------------------
    # load the contribution file
    logger.info(f"Loading the contribution file: {contrib_file}")
    cf = ContribFile(contrib_file)
    tasks = cf.get_tasks()

    # figure out subset_tasks (None means "all tasks")
    subset_tasks = set()
    for w in contrib_wildcard.split(","):
        task, head, head_summary = w.split("/")
        if task == '*':
            subset_tasks = None
        else:
            if task not in tasks:
                raise ValueError(f"task {task} not found in tasks: {tasks}")
            # BUGFIX: guard against a preceding '*' wildcard having set
            # subset_tasks to None (the original crashed on None.add).
            if subset_tasks is not None:
                subset_tasks.add(task)
    if subset_tasks is not None:
        subset_tasks = list(subset_tasks)

    # --------------------------------------------
    # subset the intervals
    logger.info(f"Loading ranges")
    ranges = cf.get_ranges()
    # include all samples at the beginning
    include_samples = np.ones(len(cf)).astype(bool)

    # --only-task-regions
    if only_task_regions:
        if subset_tasks is None:
            logger.warning("contrib_wildcard contains all tasks (specified by */<head>/<summary>). Not using --only-task-regions")
        elif np.all(ranges['interval_from_task'] == ''):
            raise ValueError("Contribution file wasn't created from multiple set of peaks. "
                             "E.g. interval_from_task='' for all ranges. Please disable --only-task-regions")
        else:
            logger.info(f"Subsetting ranges according to `interval_from_task`")
            include_samples = include_samples & ranges['interval_from_task'].isin(subset_tasks).values
            logger.info(f"Using {include_samples.sum()} / {len(include_samples)} regions after --only-task-regions subset")

    # --exclude-chr
    if exclude_chr:
        logger.info(f"Excluding chromosomes: {exclude_chr}")
        chromosomes = ranges['chr']
        # BUGFIX: exclude_chr is a comma-separated string; `isin` on the raw
        # string would match individual characters, not chromosome names.
        excluded_chromosomes = exclude_chr.split(",")
        include_samples = include_samples & (~pd.Series(chromosomes).isin(excluded_chromosomes)).values
        logger.info(f"Using {include_samples.sum()} / {len(include_samples)} regions after --exclude-chr subset")

    # -- filter-npy
    if filter_npy is not None:
        print(f"Loading a filter file from {filter_npy}")
        include_samples = include_samples & np.load(filter_npy)
        logger.info(f"Using {include_samples.sum()} / {len(include_samples)} regions after --filter-npy subset")

    # store the subset-contrib-file.npy
    logger.info(f"Saving the included samples from ContribFile to {output_filter_npy}")
    np.save(output_filter_npy, include_samples)
    # --------------------------------------------
    # convert to indices
    idx = np.arange(len(include_samples))[include_samples]
    seqs = cf.get_seq(idx=idx)

    # fetch the contribution scores from the importance score file
    # expand * to use all possible values
    # TODO - allow this to be done also for all the heads?
    hyp_contrib = {}
    task_names = []
    for w in contrib_wildcard.split(","):
        wc_task, head, head_summary = w.split("/")
        # BUGFIX: the original tested the stale `task` variable left over from
        # the subset_tasks loop above instead of `wc_task`.
        if wc_task == '*':
            use_tasks = tasks
        else:
            use_tasks = [wc_task]
        for task in use_tasks:
            key = f"{task}/{head}/{head_summary}"
            task_names.append(key)
            hyp_contrib[key] = cf._subset(cf.data[f'/hyp_contrib/{key}'], idx=idx)
    contrib = {k: v * seqs for k, v in hyp_contrib.items()}

    if null_contrib_file is not None:
        logger.info(f"Using null-contrib-file: {null_contrib_file}")
        null_cf = ContribFile(null_contrib_file)
        null_seqs = null_cf.get_seq()
        null_per_pos_scores = {key: null_seqs * null_cf.data[f'/hyp_contrib/{key}'][:]
                               for key in task_names}
    else:
        # default Null distribution. Requires modisco 5.0
        logger.info(f"Using default null_contrib_scores")
        null_per_pos_scores = modisco.coordproducers.LaplaceNullDist(num_to_samp=10000)

    # run modisco.
    # NOTE: `workflow` and `report` parameters are provided by gin config files
    modisco_run(task_names=task_names,
                output_path=output_path,
                contrib_scores=contrib,
                hypothetical_contribs=hyp_contrib,
                one_hot=seqs,
                null_per_pos_scores=null_per_pos_scores)

    logger.info(f"bpnet modisco-run finished. modisco.h5 and other files can be found in: {output_dir}")
def modisco_plot(modisco_dir,
                 output_dir,
                 # filter_npy=None,
                 # ignore_dist_filter=False,
                 heatmap_width=200,
                 figsize=(10, 10), contribsf=None):
    """Plot the results of a modisco run

    Writes, per pattern: aggregate profile + contribution-score plots,
    hypothetical-contribution-score plots, and per-seqlet heatmap PNGs.

    Args:
      modisco_dir: modisco directory
      output_dir: Output directory for writing the results
      heatmap_width: width (bp) to which seqlet regions are resized in heatmaps
      figsize: Output figure size
      contribsf: [optional] modisco contribution score file (ContribFile)
    """
    plt.switch_backend('agg')
    add_file_logging(output_dir, logger, 'modisco-plot')
    from bpnet.plot.vdom import write_heatmap_pngs
    from bpnet.plot.profiles import plot_profiles
    from bpnet.utils import flatten
    output_dir = Path(output_dir)
    output_dir.parent.mkdir(parents=True, exist_ok=True)
    # load modisco
    mf = ModiscoFile(f"{modisco_dir}/modisco.h5")
    # use the caller-provided ContribFile if given; otherwise load (and cache)
    # the one recorded in the modisco run directory
    if contribsf is not None:
        d = contribsf
    else:
        d = ContribFile.from_modisco_dir(modisco_dir)
        logger.info("Loading the contribution scores")
        d.cache()  # load all
    thr_one_hot = d.get_seq()
    # thr_hypothetical_contribs
    tracks = d.get_profiles()
    thr_hypothetical_contribs = dict()
    thr_contrib_scores = dict()
    # TODO - generalize this
    thr_hypothetical_contribs['profile'] = d.get_hyp_contrib()
    thr_contrib_scores['profile'] = d.get_contrib()
    tasks = d.get_tasks()
    # Count contribution (if it exists) -- the score key name differs between
    # file versions, so try both.
    if d.contains_contrib_score("counts/pre-act"):
        count_contrib_score = "counts/pre-act"
        thr_hypothetical_contribs['count'] = d.get_hyp_contrib(contrib_score=count_contrib_score)
        thr_contrib_scores['count'] = d.get_contrib(contrib_score=count_contrib_score)
    elif d.contains_contrib_score("count"):
        count_contrib_score = "count"
        thr_hypothetical_contribs['count'] = d.get_hyp_contrib(contrib_score=count_contrib_score)
        thr_contrib_scores['count'] = d.get_contrib(contrib_score=count_contrib_score)
    else:
        # Don't do anything
        pass
    # flatten the nested {score: {task: array}} dicts into {"score/task": array}
    thr_hypothetical_contribs = OrderedDict(flatten(thr_hypothetical_contribs, separator='/'))
    thr_contrib_scores = OrderedDict(flatten(thr_contrib_scores, separator='/'))
    # -------------------------------------------------
    all_seqlets = mf.seqlets()
    all_patterns = mf.pattern_names()
    if len(all_patterns) == 0:
        print("No patterns found")
        return
    # 1. Plots with tracks and contrib scores
    print("Writing results for contribution scores")
    plot_profiles(all_seqlets,
                  thr_one_hot,
                  tracks=tracks,
                  contribution_scores=thr_contrib_scores,
                  legend=False,
                  flip_neg=True,
                  rotate_y=0,
                  seq_height=.5,
                  patterns=all_patterns,
                  n_bootstrap=100,
                  fpath_template=str(output_dir / "{pattern}/agg_profile_contribcores"),
                  mkdir=True,
                  figsize=figsize)
    # 2. Plots only with hypothetical contrib scores
    print("Writing results for hypothetical contribution scores")
    plot_profiles(all_seqlets,
                  thr_one_hot,
                  tracks={},
                  contribution_scores=thr_hypothetical_contribs,
                  legend=False,
                  flip_neg=True,
                  rotate_y=0,
                  seq_height=1,
                  patterns=all_patterns,
                  n_bootstrap=100,
                  fpath_template=str(output_dir / "{pattern}/agg_profile_hypcontribscores"),
                  figsize=figsize)
    print("Plotting heatmaps")
    for pattern in tqdm(all_patterns):
        write_heatmap_pngs(all_seqlets[pattern],
                           d,
                           tasks,
                           pattern,
                           output_dir=str(output_dir / pattern),
                           resize_width=heatmap_width)
    mf.close()
def cwm_scan_seqlets(modisco_dir,
                     output_file,
                     trim_frac=0.08,
                     num_workers=1,
                     contribsf=None,
                     verbose=False):
    """Compute the cwm scanning scores of the original modisco seqlets

    Scans only at the seqlets' own locations (not the full sequences) to
    obtain the score distribution, then writes the concatenated table as CSV.

    Args:
      modisco_dir: modisco run directory (Path-like; must contain modisco.h5)
      output_file: CSV output path
      trim_frac: fraction used to trim each pattern by information content
      num_workers: parallel jobs for scanning
      contribsf: [optional] pre-loaded ContribFile; loaded from modisco_dir if None
      verbose: verbosity forwarded to pattern.get_instances
    """
    from bpnet.modisco.table import ModiscoData
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    add_file_logging(os.path.dirname(output_file), logger, 'cwm_scan_seqlets')
    # figure out contrib_wildcard
    mf = ModiscoFile(modisco_dir / "modisco.h5")
    if contribsf is None:
        contrib = ContribFile.from_modisco_dir(modisco_dir)
    else:
        contrib = contribsf
    tasks = mf.tasks()
    # HACK prune the tasks of contribution (in case it's present)
    tasks = [t.split("/")[0] for t in tasks]
    dfi_list = []
    for pattern_name in tqdm(mf.pattern_names()):
        pattern = mf.get_pattern(pattern_name).trim_seq_ic(trim_frac)
        seqlets = mf._get_seqlets(pattern_name, trim_frac=trim_frac)
        # scan only the existing locations of the seqlets instead of the full sequences
        # to obtain the distribution
        stacked_seqlets = contrib.extract(seqlets)
        match, contribution = pattern.scan_contribution(stacked_seqlets.contrib, hyp_contrib=None, tasks=tasks,
                                                       n_jobs=num_workers, verbose=False, pad_mode=None)
        seq_match = pattern.scan_seq(stacked_seqlets.seq, n_jobs=num_workers, verbose=False, pad_mode=None)
        # fdr=1 keeps all instances; filtering happens downstream in cwm_scan
        dfm = pattern.get_instances(tasks, match, contribution, seq_match, fdr=1, verbose=verbose, plot=verbose)
        # keep only positively matching sequences
        dfm = dfm[dfm.seq_match > 0]
        dfi_list.append(dfm)
    df = pd.concat(dfi_list)
    df.to_csv(output_file)
# TODO - rename centroid_seqlet_matches?
# TODO - rename pssm to pfm or pwm?
@arg('modisco_dir',
     help='modisco directory - used to obtain (optionally centroid_seqlet_matches.csv.gz), modisco.h5, contrib-wildcard')
@arg('output_file',
     help='Output file path. File format will depend on the file suffix. '
     'Available suffixes are: .parq (Parquet file), .csv, .csv.gz, .tsv, .tsv.gz, .bed, .bed.gz. '
     'NOTE: when using .bed or .bed.gz, only the following 7 columns are written: '
     'chromosome, start, end, pattern, contrib_weighted_p, strand, match_weighted_p')
@arg('--trim-frac',
     help='How much to trim the pattern when scanning for motif instances. See also `bpnet.modisco.utils.trim_pssm_idx`')
@arg('--patterns',
     help='Comma separated list of patterns for which to run CWM scanning')
@arg('--filters',
     help='Filters to apply. Specify empty string `--filters=""` for no filters.')
@arg('--contrib-file',
     help='Optional file path to the contribution score file. If not specified, '
     'the contribution score file used in `bpnet modisco-run` will be used by default.')
@arg('--add-profile-features',
     help='Add profile features at the location of motif matches such as the maximum number of counts.')
@arg('--num-workers',
     help='Number of workers to use in parallel for cwm scanning.')
def cwm_scan(modisco_dir,
             output_file,
             trim_frac=0.08,
             patterns='all',
             filters='match_weighted_p>=.2,contrib_weighted_p>=.01',
             contrib_file=None,
             add_profile_features=False,
             num_workers=10):
    """Get motif instances via CWM scanning.
    """
    from bpnet.modisco.utils import longer_pattern, shorten_pattern
    from bpnet.modisco.pattern_instances import annotate_profile_single
    add_file_logging(os.path.dirname(output_file), logger, 'cwm-scan')
    modisco_dir = Path(modisco_dir)

    valid_suffixes = [
        '.csv',
        '.csv.gz',
        '.tsv',
        '.tsv.gz',
        '.parq',
        '.bed',
        '.bed.gz',
    ]
    if not any([str(output_file).endswith(suffix) for suffix in valid_suffixes]):
        raise ValueError(f"output_file doesn't have a valid file suffix. Valid file suffixes are: {valid_suffixes}")

    # Centroid matches path
    cm_path = modisco_dir / f'cwm-scan-seqlets.trim-frac={trim_frac:.2f}.csv.gz'

    # save the hyper-parameters
    kwargs_json_file = os.path.join(os.path.dirname(output_file), 'cwm-scan.kwargs.json')
    # BUGFIX: the original recorded modisco_dir=abspath(str(contrib_file))
    # (wrong variable -- and 'None' when no contrib_file was given).
    write_json(dict(modisco_dir=os.path.abspath(str(modisco_dir)),
                    output_file=str(output_file),
                    cwm_scan_seqlets_path=str(cm_path),
                    trim_frac=trim_frac,
                    patterns=patterns,
                    filters=filters,
                    contrib_file=contrib_file,
                    add_profile_features=add_profile_features,
                    num_workers=num_workers),
               str(kwargs_json_file))

    # figure out contrib_wildcard
    modisco_kwargs = read_json(os.path.join(modisco_dir, "modisco-run.kwargs.json"))
    contrib_type = load_contrib_type(modisco_kwargs)

    mf = ModiscoFile(modisco_dir / "modisco.h5")
    tasks = mf.tasks()
    # HACK prune the tasks of contribution (in case it's present)
    tasks = [t.split("/")[0] for t in tasks]

    logger.info(f"Using tasks: {tasks}")

    if contrib_file is None:
        cf = ContribFile.from_modisco_dir(modisco_dir)
        cf.cache()  # cache it since it can be re-used in `modisco_centroid_seqlet_matches`
    else:
        logger.info(f"Loading the contribution scores from: {contrib_file}")
        cf = ContribFile(contrib_file, default_contrib_score=contrib_type)

    if not cm_path.exists():
        logger.info(f"Generating centroid matches to {cm_path.resolve()}")
        cwm_scan_seqlets(modisco_dir,
                         output_file=cm_path,
                         trim_frac=trim_frac,
                         contribsf=cf if contrib_file is None else None,
                         num_workers=num_workers,
                         verbose=False)
    else:
        logger.info("Centroid matches already exist.")
    logger.info(f"Loading centroid matches from {cm_path.resolve()}")
    dfm_norm = pd.read_csv(cm_path)

    # get the raw data
    seq, contrib, ranges = cf.get_seq(), cf.get_contrib(), cf.get_ranges()

    logger.info("Scanning for patterns")
    dfl = []
    # patterns to scan. `longer_pattern` makes sure the patterns are in the long format
    # BUGFIX: use `!=` instead of the identity comparison `is not` with a str literal
    scan_patterns = patterns.split(",") if patterns != 'all' else mf.pattern_names()
    scan_patterns = [longer_pattern(pn) for pn in scan_patterns]

    if add_profile_features:
        profile = cf.get_profiles()
        logger.info("Profile features will also be added to dfi")

    for pattern_name in tqdm(mf.pattern_names()):
        if pattern_name not in scan_patterns:
            # skip scanning that patterns
            continue
        pattern = mf.get_pattern(pattern_name).trim_seq_ic(trim_frac)
        match, contribution = pattern.scan_contribution(contrib, hyp_contrib=None, tasks=tasks,
                                                       n_jobs=num_workers, verbose=False)
        seq_match = pattern.scan_seq(seq, n_jobs=num_workers, verbose=False)
        dfm = pattern.get_instances(tasks, match, contribution, seq_match,
                                    norm_df=dfm_norm[dfm_norm.pattern == pattern_name],
                                    verbose=False, plot=False)
        for filt in filters.split(","):
            if len(filt) > 0:
                dfm = dfm.query(filt)

        if add_profile_features:
            dfm = annotate_profile_single(dfm, pattern_name, mf, profile,
                                          profile_width=70,
                                          trim_frac=trim_frac)
        dfm['pattern_short'] = shorten_pattern(pattern_name)

        # TODO - is it possible to write out the results incrementally?
        dfl.append(dfm)

    logger.info("Merging")
    # merge and write the results
    dfp = pd.concat(dfl)

    # append the ranges
    logger.info("Append ranges")
    ranges.columns = ["example_" + v for v in ranges.columns]
    dfp = dfp.merge(ranges, on="example_idx", how='left')

    # add the absolute coordinates
    dfp['pattern_start_abs'] = dfp['example_start'] + dfp['pattern_start']
    dfp['pattern_end_abs'] = dfp['example_start'] + dfp['pattern_end']

    logger.info("Table info")
    dfp.info()
    logger.info(f"Writing the resuling pd.DataFrame of shape {dfp.shape} to {output_file}")
    # set the first 7 columns to comply to bed6 format (chrom, start, end, name, score, strand, ...)
    bed_columns = ['example_chrom', 'pattern_start_abs', 'pattern_end_abs',
                   'pattern', 'contrib_weighted_p', 'strand', 'match_weighted_p']
    dfp = pd_first_cols(dfp, bed_columns)

    # write to a parquet file
    if output_file.endswith(".parq"):
        logger.info("Writing a parquet file")
        dfp.to_parquet(output_file, partition_on=['pattern_short'], engine='fastparquet')
    elif output_file.endswith(".csv.gz") or output_file.endswith(".csv"):
        logger.info("Writing a csv file")
        dfp.to_csv(output_file, compression='infer', index=False)
    elif output_file.endswith(".tsv.gz") or output_file.endswith(".tsv"):
        logger.info("Writing a tsv file")
        dfp.to_csv(output_file, sep='\t', compression='infer', index=False)
    elif output_file.endswith(".bed.gz") or output_file.endswith(".bed"):
        logger.info("Writing a BED file")
        # write only the first (and main) 7 columns
        dfp[bed_columns].to_csv(output_file, sep='\t', compression='infer', index=False, header=False)
    else:
        logger.warning("File suffix not recognized. Using .csv.gz file format")
        dfp.to_csv(output_file, compression='gzip', index=False)
    logger.info("Done!")
def modisco_report(modisco_dir, output_dir):
    """Render the bundled modisco-chip.ipynb notebook template for `modisco_dir`."""
    template_path = os.path.join(this_path, "../templates/modisco-chip.ipynb")
    rendered_path = os.path.join(output_dir, "modisco-chip.ipynb")
    render_ipynb(template_path, rendered_path, params=dict(modisco_dir=modisco_dir))
@arg('modisco_dir',
     help='directory path `output_dir` in `bpnet.cli.modisco.modisco_run` contains: '
     'modisco.h5, modisco-run.subset-contrib-file.npy, modisco-run.kwargs.json')
@arg("output_dir",
     help='output directory where to store the output bed files')
@arg('--trim-frac',
     help='How much to trim the pattern when scanning for motif instances. '
     'See also `bpnet.modisco.utils.trim_pssm_idx`')
def modisco_export_seqlets(modisco_dir, output_dir, trim_frac=0.08):
    """Export the seqlet locations of every modisco pattern as BED files."""
    from pybedtools import Interval
    from bpnet.modisco.files import ModiscoFile
    add_file_logging(output_dir, logger, 'modisco_export_seqlets')

    # absolute coordinates of the example regions used in the modisco run
    ranges = load_ranges(modisco_dir)
    example_intervals = [Interval(row.chrom, row.start, row.end)
                         for _, row in ranges.iterrows()]

    mfile = ModiscoFile(os.path.join(modisco_dir, "modisco.h5"))
    mfile.export_seqlets_bed(output_dir,
                             example_intervals=example_intervals,
                             position='absolute',
                             trim_frac=trim_frac)
    mfile.close()
def modisco_table(modisco_dir, contrib_scores, output_dir, report_url=None, contribsf=None,
                  footprint_width=200):
    """Write the pattern table to as .html and .csv
    """
    plt.switch_backend('agg')
    # NOTE: the imported `modisco_table` function shadows this one inside the body
    from bpnet.modisco.table import ModiscoData, modisco_table, write_modisco_table
    from bpnet.modisco.motif_clustering import hirearchically_reorder_table
    add_file_logging(output_dir, logger, 'modisco-table')

    print("Loading required data")
    data = ModiscoData.load(modisco_dir, contrib_scores, contribsf=contribsf, footprint_width=footprint_width)

    print("Generating the table")
    table = modisco_table(data)

    print("Writing the results")
    write_modisco_table(table, output_dir, report_url, 'pattern_table')

    print("Writing clustered table")
    clustered = hirearchically_reorder_table(table, data.tasks)
    write_modisco_table(clustered, output_dir, report_url, 'pattern_table.sorted')

    print("Writing footprints")
    profiles = OrderedDict([(pattern, {task: data.get_profile_wide(pattern, task).mean(axis=0)
                                       for task in data.tasks})
                            for pattern in data.mf.pattern_names()])
    write_pkl(profiles, Path(output_dir) / 'footprints.pkl')
    print("Done!")
# def modisco_enrich_patterns(patterns_pkl_file, modisco_dir, output_file, contribsf=None):
# """Add stacked_seqlet_contrib to pattern `attrs`
# Args:
# patterns_pkl: patterns.pkl file path
# modisco_dir: modisco directory containing
# output_file: output file path for patterns.pkl
# """
# from bpnet.utils import read_pkl, write_pkl
# from bpnet.cli.contrib import ContribFile
# logger.info("Loading patterns")
# modisco_dir = Path(modisco_dir)
# patterns = read_pkl(patterns_pkl_file)
# mf = ModiscoFile(modisco_dir / 'modisco.h5')
# if contribsf is None:
# contrib_file = ContribFile.from_modisco_dir(modisco_dir)
# logger.info("Loading ContribFile into memory")
# contrib_file.cache()
# else:
# logger.info("Using the provided ContribFile")
# contrib_file = contribsf
# logger.info("Extracting profile and contribution scores")
# extended_patterns = []
# for p in tqdm(patterns):
# p = p.copy()
# profile_width = p.len_profile()
# # get the shifted seqlets
# seqlets = [s.pattern_align(**p.attrs['align']) for s in mf._get_seqlets(p.name)]
# # keep only valid seqlets
# valid_seqlets = [s for s in seqlets
# if s.valid_resize(profile_width, contrib_file.get_seqlen() + 1)]
# # extract the contribution scores
# p.attrs['stacked_seqlet_contrib'] = contrib_file.extract(valid_seqlets, profile_width=profile_width)
# p.attrs['n_seqlets'] = mf.n_seqlets(p.name)
# extended_patterns.append(p)
# write_pkl(extended_patterns, output_file)
def modisco_export_patterns(modisco_dir, output_file, contribsf=None):
    """Export patterns to a pkl file. Don't cluster them

    Adds `stacked_seqlet_contrib` and `n_seqlets` to pattern `attrs`

    Args:
      modisco_dir: modisco directory containing
      output_file: output file path for patterns.pkl
      contribsf: [optional] pre-loaded ContribFile instance
    """
    from bpnet.cli.contrib import ContribFile

    logger.info("Loading patterns")
    modisco_dir = Path(modisco_dir)
    mf = ModiscoFile(modisco_dir / 'modisco.h5')
    patterns = [mf.get_pattern(name) for name in mf.pattern_names()]

    if contribsf is not None:
        logger.info("Using the provided ContribFile")
        contrib_file = contribsf
    else:
        contrib_file = ContribFile.from_modisco_dir(modisco_dir)
        logger.info("Loading ContribFile into memory")
        contrib_file.cache()

    logger.info("Extracting profile and contribution scores")
    exported_patterns = []
    for pattern in tqdm(patterns):
        pattern = pattern.copy()
        # attach the contribution scores of this pattern's seqlets
        seqlets = mf._get_seqlets(pattern.name)
        stacked = contrib_file.extract(seqlets, profile_width=None)
        stacked.dfi = mf.get_seqlet_intervals(pattern.name, as_df=True)
        pattern.attrs['stacked_seqlet_contrib'] = stacked
        pattern.attrs['n_seqlets'] = mf.n_seqlets(pattern.name)
        exported_patterns.append(pattern)

    write_pkl(exported_patterns, output_file)
@arg('modisco_dir',
     help='directory path `output_dir` in `bpnet.cli.modisco.modisco_run` contains: '
     'modisco.h5, modisco-run.subset-contrib-file.npy, modisco-run.kwargs.json')
@arg('--trim-frac',
     help='How much to trim the pattern when scanning for motif instances. '
     'See also `bpnet.modisco.utils.trim_pssm_idx`')
@arg('--num-workers',
     help='number of workers to use in parallel for running modisco')
@arg('--run-cwm-scan',
     help='if True, cwm scanning will be ran')
@arg('--force',
     help='if True, commands will be re-run regardless of whether they have already been computed')
@arg('--footprint-width',
     help='Width of the footprint to consider when showing heatmaps or when computing the footprint scores')
def chip_nexus_analysis(modisco_dir, trim_frac=0.08, num_workers=20, run_cwm_scan=False, force=False,
                        footprint_width=200):
    """Compute all the results for modisco specific for ChIP-nexus/exo data. Runs:
    - modisco_plot
    - modisco_report
    - modisco_table
    - modisco_export_patterns
    - cwm_scan
    - modisco_export_seqlets

    Note:
        All the sub-commands are only executed if they have not been run before.
        Use --force to override this. Whether a command has been run before is
        determined by checking if the following file exists:
        `{modisco_dir}/.modisco_report_all/{command}.done`.
    """
    plt.switch_backend('agg')
    from bpnet.utils import ConditionalRun
    modisco_dir = Path(modisco_dir)
    # figure out the contribution scores used
    kwargs = read_json(modisco_dir / "modisco-run.kwargs.json")
    contrib_scores = kwargs["contrib_file"]

    mf = ModiscoFile(f"{modisco_dir}/modisco.h5")
    all_patterns = mf.pattern_names()
    mf.close()
    if len(all_patterns) == 0:
        print("No patterns found.")
        # Touch the expected output files so downstream snakemake rules pass
        open(modisco_dir / 'modisco-chip.html', 'a').close()
        open(modisco_dir / 'seqlets/scored_regions.bed', 'a').close()
        return

    # class determining whether to run the command or not (poor-man's snakemake)
    cr = ConditionalRun("modisco_report_all", None, modisco_dir, force=force)
    sync = []
    # --------------------------------------------
    # Pre-load the ContribFile once if any command that needs it still has to
    # run. Fixed: the second check used the stale command name
    # 'modisco_enrich_patterns'; the command actually executed below is
    # 'modisco_export_patterns', so the cache decision never matched reality.
    if (not cr.set_cmd('modisco_plot').done()
            or not cr.set_cmd('modisco_export_patterns').done()):
        # load ContribFile and pass it to all the functions
        logger.info("Loading ContribFile")
        contribsf = ContribFile.from_modisco_dir(modisco_dir)
        contribsf.cache()
    else:
        contribsf = None
    # --------------------------------------------
    # Basic reports
    if not cr.set_cmd('modisco_plot').done():
        modisco_plot(modisco_dir,
                     modisco_dir / 'plots',
                     heatmap_width=footprint_width,
                     figsize=(10, 10), contribsf=contribsf)
        cr.write()
    sync.append("plots")

    if not cr.set_cmd('modisco_report').done():
        modisco_report(str(modisco_dir), str(modisco_dir))
        cr.write()
    sync.append("modisco-chip.html")

    # Export bed-files and bigwigs
    # Seqlets
    if not cr.set_cmd('modisco_export_seqlets').done():
        modisco_export_seqlets(str(modisco_dir), str(modisco_dir / 'seqlets'), trim_frac=trim_frac)
        cr.write()
    sync.append("seqlets")

    if not cr.set_cmd('modisco_export_patterns').done():
        modisco_export_patterns(modisco_dir,
                                output_file=modisco_dir / 'patterns.pkl',
                                contribsf=contribsf)
        cr.write()
    sync.append("patterns.pkl")
    # --------------------------------------------
    # Finding new instances
    if run_cwm_scan:
        if not cr.set_cmd('cwm_scan').done():
            cwm_scan(modisco_dir,
                     modisco_dir / 'instances.bed.gz',
                     trim_frac=trim_frac,
                     contrib_file=None,
                     num_workers=num_workers)
            cr.write()

    if not cr.set_cmd('modisco_table').done():
        modisco_table(modisco_dir, contrib_scores, modisco_dir, report_url=None, contribsf=contribsf,
                      footprint_width=footprint_width)
        cr.write()
    sync.append("footprints.pkl")
    sync.append("pattern_table.*")
    # --------------------------------------------
    # print the rsync command to run in order to sync the output
    # directories to the webserver
    logger.info("Run the following command to sync files to the webserver")
    dirs = " ".join(sync)
    print(f"rsync -av --progress {dirs} <output_dir>/")
|
{"hexsha": "38092f5ee909490627af28f4333e5b060dc49f01", "size": 38834, "ext": "py", "lang": "Python", "max_stars_repo_path": "bpnet/cli/modisco.py", "max_stars_repo_name": "mlweilert/bpnet", "max_stars_repo_head_hexsha": "dcc9e8d805f9de774ae9dcc62c20504915be614f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 93, "max_stars_repo_stars_event_min_datetime": "2019-08-15T19:49:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T08:23:44.000Z", "max_issues_repo_path": "bpnet/cli/modisco.py", "max_issues_repo_name": "mlweilert/bpnet", "max_issues_repo_head_hexsha": "dcc9e8d805f9de774ae9dcc62c20504915be614f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-08-15T15:44:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T06:56:07.000Z", "max_forks_repo_path": "bpnet/cli/modisco.py", "max_forks_repo_name": "mlweilert/bpnet", "max_forks_repo_head_hexsha": "dcc9e8d805f9de774ae9dcc62c20504915be614f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2019-08-29T18:54:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T21:04:46.000Z", "avg_line_length": 41.1377118644, "max_line_length": 127, "alphanum_fraction": 0.6385641448, "include": true, "reason": "import numpy", "num_tokens": 8666}
|
#=
getindex.jl
Indexing support for LinearMapAX
2018-01-19, Jeff Fessler, University of Michigan
=#
# [end]
# A[end]: linear index of the final element
Base.lastindex(A::LinearMapAX) = prod(size(A._lmap))

# A[end,?] and A[?,end]: last index along dimension d
Base.lastindex(A::LinearMapAX, d::Int) = size(A._lmap, d)
# A[i,j]
function Base.getindex(A::LinearMapAX, i::Int, j::Int)
    # apply the map to the j-th unit vector and pick element i
    T = eltype(A)
    unit = zeros(T, size(A._lmap, 2))
    unit[j] = one(T)
    column = A._lmap * unit
    return column[i]
end
# A[:,j]
# it is crucial to provide this function rather than to inherit from
# Base.getindex(A::AbstractArray, ::Colon, ::Int)
# because Base.getindex does this by iterating (I think).
function Base.getindex(A::LinearMapAX, ::Colon, j::Int)
    # column j is the map applied to the j-th unit vector
    T = eltype(A)
    unit = zeros(T, size(A, 2))
    unit[j] = one(T)
    return A._lmap * unit
end

# A[ii,j]: take column j, then subset the rows
Base.getindex(A::LinearMapAX, ii::Indexer, j::Int) = A[:, j][ii]

# A[i,jj]: take row i, then subset the columns
Base.getindex(A::LinearMapAX, i::Int, jj::Indexer) = A[i, :][jj]

# A[:,jj]
# providing these directly (instead of falling back to scalar getindex)
# matters for efficiency
Base.getindex(A::LinearMapAX, ::Colon, jj::AbstractVector{Bool}) =
    A[:, findall(jj)]
Base.getindex(A::LinearMapAX, ::Colon, jj::Indexer) =
    hcat((A[:, j] for j in jj)...)
# A[ii,:]
# trick: cannot use A' for a FunctionMap with no fc
function Base.getindex(A::LinearMapAX, ii::Indexer, ::Colon)
    # trick: a FunctionMap without an adjoint function (fc) cannot use A',
    # so fall back to assembling the row slice column by column
    lm = A._lmap
    if (:fc in propertynames(lm)) && isnothing(lm.fc)
        return hcat((A[ii, j] for j in 1:size(A, 2))...)
    end
    return A'[:, ii]'
end
# A[ii,jj]
# A[ii,jj]: subset the columns first, then the rows
Base.getindex(A::LinearMapAX, ii::Indexer, jj::Indexer) = A[:, jj][ii, :]

# A[k]: linear indexing via the corresponding cartesian coordinates
function Base.getindex(A::LinearMapAX, k::Int)
    (i, j) = Tuple(CartesianIndices(size(A._lmap))[k]) # is there a more elegant way?
    return A[i, j]
end

# A[kk]
Base.getindex(A::LinearMapAX, kk::AbstractVector{Bool}) = A[findall(kk)]
Base.getindex(A::LinearMapAX, kk::Indexer) = [A[k] for k in kk]

# A[i,:]
# trick: a single row slice returns a 1D ("column") vector
Base.getindex(A::LinearMapAX, i::Int, ::Colon) = vec(A[[i], :])

# A[:,:] = Matrix(A)
Base.getindex(A::LinearMapAX, ::Colon, ::Colon) = Matrix(A._lmap)

# A[:]
Base.getindex(A::LinearMapAX, ::Colon) = vec(A[:, :])
|
{"hexsha": "432dd9e41c1df6a977da70eccd293a564e761db6", "size": 2125, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/getindex.jl", "max_stars_repo_name": "JeffFessler/LinearMapsAA.jl", "max_stars_repo_head_hexsha": "65d7fd4eaf0558988ae23ccad2a547778c6111e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-08-09T10:32:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T02:26:33.000Z", "max_issues_repo_path": "src/getindex.jl", "max_issues_repo_name": "JeffFessler/LinearMapsAA.jl", "max_issues_repo_head_hexsha": "65d7fd4eaf0558988ae23ccad2a547778c6111e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2019-08-15T03:31:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-08T03:02:37.000Z", "max_forks_repo_path": "src/getindex.jl", "max_forks_repo_name": "JeffFessler/LinearMapsAA.jl", "max_forks_repo_head_hexsha": "65d7fd4eaf0558988ae23ccad2a547778c6111e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:22:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-21T16:15:22.000Z", "avg_line_length": 26.2345679012, "max_line_length": 73, "alphanum_fraction": 0.6343529412, "num_tokens": 723}
|
"""
File: pylinex/loglikelihood/LinearTruncationLoglikelihood.py
Author: Keith Tauscher
Date: 29 Sep 2018
Description: File containing a class which represents a DIC-like loglikelihood
which uses the number of coefficients to use in each of a number
of bases as the parameters of the likelihood.
"""
import numpy as np
from ..util import create_hdf5_dataset, get_hdf5_value
from ..basis import Basis, BasisSum
from ..fitter import Fitter
from .LoglikelihoodWithData import LoglikelihoodWithData
try:
    # this runs with no issues in python 2 but raises NameError in python 3
    basestring
except NameError:
    # catch only NameError (a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit); enables python 2/3 compatible
    # string type checking below
    basestring = str
class LinearTruncationLoglikelihood(LoglikelihoodWithData):
    """
    Class which represents a DIC-like loglikelihood which uses the number of
    coefficients to use in each of a number of bases as the parameters of the
    likelihood.
    """
    def __init__(self, basis_sum, data, error,
        information_criterion='deviance_information_criterion'):
        """
        Initializes a new LinearTruncationLoglikelihood with the given
        basis_sum, data, and error.

        basis_sum: BasisSum containing Basis objects which contain the largest
                   number of basis vectors allowed.
        data: 1D data vector to fit
        error: 1D vector of noise level estimates for data
        information_criterion: the name of the quantity returned by this
                               Loglikelihood. Must be a valid property of the
                               Fitter class from pylinex.
                               Default: 'deviance_information_criterion'
        """
        self.basis_sum = basis_sum
        self.data = data
        self.error = error
        self.information_criterion = information_criterion

    @property
    def information_criterion(self):
        """
        Property storing the string name of the information criterion to return
        when called (must be a valid property of the Fitter class from
        pylinex).
        """
        if not hasattr(self, '_information_criterion'):
            raise AttributeError("information_criterion was referenced " +\
                "before it was set.")
        return self._information_criterion

    @information_criterion.setter
    def information_criterion(self, value):
        """
        Setter for the information criterion returned by this Loglikelihood.

        value: string name of a valid property of the Fitter class from pylinex
        """
        if isinstance(value, basestring):
            self._information_criterion = value
        else:
            raise TypeError("information_criterion was set to a non-string.")

    @property
    def basis_sum(self):
        """
        Property storing the BasisSum object whose basis vectors will be
        used by this object in the fit.
        """
        if not hasattr(self, '_basis_sum'):
            raise AttributeError("basis_sum was referenced before it was " +\
                "set. This shouldn't happen. Something is " +\
                "wrong.")
        return self._basis_sum

    @basis_sum.setter
    def basis_sum(self, value):
        """
        Allows user to set basis_sum property.

        value: BasisSum object or, more generally, a Basis object containing
               the basis vectors with which to perform the fit (a lone Basis
               is wrapped in a single-element BasisSum named 'sole')
        """
        if isinstance(value, BasisSum):
            self._basis_sum = value
        elif isinstance(value, Basis):
            self._basis_sum = BasisSum('sole', value)
        else:
            raise TypeError("basis_sum was neither a BasisSum or a " +\
                "different Basis object.")

    @property
    def names(self):
        """
        Property storing the names of the component bases at play with this
        Loglikelihood.
        """
        if not hasattr(self, '_names'):
            # cache the list; the original returned it without assigning
            # _names, so every access rebuilt the list
            self._names = [name for name in self.basis_sum.names]
        return self._names

    @property
    def nterms_maxima(self):
        """
        Property storing the maximum number of terms in each subbasis.
        """
        if not hasattr(self, '_nterms_maxima'):
            self._nterms_maxima =\
                np.array([self.basis_sum[name].num_basis_vectors\
                for name in self.names])
        return self._nterms_maxima

    def truncated_basis_sum(self, truncations):
        """
        Finds the BasisSum corresponding to the given truncations

        truncations: array of integers of same length as self.names

        returns: a BasisSum object corresponding to the given truncations.
        """
        new_bases = [self.basis_sum[name][:truncation]\
            for (name, truncation) in zip(self.names, truncations)]
        return BasisSum(self.names, new_bases)

    @property
    def error(self):
        """
        Property storing the 1D error vector for fit.
        """
        if not hasattr(self, '_error'):
            raise AttributeError("error wasn't set before it was " +\
                "referenced. Something is wrong. This " +\
                "shouldn't happen.")
        return self._error

    @error.setter
    def error(self, value):
        """
        Allows user to set the error property.

        value: must be a 1D numpy.ndarray with the same length as the basis
               vectors.
        """
        value = np.array(value)
        if value.shape == (self.num_channels,):
            self._error = value
        else:
            raise ValueError(("error was not of the expected shape, " +\
                "({0:d},).").format(self.num_channels))

    @property
    def computed(self):
        """
        Property storing the loglikelihood's computed values, keyed by the
        hash of the truncation point (memoization cache for __call__).
        """
        if not hasattr(self, '_computed'):
            self._computed = {}
        return self._computed

    def hash(self, point):
        """
        Hashes the given point of integers to summarize it into a single
        integer.

        point: the point, consisting of integers, to hash into a single integer

        returns: single integer, summarizing point
        """
        # truncations are 1-based, ravel_multi_index expects 0-based indices
        return np.ravel_multi_index(point - 1, self.nterms_maxima)

    def unhash(self, hash_value):
        """
        Unhashes the given value to give the point which hashed to it.

        hash_value: the hash value to which the desired point maps

        returns: the point which maps to the given hash value
        """
        return np.array(np.unravel_index(hash_value, self.nterms_maxima)) + 1

    def __call__(self, point):
        """
        Calls this loglikelihood by evaluating the information criterion
        achieved by the truncations specified by point.

        point: array of integers containing truncation numbers

        returns: information criterion of the truncated fit, divided by -2
        """
        point = point.astype(int)
        hash_value = self.hash(point)
        if hash_value not in self.computed:
            # divide by -2 so that larger is better, as for a loglikelihood
            self.computed[hash_value] =\
                getattr(Fitter(self.truncated_basis_sum(point), self.data,\
                error=self.error), self.information_criterion) / (-2.)
        return self.computed[hash_value]

    @property
    def parameters(self):
        """
        Property storing the names of the parameters of the model defined by
        this likelihood.
        """
        if not hasattr(self, '_parameters'):
            self._parameters =\
                ['{!s}_nterms'.format(name) for name in self.names]
        return self._parameters

    def fill_hdf5_group(self, group, data_link=None, error_link=None):
        """
        Fills the given hdf5 group with information about this Loglikelihood.

        group: the group to fill with information about this Loglikelihood
        data_link: link to data, if applicable
        error_link: link to error, if applicable
        """
        # NOTE(review): this string does not match the class name
        # ('LinearTruncationLoglikelihood') — kept as-is in case external
        # loaders dispatch on it; confirm before changing the file format.
        group.attrs['class'] = 'TruncationLoglikelihood'
        group.attrs['information_criterion'] = self.information_criterion
        self.save_data(group, data_link=data_link)
        create_hdf5_dataset(group, 'error', data=self.error, link=error_link)
        self.basis_sum.fill_hdf5_group(group.create_group('basis_sum'))

    @staticmethod
    def load_from_hdf5_group(group):
        """
        Loads a Loglikelihood object from an hdf5 file group in which it was
        previously saved.

        group: the hdf5 file group from which to load a Loglikelihood object

        returns: the Loglikelihood object loaded from the given hdf5 file group
        """
        # Fixed: the original *assigned* group.attrs['class'] here, mutating
        # the (possibly read-only) input group during a load operation.
        data = LoglikelihoodWithData.load_data(group)
        error = get_hdf5_value(group['error'])
        basis_sum = BasisSum.load_from_hdf5_group(group['basis_sum'])
        information_criterion = group.attrs['information_criterion']
        # Fixed: the original returned 'TruncationLoglikelihood', an
        # undefined name (NameError at runtime).
        return LinearTruncationLoglikelihood(basis_sum, data, error,\
            information_criterion=information_criterion)

    @property
    def gradient_computable(self):
        """
        The gradient of this Loglikelihood is not computable because it exists
        in a discrete parameter space.
        """
        return False

    def gradient(self, point):
        """
        Raises an error because the gradient of this loglikelihood doesn't
        exist.
        """
        raise NotImplementedError("gradient of this loglikelihood cannot " +\
            "be computed because it exists in a discrete parameter space.")

    @property
    def hessian_computable(self):
        """
        The hessian of this Loglikelihood is not computable because it exists
        in a discrete parameter space.
        """
        return False

    def hessian(self, point):
        """
        Raises an error because the hessian of this loglikelihood doesn't
        exist.
        """
        raise NotImplementedError("hessian of this loglikelihood cannot " +\
            "be computed because it exists in a discrete parameter space.")

    def __eq__(self, other):
        """
        Checks if self is equal to other.

        other: a Loglikelihood object to check for equality

        returns: True if other and self have the same properties
        """
        # Fixed: the original checked against 'TruncationLoglikelihood',
        # an undefined name (NameError at runtime).
        if not isinstance(other, LinearTruncationLoglikelihood):
            return False
        if self.basis_sum != other.basis_sum:
            return False
        if not np.allclose(self.data, other.data):
            return False
        if not np.allclose(self.error, other.error):
            return False
        return (self.information_criterion == other.information_criterion)

    def change_data(self, new_data):
        """
        Finds the LinearTruncationLoglikelihood with a different data vector
        with everything else kept constant.

        returns: new LinearTruncationLoglikelihood with the given data property
        """
        return LinearTruncationLoglikelihood(self.basis_sum, new_data,\
            self.error, information_criterion=self.information_criterion)
|
{"hexsha": "b59f1889fab1374f43932a34d43a925bf30956a8", "size": 11303, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylinex/loglikelihood/LinearTruncationLoglikelihood.py", "max_stars_repo_name": "CU-NESS/pylinex", "max_stars_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pylinex/loglikelihood/LinearTruncationLoglikelihood.py", "max_issues_repo_name": "CU-NESS/pylinex", "max_issues_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylinex/loglikelihood/LinearTruncationLoglikelihood.py", "max_forks_repo_name": "CU-NESS/pylinex", "max_forks_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6980519481, "max_line_length": 79, "alphanum_fraction": 0.6166504468, "include": true, "reason": "import numpy", "num_tokens": 2307}
|
import numpy as np
import librosa
import matplotlib.pyplot as plt
from configs.configs import logger, DEBUG
from configs.modelConfigs import (
CHORUS_DURATION_SINGLE,
CHORUS_DURATION,
TUNE_SCOPE,
CLF_TARGET_LABEL,
MINIMUM_CHORUS_DUR,
)
from utility.common import (
mergeIntervals,
intervalIntersection,
singleChorusSection,
multiChorusSections,
removeNumber,
filterIntvs,
mirexLines,
)
def maxOverlap(mirexFmt, chorusDur=CHORUS_DURATION_SINGLE, centering=False):
    """Select a single chorus window of length `chorusDur` that maximally
    overlaps the detected chorus sections.

    Args:
        mirexFmt: (intervals, labels) annotation in mirex format
        chorusDur: length in seconds of the returned window
        centering: if True, center the window on the best section instead of
            starting it at the section's beginning

    Returns:
        single-chorus mirex annotation (via `singleChorusSection`)
    """
    intervals, labels = mergeIntervals(mirexFmt)
    candidates = np.nonzero(np.char.startswith(labels, CLF_TARGET_LABEL))[0]
    totalDur = intervals[-1][1]

    # for each candidate section, measure how much a window starting there
    # would overlap all detected chorus sections
    overlaps = []
    for idx in candidates:
        start = intervals[idx][0]
        stop = min(totalDur, start + chorusDur)
        overlaps.append(
            np.sum(
                [intervalIntersection((start, stop), intervals[j]) for j in candidates]
            )
        )
    best = candidates[np.argmax(overlaps)]

    if centering:
        # window symmetric around the section's midpoint
        mid = np.mean(intervals[best])
        begin = mid - chorusDur / 2
        end = mid + chorusDur / 2
    else:
        # window starting at the section's beginning, clipped to the song
        begin = intervals[best][0]
        end = min(totalDur, begin + chorusDur)
    return singleChorusSection(begin, end, totalDur)
def arousalPoint(time, times, pitches, window, begin, show=DEBUG):
    """Refine a boundary estimate by scanning pitch activity around `time`.

    Scores every timestamp within `window/2` of `time` by the difference in
    (coarsely quantized) pitch content just after vs. just before it, and
    returns the timestamp with the extremal score.

    Args:
        time: initial boundary estimate in seconds
        times: 1D array of pitch-track timestamps (seconds)
        pitches: 1D array of pitch values aligned with `times`
            (Hz is presumed from the plot label — confirm)
        window: width (seconds) of the search window centered on `time`
        begin: if True, pick the score maximum (section start, where pitch
            activity rises); otherwise the minimum (section end)
        show: if True, plot pitch and scores for the window (debug aid)

    Returns:
        timestamp from `times` inside the window with the extremal score
    """
    def arousalScore(t):
        # pitch samples in the half-scope immediately before / after t
        before = pitches[(times >= t - TUNE_SCOPE / 2) & (times <= t)]
        after = pitches[(times >= t) & (times <= t + TUNE_SCOPE / 2)]
        # quantize to 6 bins per octave; the +0.1 offset presumably guards
        # against hz_to_midi of zero pitch — TODO confirm
        before = (librosa.hz_to_midi(before + 0.1) * 6 / 12).astype(int)
        after = (librosa.hz_to_midi(after + 0.1) * 6 / 12).astype(int)
        # positive score means pitch content increases across t
        score = np.sum(after) - np.sum(before)
        # normalize by the number of "before" samples
        # NOTE(review): assumes `before` is non-empty — confirm for edge times
        return score / len(before)
    # candidate timestamps within window/2 of the initial estimate
    mask = (times >= time - window / 2) & (times <= time + window / 2)
    scores = [arousalScore(t) for t in times[mask]]
    # section starts take the max score, section ends take the min
    point = times[mask][np.argmax(scores)] if begin else times[mask][np.argmin(scores)]
    if show:
        logger.debug(
            f"point={point} times={times[mask][0]}~{times[mask][-1]} window={window}"
        )
        plt.plot(times[mask], pitches[mask], label="pitch")
        plt.plot(times[mask], scores, label="score")
        plt.scatter(point, np.max(scores) if begin else np.min(scores))
        plt.xlabel("time/s")
        plt.ylabel("freq/Hz")
        plt.legend()
        plt.show()
    return point
def tuneIntervals(mirexFmt, mels_f, chorusDur, window):
    """Refine the boundaries of every chorus section using pitch activity.

    Each chorus interval's begin/end is moved to the arousal point found
    within `window` seconds; intervals shorter than MINIMUM_CHORUS_DUR after
    tuning are dropped.

    Args:
        mirexFmt: (intervals, labels) annotation in mirex format
        mels_f: (times, pitches) melody/pitch track
        chorusDur: minimum duration enforced between tuned begin and end
        window: search window width passed to `arousalPoint`

    Returns:
        multi-chorus mirex annotation (via `multiChorusSections`)
    """
    annotation = mergeIntervals(removeNumber(mirexFmt))
    logger.debug(f"tune interval=\n{mirexLines(annotation)}")
    dur = annotation[0][-1][1]
    times, pitches = mels_f

    refined = []
    for intv in filterIntvs(annotation, fun=CLF_TARGET_LABEL):
        # move the boundaries to the nearby arousal points
        begin = arousalPoint(intv[0], times, pitches, window, True)
        end = arousalPoint(intv[1], times, pitches, window, False)
        # enforce the minimum chorus duration, clipped to the song length
        end = min(dur, max(end, begin + chorusDur))
        if end - begin > MINIMUM_CHORUS_DUR:
            refined.append((begin, end))
    return multiChorusSections(refined, dur)
|
{"hexsha": "e2e5aba574e968243cbcdb241d6e9bfdc8346611", "size": 3185, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/modules/structure_module/models/pickSingle.py", "max_stars_repo_name": "Himusoka/Beatmap-gen_Thesis", "max_stars_repo_head_hexsha": "1ed210ccdefaa367cc3c54b9f74f8c7f2906b3d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/modules/structure_module/models/pickSingle.py", "max_issues_repo_name": "Himusoka/Beatmap-gen_Thesis", "max_issues_repo_head_hexsha": "1ed210ccdefaa367cc3c54b9f74f8c7f2906b3d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/modules/structure_module/models/pickSingle.py", "max_forks_repo_name": "Himusoka/Beatmap-gen_Thesis", "max_forks_repo_head_hexsha": "1ed210ccdefaa367cc3c54b9f74f8c7f2906b3d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.247311828, "max_line_length": 87, "alphanum_fraction": 0.6398744113, "include": true, "reason": "import numpy", "num_tokens": 871}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import unittest
from pyentrp import entropy as ent
TIME_SERIES = [1, 1, 1, 2, 3, 4, 5]
TIME_SERIES_STRING = '1112345'
SHANNON_ENTROPY = 2.12809
TS_SAMPLE_ENTROPY = [1, 4, 5, 1, 7, 3, 1, 2, 5, 8, 9, 7, 3, 7, 9, 5, 4, 3, 9, 1, 2, 3, 4, 2, 9, 6, 7, 4, 9, 2, 9, 9, 6,
5, 1, 3, 8, 1, 5, 3, 8, 4, 1, 2, 2, 1, 6, 5, 3, 6, 5, 4, 8, 9, 6, 7, 5, 3, 2, 5, 4, 2, 5, 1, 6, 5,
3, 5, 6, 7, 8, 5, 2, 8, 6, 3, 8, 2, 7, 1, 7, 3, 5, 6, 2, 1, 3, 7, 3, 5, 3, 7, 6, 7, 7, 2, 3, 1, 7,
8]
PERM_ENTROPY_BANDT = [4, 7, 9, 10, 6, 11, 3]
np.random.seed(1234567)
RANDOM_TIME_SERIES = np.random.rand(1000)
class TestEntropy(unittest.TestCase):
    """Unit tests for the entropy measures exposed by ``pyentrp.entropy``."""
    def test_shannonEntropyString(self):
        # Shannon entropy of a string of symbol characters
        self.assertEqual(round(ent.shannon_entropy(TIME_SERIES_STRING), 5), SHANNON_ENTROPY)
    def test_shannonEntropyInt(self):
        # same series given as a list of ints must yield the same entropy
        self.assertEqual(round(ent.shannon_entropy(TIME_SERIES), 5), SHANNON_ENTROPY)
    def test_sampleEntropy(self):
        # sample entropy with embedding dimension 4 and tolerance 0.2*std
        ts = TS_SAMPLE_ENTROPY
        std_ts = np.std(ts)
        sample_entropy = ent.sample_entropy(ts, 4, 0.2 * std_ts)
        np.testing.assert_allclose(sample_entropy, np.array([2.26881823, 2.11119024, 2.33537492, 1.79175947]))
    def test_multiScaleEntropy(self):
        # multiscale entropy over scales 1..4 of a fixed-seed random series
        multi_scale_entropy = ent.multiscale_entropy(RANDOM_TIME_SERIES, 4, maxscale = 4 )
        np.testing.assert_allclose(multi_scale_entropy, np.array([2.52572864, 2.31911439, 1.65292302, 1.86075234]))
    def test_permutationEntropy(self):
        # reference values for the Bandt & Pompe example series
        self.assertEqual(np.round(ent.permutation_entropy(PERM_ENTROPY_BANDT, order=2, delay=1), 3), 0.918)
        self.assertEqual(np.round(ent.permutation_entropy(PERM_ENTROPY_BANDT, order=3, delay=1), 3), 1.522)
        # Assert that a fully random vector has an entropy of 0.99999...
        self.assertEqual(np.round(ent.permutation_entropy(RANDOM_TIME_SERIES, order=3, delay=1, normalize=True), 3), 0.999)
    def test_weightedPermuationEntropy(self):
        # weighted variant of permutation entropy on the same series
        self.assertEqual(np.round(ent.weighted_permutation_entropy(PERM_ENTROPY_BANDT, order=2, delay=1), 3), 0.913)
        self.assertEqual(np.round(ent.weighted_permutation_entropy(PERM_ENTROPY_BANDT, order=3, delay=1), 3), 1.414)
        # Assert that a fully random vector has an entropy of 0.99999...
        self.assertEqual(np.round(ent.weighted_permutation_entropy(RANDOM_TIME_SERIES, order=3, delay=1, normalize=True), 3), 0.999)
    def test_multiScalePermutationEntropy(self):
        # multiscale permutation entropy: order 3, delay 5, 2 scales
        np.testing.assert_array_equal(np.round(ent.multiscale_permutation_entropy(TS_SAMPLE_ENTROPY, 3, 5, 2), 4),
                                      np.array([2.4699, 2.5649]))
    def test_utilSequence(self):
        # util_pattern_space must reject invalid order/delay combinations
        self.assertRaises(Exception, ent.util_pattern_space, (TIME_SERIES, 0, 2))
        self.assertRaises(Exception, ent.util_pattern_space, (TIME_SERIES, 10, 20))
        np.testing.assert_array_equal(ent.util_pattern_space(TIME_SERIES, 2, 3),
                                      np.array([[1, 1, 3], [1, 2, 4], [1, 3, 5]]))
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "8f8b41b08e6f5d0dd47dec3a4f6911981772ce7c", "size": 3127, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_entropy.py", "max_stars_repo_name": "samgdotson/pyEntropy", "max_stars_repo_head_hexsha": "c7f1fcade3597622d5325e608c25c8ab36f9cf25", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 205, "max_stars_repo_stars_event_min_datetime": "2016-03-08T18:29:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T13:22:51.000Z", "max_issues_repo_path": "tests/test_entropy.py", "max_issues_repo_name": "samgdotson/pyEntropy", "max_issues_repo_head_hexsha": "c7f1fcade3597622d5325e608c25c8ab36f9cf25", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2017-04-24T11:27:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-24T21:55:14.000Z", "max_forks_repo_path": "tests/test_entropy.py", "max_forks_repo_name": "samgdotson/pyEntropy", "max_forks_repo_head_hexsha": "c7f1fcade3597622d5325e608c25c8ab36f9cf25", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 80, "max_forks_repo_forks_event_min_datetime": "2017-01-20T15:44:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T21:22:06.000Z", "avg_line_length": 47.3787878788, "max_line_length": 132, "alphanum_fraction": 0.6536616565, "include": true, "reason": "import numpy", "num_tokens": 1099}
|
import numpy as np
import math
def classify(row, features, labels, k=None):
    """
    Returns the predicted label of a feature vector given a set of features and labels.

    Parameters
    ----------
    row : array_like
        Row vector of input features. Must only contain numerical values.
    features : array_like
        An array of column vectors describing the model features. Must only contain numerical values.
    labels : numpy.ndarray
        A column vector describing the model labels. Can contain any type.
    k : int, optional
        The number of neighbors the model will use to make a prediction. If value is left empty, the
        default recommended k is used. k == math.floor(math.sqrt(n)), where n is the number of entries
        in labels, and k is odd.

    Returns
    -------
    greatest_label_count : any type
        The model's predicted label
    """
    # Set recommended k if none is given
    if k is None:
        k = math.floor(math.sqrt(len(labels)))
        # Fixed: `k % 2 is 0` compared identity, not equality — use ==
        if k % 2 == 0 and k > 1:
            k -= 1

    # Calculate Euclidean distances to row features.
    # Fixed: np.full(shape, 0) created an int array, so `+=` with float
    # squared differences raised a casting error; accumulate in float.
    distances = np.zeros(labels.shape, dtype=float)
    for feature, value in zip(features, row):
        distances += (np.asarray(feature) - value) ** 2
    distances = np.sqrt(distances)

    # Sort distances and search most common neighboring label
    nearest_labels = labels[np.argsort(distances)][:k]
    unique_labels, counts = np.unique(nearest_labels, return_counts=True)
    greatest_label_count = unique_labels[np.argmax(counts)]

    # Return result
    return greatest_label_count
|
{"hexsha": "e52388b2106ea80b4f6564188ee9b91d5d2a7898", "size": 1640, "ext": "py", "lang": "Python", "max_stars_repo_path": "kNearestNeighbor/__init__.py", "max_stars_repo_name": "MatthiasKrijgsman/k-NearestNeighbor", "max_stars_repo_head_hexsha": "d102e6d87dbd89888110f8fe389e77717d74f209", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-01T17:41:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-01T17:41:50.000Z", "max_issues_repo_path": "kNearestNeighbor/__init__.py", "max_issues_repo_name": "MatthiasKrijgsman/kNearestNeighbor", "max_issues_repo_head_hexsha": "d102e6d87dbd89888110f8fe389e77717d74f209", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kNearestNeighbor/__init__.py", "max_forks_repo_name": "MatthiasKrijgsman/kNearestNeighbor", "max_forks_repo_head_hexsha": "d102e6d87dbd89888110f8fe389e77717d74f209", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8181818182, "max_line_length": 102, "alphanum_fraction": 0.668902439, "include": true, "reason": "import numpy", "num_tokens": 365}
|
/**
* @file
* @copyright defined in eos/LICENSE.txt
*/
#include <algorithm>
#include <vector>
#include <iterator>
#include <boost/test/unit_test.hpp>
#include <eos/chain/chain_controller.hpp>
#include <eos/chain/exceptions.hpp>
#include <eos/chain/permission_object.hpp>
#include <eos/chain/key_value_object.hpp>
#include <eos/chain/producer_objects.hpp>
#include <eos/utilities/tempdir.hpp>
#include <fc/crypto/digest.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/range/algorithm/find.hpp>
#include <boost/range/algorithm/find_if.hpp>
#include <boost/range/algorithm/permutation.hpp>
#include "../common/database_fixture.hpp"
using namespace eosio;
using namespace chain;
BOOST_AUTO_TEST_SUITE(special_account_tests)
//Check special accounts exits in genesis
// Verify the special accounts created at genesis ('nobody' and 'producers')
// exist and carry the expected authority structures.
BOOST_FIXTURE_TEST_CASE(accounts_exists, testing_fixture)
{ try {
      Make_Blockchain(chain);

      // 'nobody' must exist with empty authorities (threshold 0, no keys,
      // no delegated accounts) — nothing can ever satisfy them.
      auto nobody = chain_db.find<account_object, by_name>(config::nobody_account_name);
      BOOST_CHECK(nobody != nullptr);
      const auto& nobody_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::nobody_account_name, config::active_name));
      BOOST_CHECK_EQUAL(nobody_active_authority.auth.threshold, 0);
      BOOST_CHECK_EQUAL(nobody_active_authority.auth.accounts.size(), 0);
      BOOST_CHECK_EQUAL(nobody_active_authority.auth.keys.size(), 0);

      const auto& nobody_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::nobody_account_name, config::owner_name));
      BOOST_CHECK_EQUAL(nobody_owner_authority.auth.threshold, 0);
      BOOST_CHECK_EQUAL(nobody_owner_authority.auth.accounts.size(), 0);
      BOOST_CHECK_EQUAL(nobody_owner_authority.auth.keys.size(), 0);

      // TODO: check for anybody account
      //auto anybody = chain_db.find<account_object, by_name>(config::anybody_account_name);
      //BOOST_CHECK(anybody == nullptr);

      // 'producers' must exist; its active authority is delegated to the
      // current set of active producers with the configured threshold.
      auto producers = chain_db.find<account_object, by_name>(config::producers_account_name);
      BOOST_CHECK(producers != nullptr);
      auto& gpo = chain_db.get<global_property_object>();
      const auto& producers_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::active_name));
      BOOST_CHECK_EQUAL(producers_active_authority.auth.threshold, config::producers_authority_threshold);
      BOOST_CHECK_EQUAL(producers_active_authority.auth.accounts.size(), gpo.active_producers.size());
      BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0);

      // The accounts listed in the active authority must be exactly the
      // active producers (set difference must be empty).
      std::vector<account_name> active_auth;
      for(auto& apw : producers_active_authority.auth.accounts) {
         active_auth.emplace_back(apw.permission.account);
      }
      std::vector<account_name> diff;
      std::set_difference(
         active_auth.begin(),
         active_auth.end(),
         gpo.active_producers.begin(),
         gpo.active_producers.end(),
         std::inserter(diff, diff.begin())
      );
      BOOST_CHECK_EQUAL(diff.size(), 0);

      // The 'producers' owner authority is empty, like 'nobody'.
      const auto& producers_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::owner_name));
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.threshold, 0);
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.accounts.size(), 0);
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.keys.size(), 0);

} FC_LOG_AND_RETHROW() }
//Check correct authority when new set of producers are elected
// After an election changes the active producer set, the 'producers'
// account's active authority must be updated to delegate to the new set.
BOOST_FIXTURE_TEST_CASE(producers_authority, testing_fixture)
{ try {
      Make_Blockchain(chain)
      Make_Account(chain, alice);
      Make_Account(chain, bob);
      Make_Account(chain, charlie);

      Make_Account(chain, newproducer1);
      Make_Account(chain, newproducer2);
      Make_Account(chain, newproducer3);

      chain.produce_blocks();

      // Register three new producers and have each stakeholder approve one.
      Make_Producer(chain, newproducer1);
      Make_Producer(chain, newproducer2);
      Make_Producer(chain, newproducer3);

      Approve_Producer(chain, alice, newproducer1, true);
      Approve_Producer(chain, bob, newproducer2, true);
      Approve_Producer(chain, charlie, newproducer3, true);

      // Produce to the end of the current round so the election takes effect.
      chain.produce_blocks(config::blocks_per_round - chain.head_block_num() );

      // The newly approved producers are now part of the active set.
      auto& gpo = chain_db.get<global_property_object>();
      BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer1") != gpo.active_producers.end());
      BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer2") != gpo.active_producers.end());
      BOOST_REQUIRE(boost::find(gpo.active_producers, "newproducer3") != gpo.active_producers.end());

      // The 'producers' active authority tracks the new active set.
      const auto& producers_active_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::active_name));
      BOOST_CHECK_EQUAL(producers_active_authority.auth.threshold, config::producers_authority_threshold);
      BOOST_CHECK_EQUAL(producers_active_authority.auth.accounts.size(), gpo.active_producers.size());
      BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0);

      // Delegated accounts must equal the active producer set (empty set
      // difference). NOTE(review): std::set_difference requires both ranges
      // sorted in the same order -- confirm that invariant holds here.
      std::vector<account_name> active_auth;
      for(auto& apw : producers_active_authority.auth.accounts) {
         active_auth.emplace_back(apw.permission.account);
      }
      std::vector<account_name> diff;
      std::set_difference(
         active_auth.begin(),
         active_auth.end(),
         gpo.active_producers.begin(),
         gpo.active_producers.end(),
         std::inserter(diff, diff.begin())
      );
      BOOST_CHECK_EQUAL(diff.size(), 0);

      // The owner permission remains unsatisfiable after the election.
      const auto& producers_owner_authority = chain_db.get<permission_object, by_owner>(boost::make_tuple(config::producers_account_name, config::owner_name));
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.threshold, 0);
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.accounts.size(), 0);
      BOOST_CHECK_EQUAL(producers_owner_authority.auth.keys.size(), 0);
} FC_LOG_AND_RETHROW() }
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "1050fc8c071c8b8d726f248a533dbc57d98c8cb3", "size": 5963, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/tests/special_accounts_tests.cpp", "max_stars_repo_name": "anauman/eosio", "max_stars_repo_head_hexsha": "84553e02c1f87f73594f44c497e31687dec8f250", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-08-19T03:03:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-19T03:04:46.000Z", "max_issues_repo_path": "tests/tests/special_accounts_tests.cpp", "max_issues_repo_name": "teddyvgt/eos", "max_issues_repo_head_hexsha": "b42aee975082216c2366a83f562e9361a5393cda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tests/special_accounts_tests.cpp", "max_forks_repo_name": "teddyvgt/eos", "max_forks_repo_head_hexsha": "b42aee975082216c2366a83f562e9361a5393cda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-05-04T11:36:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T11:36:48.000Z", "avg_line_length": 40.0201342282, "max_line_length": 161, "alphanum_fraction": 0.7407345296, "num_tokens": 1323}
|
(* Abstract machine interface: a type of configurations together with a
   one-step transition relation between them. The metatheory below is
   parameterized over any such machine. *)
Module Type Machine.
(* Machine configurations (states). *)
Parameter Conf : Type.
(* One-step transition relation between configurations. *)
Parameter Rel : Conf -> Conf -> Prop.
End Machine.
(* Generic metatheory over an abstract machine: the reflexive-transitive
   closure of the step relation, (indexed) sets of configurations, and the
   reachability ([>>>]), barring ([<<|]) and bidirectional ([=|>]) relations
   between such sets, together with the calculation rules ([bidir_step] and
   friends) used in the correctness proofs. *)
Module MetaTheory (mod : Machine).
Import mod.

Require Import List.
Import ListNotations.
Require Import Relations.

(* [c1 ==> c2] : one step of the machine. *)
Infix "==>" := Rel(at level 80, no associativity).

(* [trc] : zero or more machine steps (reflexive-transitive closure). *)
Definition trc := clos_refl_trans Conf Rel.

Infix "=>>" := trc (at level 80, no associativity).

(* Basic closure properties of [=>>]. *)
Lemma trc_refl c : c =>> c.
Proof. apply rt_refl. Qed.

Lemma trc_step c1 c2 : c1 ==> c2 -> c1 =>> c2.
Proof. apply rt_step. Qed.

(* Append a single step on the right of a multi-step reduction. *)
Lemma trc_step_trans c1 c2 c3 : c1 =>> c2 -> c2 ==> c3 -> c1 =>> c3.
Proof. intros. eapply rt_trans; eauto using rt_step. Qed.

(* Prepend a single step on the left of a multi-step reduction. *)
Lemma trc_step_trans' c1 c2 c3 : c1 ==> c2 -> c2 =>> c3 -> c1 =>> c3.
Proof. intros. eapply rt_trans; eauto using rt_step. Qed.

Lemma trc_trans c1 c2 c3 : c1 =>> c2 -> c2 =>> c3 -> c1 =>> c3.
Proof. apply rt_trans. Qed.

Hint Resolve trc_step trc_step_trans.
Hint Immediate trc_refl.

(* Head-step induction principle for [=>>] (unfold one step on the left),
   derived from the standard [clos_refl_trans_1n] presentation. *)
Lemma trc_ind' :
forall P : Conf -> Conf -> Prop,
(forall c : Conf, P c c) ->
(forall c1 c2 c3 : Conf, c1 ==> c2 -> c2 =>> c3 -> P c2 c3 -> P c1 c3) ->
forall c c0 : Conf, c =>> c0 -> P c c0.
Proof.
intros X Y Z c1 c2 S. unfold trc in S. rewrite -> clos_rt_rt1n_iff in S.
induction S; eauto. rewrite <- clos_rt_rt1n_iff in S. eauto.
Qed.

(* Heterogeneous tuples: [tuple [t1;...;tn]] = t1 * (... * unit). *)
Fixpoint tuple (ts : list Type) : Type :=
match ts with
| [] => unit
| t :: ts' => (t * tuple ts')%type
end.

(* A configuration-set comprehension with existentially quantified
   variables of types [ts]: [ExSet] binds one variable, [BaseSet C P] is
   the leaf "the configuration [C] such that [P] holds". *)
Inductive SetCom : list Type -> Type :=
| BaseSet : Conf -> Prop -> SetCom nil
| ExSet {t ts} : (t -> SetCom ts) -> SetCom (t :: ts).

(* Specialize a dependent function on [(t :: ts)]-tuples by fixing the head. *)
Definition funtail {t ts A} (x : t) (f : forall (args : tuple (t :: ts)), A args)
: forall (args : tuple ts), A (x, args) :=
fun xs => f (x, xs).

(* Build a [SetCom] from a configuration function and a predicate. *)
Fixpoint mkSetCom {ts} (C : tuple ts -> Conf) (P : tuple ts -> Prop) : SetCom ts :=
match ts return (tuple ts -> Conf) -> (tuple ts -> Prop) -> SetCom ts with
| nil => fun C P => BaseSet (C tt) (P tt)
| t :: ts' => fun C P => @ExSet t ts' (fun x => @mkSetCom ts' (funtail x C) (funtail x P))
end C P.

(* Project the configuration function back out of a [SetCom]. *)
Fixpoint getConf {ts} (S : SetCom ts) : tuple ts -> Conf :=
match S with
| BaseSet C P => fun xs => C
| ExSet _ _ ex => fun xs => let (x, xs') := xs in getConf (ex x) xs'
end.

(* Project the predicate back out of a [SetCom]. *)
Fixpoint getProp {ts} (S : SetCom ts) : tuple ts -> Prop :=
match S with
| BaseSet C P => fun xs => P
| ExSet _ _ ex => fun xs => let (x, xs') := xs in getProp (ex x) xs'
end.

(* [getConf]/[getProp] invert [mkSetCom] pointwise. *)
Lemma getConf_sound ts (C : tuple ts -> Conf) P x : getConf (mkSetCom C P) x = C x.
Proof.
intros. induction ts; destruct x. reflexivity. simpl. apply (IHts (funtail a0 C)).
Qed.

Lemma getProp_sound ts (C : tuple ts -> Conf) P x : getProp (mkSetCom C P) x = P x.
Proof.
intros. induction ts;destruct x. reflexivity. simpl. apply (IHts _ (funtail a0 P)).
Qed.

(* Membership in a [SetCom]: some instantiation of the bound variables
   produces [C] and satisfies the predicate. *)
Fixpoint SetComElem {ts} (C : Conf) (S : SetCom ts) : Prop :=
match S with
| BaseSet C' P => C' = C /\ P
| ExSet _ _ e => exists x, SetComElem C (e x)
end.

(* Membership characterized via the two projections. *)
Lemma set_com_elem {ts} (C : Conf) (S : SetCom ts) :
SetComElem C S <-> exists xs, getConf S xs = C /\ getProp S xs.
Proof.
split; intros.
* induction S.
- exists tt. assumption.
- simpl in H. destruct H. apply H0 in H.
decompose [ex and] H. exists (x, x0). auto.
* induction S.
- decompose [ex and] H. simpl in *. tauto.
- decompose [ex and] H. simpl. destruct x.
exists t0. apply H0. exists t1. tauto.
Qed.

(* Finite unions of set comprehensions. *)
Inductive ConfSet : Type :=
| Sing {ts} : SetCom ts -> ConfSet
| Empty : ConfSet
| Union : ConfSet -> ConfSet -> ConfSet.

(* Membership in a [ConfSet]. *)
Fixpoint ConfElem (C : Conf) (S : ConfSet) : Prop :=
match S with
| Sing _ s => SetComElem C s
| Empty => False
| Union S1 S2 => ConfElem C S1 \/ ConfElem C S2
end.

Notation "{| C | P |}" := (Sing (mkSetCom C P)) (at level 70, no associativity).
Infix "∈" := ConfElem (at level 80, no associativity).
Infix "∪" := Union (at level 76, left associativity).
Notation "S ⊆ T" := (forall x, x ∈ S -> x ∈ T) (at level 80, no associativity).
Notation "S == T" := (S ⊆ T /\ T ⊆ S) (at level 80, no associativity).

(* Rebuilding a comprehension from its projections yields the same set. *)
Lemma mkSetComCompl' {ts} (S : SetCom ts) : {| getConf S | getProp S |} == Sing S.
Proof.
simpl. split; intros; induction S; auto; simpl in *; destruct H; eexists; apply H0; apply H.
Qed.

(* Unfolding membership in a comprehension set. *)
Lemma sing_set_elem {ts} (C' : Conf) (C : tuple ts -> Conf) P :
C' ∈ {| C | P |} <-> exists xs, C xs = C' /\ P xs.
Proof.
simpl. rewrite set_com_elem. split; intro H; decompose [ex and] H; eexists;
rewrite getConf_sound in *; rewrite getProp_sound in *; split; eassumption.
Qed.

(* Surface syntax for comprehensions with explicit binders. *)
Notation "{ x .. y , C | P }" := (Sing (ExSet ( fun x => .. (ExSet (fun y => BaseSet C P)) .. )))
(at level 70, x binder, y binder, no associativity).
Notation "{, C | P }" := (Sing (BaseSet C P))
(at level 70, no associativity).

Lemma union_incll S T : S ⊆ S ∪ T.
Proof. simpl. auto. Qed.

Lemma union_inclr S T : T ⊆ S ∪ T.
Proof. simpl. auto. Qed.

Hint Resolve union_incll union_inclr.

(* A configuration is active if it can take at least one step. *)
Definition active (c : Conf) := exists c', c ==> c'.

(* normal form, an irreducible configuration *)
Definition nf (c : Conf) := forall c', ~ (c ==> c').

Hint Unfold active nf.

(* A normal form reduces (in many steps) only to itself. *)
Lemma nf_trc c c' : nf c -> c =>> c' -> c = c'.
Proof.
intros R S. destruct S using trc_ind'. reflexivity. autounfold in *. apply R in H. contradiction.
Qed.

(* [c <| P] : [c] is "barred" by [P] -- either [c] is already in [P], or
   [c] can step and every successor is barred as well. *)
Inductive barred (P : ConfSet) : Conf -> Prop :=
| barred_here c : c ∈ P -> barred P c
| barred_next c : (forall c', c ==> c' -> barred P c') -> active c -> barred P c.

Notation "C <| P" := (barred P C) (at level 80, no associativity).

(* [barred] is monotone in the target set. *)
Lemma barred_if c (P Q : ConfSet) : c <| P -> P ⊆ Q -> c <| Q.
Proof.
intros. induction H. apply barred_here. auto.
apply barred_next; assumption.
Qed.

(* A set consisting only of normal forms. *)
Definition NF (C : ConfSet) := forall c, c ∈ C -> nf c.

(* If [P] contains only normal forms and [c1] is barred by [P], then any
   reduct of [c1] can still reach [P]. *)
Proposition barred_closed c1 c2 P :
NF P -> c1 <| P -> c1 =>> c2
-> exists c3, c2 =>> c3 /\ c3 ∈ P.
Proof.
intros A B S. generalize dependent B. induction S using trc_ind';intros.
- induction B.
+ eauto.
+ destruct H1. specialize (H0 _ H1). decompose [ex and] H0. eauto using trc_step_trans'.
- induction B.
+ apply A in H0. autounfold in *. apply H0 in H. contradiction.
+ eauto.
Qed.

(* Lift [barred] to sets: every element of [P] is barred by [Q]. *)
Definition Barred (P Q : ConfSet) : Prop := forall c, c ∈ P -> c <| Q.
Hint Unfold Barred.
Infix "<<|" := Barred (at level 80, no associativity).

Proposition Barred_closed c1 c2 P C:
NF P -> C <<| P -> c1 ∈ C -> c1 =>> c2
-> exists c3, c2 =>> c3 /\ c3 ∈ P.
Proof.
intros. eapply barred_closed;eauto.
Qed.

Lemma Barred_refl_if P Q : P ⊆ Q -> P <<| Q.
Proof.
unfold Barred. intros. apply barred_here. auto.
Qed.

(* [<<|] is contravariant in the source and covariant in the target set. *)
Lemma Barred_if P P' Q Q' : P <<| Q -> P' ⊆ P -> Q ⊆ Q' -> P' <<| Q'.
Proof. unfold Barred. intros. eapply barred_if; eauto. Qed.

Lemma Barred_trans P Q R : P <<| Q -> Q <<| R -> P <<| R.
Proof.
intros B1 B2.
unfold Barred in *.
intros. apply B1 in H. clear B1.
induction H. apply B2. assumption.
apply barred_next. intros. apply H0. assumption. assumption.
Qed.

Lemma Barred_union_left P PQ Q : P <<| PQ -> Q <<| PQ -> P ∪ Q <<| PQ.
Proof.
unfold Barred. intros. auto. simpl in H1. destruct H1; auto.
Qed.

Lemma Barred_union P P' Q Q' : P <<| P' -> Q <<| Q' -> P ∪ Q <<| P' ∪ Q'.
Proof.
intros. apply Barred_union_left; eauto using Barred_if.
Qed.

(* [P >>> Q] : every configuration in [Q] is reachable (in zero or more
   steps) from some configuration in [P]. *)
Definition Reach (P Q : ConfSet) : Prop := forall c2, c2 ∈ Q -> exists c1, c1 ∈ P /\ c1 =>> c2.
Hint Unfold Reach.
Notation "P >>> Q" := (Reach P Q) (at level 80, no associativity).

Lemma Reach_refl_if P Q : Q ⊆ P -> P >>> Q.
Proof.
unfold Reach. intros. exists c2. split. auto. apply trc_refl.
Qed.

(* [>>>] is covariant in the source and contravariant in the target set. *)
Lemma Reach_if P P' Q Q' : P >>> Q -> P ⊆ P' -> Q' ⊆ Q -> P' >>> Q'.
Proof.
unfold Reach. intros. apply H1 in H2. apply H in H2. decompose [ex and] H2. eexists. split; eauto.
Qed.

Lemma Reach_trans P Q R : P >>> Q -> Q >>> R -> P >>> R.
Proof.
unfold Reach. intros. apply H0 in H1. decompose [ex and] H1.
apply H in H3. decompose [ex and] H3. eexists. split. eassumption.
eapply trc_trans; eassumption.
Qed.

Lemma Reach_union P P' Q Q' : P >>> P' -> Q >>> Q' -> P ∪ Q >>> P' ∪ Q'.
Proof.
unfold Reach. intros. auto. destruct H1; [apply H in H1| apply H0 in H1];
decompose [ex and] H1; eauto.
Qed.

(* [P =|> Q] : bidirectional refinement -- [Q] is reachable from [P] and
   [P] is barred by [Q]. *)
Definition Bidir P Q := P >>> Q /\ P <<| Q.
Hint Unfold Reach.
Notation "P =|> Q" := (Bidir P Q) (at level 80, no associativity).

Lemma bidir_refl_iff P Q : P == Q -> P =|> Q.
Proof. split;[apply Reach_refl_if| apply Barred_refl_if]; tauto. Qed.

(* [=|>] respects set equality on both sides. *)
Lemma bidir_iff P P' Q Q' : P =|> Q -> P == P' -> Q == Q' -> P' =|> Q'.
Proof.
intros. destruct H. split; [eapply Reach_if| eapply Barred_if]; eauto; try tauto.
Qed.

Lemma bidir_refl P : P =|> P.
Proof. apply bidir_refl_iff. auto. Qed.

Lemma bidir_trans P Q R : P =|> Q -> Q =|> R -> P =|> R.
Proof.
unfold Bidir. intros. destruct H. destruct H0.
split. eapply Reach_trans; eassumption. eapply Barred_trans; eassumption.
Qed.

Lemma bidir_union P P' Q Q' : P =|> P' -> Q =|> Q' -> P ∪ Q =|> P' ∪ Q'.
Proof.
unfold Bidir. intros. split. apply Reach_union; tauto. apply Barred_union; tauto.
Qed.

(* A pointwise machine step between two comprehensions yields [>>>]. *)
Lemma Reach_step ts (C C' : tuple ts -> Conf) (P P' : tuple ts -> Prop) T:
(forall x, P' x -> C x ==> C' x) -> (forall x, P' x -> P x) -> {| C | P |} ∪ T >>> {| C' | P' |} ∪ T.
Proof.
unfold Reach. intros. destruct H1.
* rewrite sing_set_elem in H1. decompose [ex and] H1. subst. exists (C x). split.
- left. rewrite sing_set_elem. exists x. auto.
- apply trc_step. auto.
* exists c2. split; auto.
Qed.

(* Pointwise barring of a comprehension yields [<<|] of the unions. *)
Lemma Reach_barred ts (C C' : tuple ts -> Conf) (P P' : tuple ts -> Prop) T:
(forall x, P x -> C x <| {| C' | P' |} ∪ T) -> {| C | P |} ∪ T <<| {| C' | P' |} ∪ T.
Proof.
unfold Barred. intros. destruct H0.
* rewrite sing_set_elem in H0. decompose [ex and] H0. subst. auto.
* left. right. auto.
Qed.

(* Main calculation rule: a pointwise step plus a barring argument gives
   a bidirectional refinement between the two comprehension unions. *)
Theorem bidir_step ts (C C' : tuple ts -> Conf) (P P' : tuple ts -> Prop) T:
(forall x, P' x -> C x ==> C' x) -> (forall x, P' x -> P x) -> (forall x, P x -> C x <| {| C' | P' |} ∪ T)
-> {| C | P |} ∪ T =|> {| C' | P' |} ∪ T.
Proof.
unfold Bidir. intros. auto using Reach_step, Reach_barred.
Qed.

(* The above lemma cannot be used directly in a proof
 calculation. Therefore, we reformulate it using [getProp] and [getConf]
 instead of the [ {| C | P |} ] construction. *)

(* Union preserves inclusion / equality on its left operand. *)
Lemma union_sub_left S1 S2 T : S1 ⊆ S2 -> S1 ∪ T ⊆ S2 ∪ T .
Proof.
intros. simpl in *. destruct H0; auto.
Qed.

Lemma union_eq_left S1 S2 T : S1 == S2 -> S1 ∪ T == S2 ∪ T .
Proof.
simpl in *; split;intros; destruct H0; solve [left; destruct H; auto| right; destruct H; auto].
Qed.

Lemma union_sub_eq S1 S2 : S1 == S2 -> S1 ⊆ S2 .
Proof.
intros. simpl in *. destruct H. auto.
Qed.

(* Set equality is symmetric. *)
Lemma set_eq_ref S T : S == T -> T == S.
Proof.
intros. destruct H. split; auto.
Qed.

(* [bidir_step] reformulated via the [getConf]/[getProp] projections so it
   can be applied to an arbitrary [SetCom]. *)
Corollary bidir_step' ts (S S' : SetCom ts) T :
(forall x, getProp S' x -> getConf S x ==> getConf S' x) ->
(forall x, getProp S' x -> getProp S x) ->
(forall x, getProp S x -> getConf S x <| Sing S' ∪ T) ->
Sing S ∪ T =|> Sing S' ∪ T.
Proof.
intros.
assert ({| getConf S | getProp S |} ∪ T =|> {| getConf S' | getProp S' |} ∪ T).
apply bidir_step; auto. intros. eapply barred_if. apply H1. auto. apply union_sub_left.
apply union_sub_eq. apply set_eq_ref. apply mkSetComCompl'.
eapply bidir_iff. eassumption.
apply union_eq_left. apply mkSetComCompl'.
apply union_eq_left. apply mkSetComCompl'.
Qed.

(* Discharge a [=|>] goal that only differs by set reshuffling. *)
Ltac bidir_iff := intros; eapply bidir_iff; eauto; simpl; tauto.

(* The following lemmas are for guiding the proof search *)

(* [bidir_step'] specialized to the case without an extra union [T]. *)
Lemma bidir_step_simp ts (S S' : SetCom ts) :
(forall x, getProp S' x -> getConf S x ==> getConf S' x) ->
(forall x, getProp S' x -> getProp S x) ->
(forall x, getProp S x -> getConf S x <| Sing S') ->
Sing S =|> Sing S'.
Proof.
intros. assert (Sing S ∪ Empty =|> Sing S' ∪ Empty).
apply bidir_step'; auto; intros. eapply barred_if. eauto. simpl; tauto. bidir_iff.
Qed.

(* Re-associate / commute unions underneath [=|>]. *)
Lemma bidir_assoc1 S1 S2 S T : S1 ∪ (S ∪ T) =|> S2 ∪ (S ∪ T) -> (S1 ∪ S) ∪ T =|> (S2 ∪ S) ∪ T.
Proof. bidir_iff. Qed.

Lemma bidir_assoc2 S1 S2 S T : S1 ∪ (S ∪ T) =|> S2 ∪ (S ∪ T) -> (S ∪ S1) ∪ T =|> (S ∪ S2) ∪ T.
Proof. bidir_iff. Qed.

Lemma bidir_comm S1 S2 T : S1 ∪ T =|> S2 ∪ T -> T ∪ S1 =|> T ∪ S2.
Proof. bidir_iff. Qed.

End MetaTheory.
|
{"author": "pa-ba", "repo": "calc-comp-rel", "sha": "2ffe6e4601e15ec926e1953dc5bf8793d37397aa", "save_path": "github-repos/coq/pa-ba-calc-comp-rel", "path": "github-repos/coq/pa-ba-calc-comp-rel/calc-comp-rel-2ffe6e4601e15ec926e1953dc5bf8793d37397aa/Machine.v"}
|
# `promote_array_type` with a FixedArray operand: the fixed-size array type
# wins regardless of the scalar type `T` or the operation `F`.
function Base.promote_array_type{FSA <: FixedArray, T}(F, ::Type{T}, ::Type{FSA})
    return FSA
end
# operations
# Unary scalar functions that get lifted elementwise over FixedArrays
# (via `map`) in the generation loop below.
const unaryOps = (:-, :~, :conj, :abs,
                  :sin, :cos, :tan, :sinh, :cosh, :tanh,
                  :asin, :acos, :atan, :asinh, :acosh, :atanh,
                  :sec, :csc, :cot, :asec, :acsc, :acot,
                  :sech, :csch, :coth, :asech, :acsch, :acoth,
                  :sinc, :cosc, :cosd, :cotd, :cscd, :secd,
                  :sind, :tand, :acosd, :acotd, :acscd, :asecd,
                  :asind, :atand, :rad2deg, :deg2rad,
                  :log, :log2, :log10, :log1p, :exponent, :exp,
                  :exp2, :expm1, :cbrt, :sqrt, :erf,
                  :erfc, :erfcx, :erfi, :dawson, :ceil, :floor,
                  :trunc, :round, :significand, :lgamma,
                  :gamma, :lfact, :frexp, :modf, :airy, :airyai,
                  :airyprime, :airyaiprime, :airybi, :airybiprime,
                  :besselj0, :besselj1, :bessely0, :bessely1,
                  :eta, :zeta, :digamma)

# vec-vec and vec-scalar
# Binary operators lifted elementwise (array-array and array-scalar).
const binaryOps = (:.+, :.-,:.*, :./, :.\, :.^,
                   :.==, :.!=, :.<, :.<=, :.>, :.>=, :+, :-,
                   :min, :max,
                   :div, :fld, :rem, :mod, :mod1, :cmp,
                   :atan2, :besselj, :bessely, :hankelh1, :hankelh2,
                   :besseli, :besselk, :beta, :lbeta)

# Operators whose array-array meaning is NOT elementwise; only the
# scalar-array combinations are generated for these.
const matrixOps = (:*, :/)

# (reduction name, binary op) pairs for whole-array reductions.
const reductions = ((:sum,:+), (:prod,:*), (:minimum,:min), (:maximum,:max))
# Generate a fresh callable functor type wrapping `func` with arity `unary`.
# Returns `(functor_type_name, definition_expr)`; the expression defines the
# immutable subtype of `Func{unary}` and its inlined `call` overload.
function gen_functor(func::Symbol, unary::Int)
    fname = gensym()
    args  = ntuple(i -> symbol("arg$i"), unary)
    defn  = quote
        immutable $fname <: Func{$unary} end
        @inline call(::$fname, $(args...)) = $func($(args...))
    end
    (fname, defn)
end
# Define sum/prod/minimum/maximum for FixedArrays by reducing with the
# matching binary functor (one functor type generated per reduction).
for (callfun, reducefun) in reductions
    functor_name, functor_expr = gen_functor(reducefun, 2)
    eval(quote
        $functor_expr
        @inline $(callfun){T <: FixedArray}(x::T) = reduce($functor_name(), x)
    end)
end
# Lift every unary scalar function onto FixedArrays elementwise via `map`.
for op in unaryOps
    functor_name, functor_expr = gen_functor(op, 1)
    eval(quote
        $functor_expr
        @inline $(op){T <: FixedArray}(x::T) = map($functor_name(), x)
    end)
end
# Lift binary ops elementwise. For each op we generate:
#  - array op array of identical type (direct map),
#  - array op array with matching shape but different eltype (promote first),
#  - scalar op array / array op scalar (with numeric promotion).
for op in binaryOps
    functor_name, functor_expr = gen_functor(op, 2)
    eval(quote
        $functor_expr
        @inline $op{T <: FixedArray}(x::T, y::T) = map($functor_name(), x, y)
        @inline $op{T, T2, NDIM, SIZE}(x::FixedArray{T, NDIM, SIZE}, y::FixedArray{T2, NDIM, SIZE}) = $op(promote(x, y)...)
        @inline $op{T <: Number}(x::T, y::FixedArray{T}) = map($functor_name(), x, y)
        @inline $op{T1 <: Number, T2}(x::T1, y::FixedArray{T2}) = $op(promote(x, y)...)
        @inline $op{T <: Number}(x::FixedArray{T}, y::T) = map($functor_name(), x, y)
        @inline $op{T1, T2 <: Number}(x::FixedArray{T1}, y::T2) = $op(promote(x, y)...)
    end)
end
# For * and / only the scalar-array combinations are elementwise; the
# matrix-matrix/matrix-vector products are defined explicitly further down.
for op in matrixOps
    functor_name, functor_expr = gen_functor(op, 2)
    eval(quote
        $functor_expr
        @inline $op{T <: Number}(x::T, y::FixedArray{T}) = map($functor_name(), x, y)
        @inline $op{T1 <: Number, T2}(x::T1, y::FixedArray{T2}) = $op(promote(x, y)...)
        @inline $op{T <: Number}(x::FixedArray{T}, y::T) = map($functor_name(), x, y)
        @inline $op{T1, T2 <: Number}(x::FixedArray{T1}, y::T2) = $op(promote(x, y)...)
    end)
end
# Promote two FixedArrays to a common element type (shapes must already
# agree for the caller's purposes); both are converted via `map`.
@inline function promote{T1 <: FixedArray, T2 <: FixedArray}(a::T1, b::T2)
    E = promote_type(eltype(T1), eltype(T2))
    (map(E, a), map(E, b))
end
# Promote a FixedArray and a scalar to a common element type.
function promote{T1, T2 <: Number}(a::FixedArray{T1}, b::T2)
    E = promote_type(T1, T2)
    (map(E, a), E(b))
end
# Promote a scalar and a FixedArray to a common element type.
function promote{T1 <: Number, T2}(a::T1, b::FixedArray{T2})
    E = promote_type(T1, T2)
    (E(a), map(E, b))
end
# A Vec and a plain number promote to a Vec with the promoted eltype.
promote_rule{N, T, X<:Number}(::Type{Vec{N,T}}, ::Type{X}) = Vec{N, promote_type(T, X)}
# Conjugate transpose of a matrix: built row-by-row via CRowFunctor.
@inline ctranspose{R, C, T}(a::Mat{R, C, T}) = Mat(ntuple(CRowFunctor(a), Val{R}))
# Conjugate transpose of a length-N vector yields a 1xN row matrix;
# the elementwise ' conjugates complex entries.
@generated function ctranspose{N,T}(b::Vec{N,T})
    expr = [:(b._[$i]',) for i=1:N]
    return quote
        Mat{1,N,T}($(expr...))
    end
end
# Plain (non-conjugating) transpose of a matrix.
@inline transpose{R, C, T}(a::Mat{R, C, T}) = Mat(ntuple(RowFunctor(a), Val{R}))
# Plain transpose of a vector: recursively transposes each entry (so
# nested/block element types work) and returns a 1xN row matrix.
@generated function transpose{N,T}(b::Vec{N,T})
    expr = [:(transpose(b._[$i]),) for i=1:N]
    return quote
        Mat{1,N,T}($(expr...))
    end
end
# 2D Euclidean length via Base.hypot (overflow-safe sqrt(x^2 + y^2)).
@inline Base.hypot{T}(v::FixedVector{2,T}) = hypot(v[1],v[2])

# Inner product functor: conjugates its first argument (a'*b), making
# `dot` the Hermitian inner product for complex element types.
immutable DotFunctor <: Func{2} end
call(::DotFunctor, a, b) = a'*b
@inline dot{T <: FixedArray}(a::T, b::T) = sum(map(DotFunctor(), a, b))
# Bilinear (non-conjugating) elementwise product functor. Unlike DotFunctor
# it computes a*b, matching the unrolled NTuple specializations below which
# use plain a[i]*b[i].
immutable BilinearDotFunctor <: Func{2} end
call(::BilinearDotFunctor, a, b) = a*b
# Generic bilinear dot product: sum of elementwise products.
# Fix: this previously mapped DotFunctor (a'*b), which silently conjugated
# the first argument for complex element types and disagreed with the
# NTuple fast paths below; BilinearDotFunctor was defined but never used.
@inline bilindot{T <: Union{FixedArray, Tuple}}(a::T, b::T) = sum(map(BilinearDotFunctor(), a, b))
# Unrolled fast paths for small tuples (identical results for real eltypes).
@inline bilindot{T}(a::NTuple{1,T}, b::NTuple{1,T}) = @inbounds return a[1]*b[1]
@inline bilindot{T}(a::NTuple{2,T}, b::NTuple{2,T}) = @inbounds return (a[1]*b[1] + a[2]*b[2])
@inline bilindot{T}(a::NTuple{3,T}, b::NTuple{3,T}) = @inbounds return (a[1]*b[1] + a[2]*b[2] + a[3]*b[3])
@inline bilindot{T}(a::NTuple{4,T}, b::NTuple{4,T}) = @inbounds return (a[1]*b[1] + a[2]*b[2] + a[3]*b[3]+a[4]*b[4])
#cross{T}(a::FixedVector{2, T}, b::FixedVector{2, T}) = a[1]*b[2]-a[2]*b[1] # not really used!?
# 3D cross product; the result has the same concrete vector type as `a`.
@inline cross{T<:Real}(a::FixedVector{3, T}, b::FixedVector{3, T}) = @inbounds return typeof(a)(
    a[2]*b[3]-a[3]*b[2],
    a[3]*b[1]-a[1]*b[3],
    a[1]*b[2]-a[2]*b[1]
)
# Euclidean (L2) norm of a fixed-size vector.
# Consistency fix: type parameters renamed to the {N, T} = (length, eltype)
# order used everywhere else in this file; the previous {T, N} spelling
# bound "N" to the eltype slot, which was purely confusing (the variables
# are positional, so behavior is unchanged).
@inline norm{N, T}(a::FixedVector{N, T}) = sqrt(dot(a,a))
# Rescale `a` to unit length, preserving its concrete FixedArray type.
@inline normalize{FSA <: FixedArray}(a::FSA) = a / norm(a)
#Matrix
# Determinants of small square matrices, fully unrolled.
@inline det{T}(A::FixedMatrix{1, 1, T}) = @inbounds return ( A[1] )
@inline det{T}(A::FixedMatrix{2, 2, T}) = @inbounds return ( A[1,1]*A[2,2] - A[1,2]*A[2,1])
# 3x3: cofactor expansion along the first row.
@inline det{T}(A::FixedMatrix{3, 3, T}) = @inbounds return (
    A[1,1]*(A[2,2]*A[3,3]-A[2,3]*A[3,2]) -
    A[1,2]*(A[2,1]*A[3,3]-A[2,3]*A[3,1]) +
    A[1,3]*(A[2,1]*A[3,2]-A[2,2]*A[3,1])
)
# 4x4: full Leibniz expansion written over linear indices A[1..16]
# (storage order of the FixedMatrix).
@inline det{T}(A::FixedMatrix{4, 4, T}) = @inbounds return (
    A[13] * A[10] * A[7] * A[4] - A[9] * A[14] * A[7] * A[4] -
    A[13] * A[6] * A[11] * A[4] + A[5] * A[14] * A[11] * A[4] +
    A[9] * A[6] * A[15] * A[4] - A[5] * A[10] * A[15] * A[4] -
    A[13] * A[10] * A[3] * A[8] + A[9] * A[14] * A[3] * A[8] +
    A[13] * A[2] * A[11] * A[8] - A[1] * A[14] * A[11] * A[8] -
    A[9] * A[2] * A[15] * A[8] + A[1] * A[10] * A[15] * A[8] +
    A[13] * A[6] * A[3] * A[12] - A[5] * A[14] * A[3] * A[12] -
    A[13] * A[2] * A[7] * A[12] + A[1] * A[14] * A[7] * A[12] +
    A[5] * A[2] * A[15] * A[12] - A[1] * A[6] * A[15] * A[12] -
    A[9] * A[6] * A[3] * A[16] + A[5] * A[10] * A[3] * A[16] +
    A[9] * A[2] * A[7] * A[16] - A[1] * A[10] * A[7] * A[16] -
    A[5] * A[2] * A[11] * A[16] + A[1] * A[6] * A[11] * A[16]
)
# Traces of small square matrices (sum of the main diagonal).
trace(A::FixedMatrix{1,1}) = A[1,1]
trace(A::FixedMatrix{2,2}) = A[1,1] + A[2,2]
trace(A::FixedMatrix{3,3}) = A[1,1] + A[2,2] + A[3,3]
trace(A::FixedMatrix{4,4}) = A[1,1] + A[2,2] + A[3,3] + A[4,4]
# Left division: solve mat * x = v via the explicit inverse. Fine for the
# tiny fixed sizes supported here; not a general numerical solver.
\{m,n,T}(mat::Mat{m,n,T}, v::Vec{n, T}) = inv(mat)*v
# 1x1 inverse: reciprocal of the single entry.
@inline inv{T}(A::Mat{1, 1, T}) = @inbounds return Mat{1, 1, T}(inv(A[1]))
# 2x2 inverse: adjugate divided by the determinant (columns as tuples).
@inline function inv{T}(A::Mat{2, 2, T})
    determinant = det(A)
    @inbounds return Mat{2, 2, T}(
        (A[2,2] /determinant, -A[2,1]/determinant),
        (-A[1,2]/determinant, A[1,1] /determinant)
    )
end
# 3x3 inverse: adjugate (transposed cofactor matrix) divided by det(A),
# fully unrolled; each inner tuple is one column of the result.
@inline function inv{T}(A::Mat{3, 3, T})
    determinant = det(A)
    @inbounds return Mat{3, 3, T}(
        ((A[2,2]*A[3,3]-A[2,3]*A[3,2]) /determinant,
         -(A[2,1]*A[3,3]-A[2,3]*A[3,1])/determinant,
         (A[2,1]*A[3,2]-A[2,2]*A[3,1]) /determinant),

        (-(A[1,2]*A[3,3]-A[1,3]*A[3,2])/determinant,
         (A[1,1]*A[3,3]-A[1,3]*A[3,1]) /determinant,
         -(A[1,1]*A[3,2]-A[1,2]*A[3,1])/determinant),

        ((A[1,2]*A[2,3]-A[1,3]*A[2,2]) /determinant,
         -(A[1,1]*A[2,3]-A[1,3]*A[2,1])/determinant,
         (A[1,1]*A[2,2]-A[1,2]*A[2,1]) /determinant)
    )
end
# 4x4 inverse: adjugate (transposed 3x3-cofactor matrix) divided by det(A),
# fully unrolled; each inner tuple is one column of the result.
@inline function inv{T}(A::Mat{4, 4, T})
    determinant = det(A)
    @inbounds return Mat{4, 4, T}(
        ((A[2,3]*A[3,4]*A[4,2] - A[2,4]*A[3,3]*A[4,2] + A[2,4]*A[3,2]*A[4,3] - A[2,2]*A[3,4]*A[4,3] - A[2,3]*A[3,2]*A[4,4] + A[2,2]*A[3,3]*A[4,4]) / determinant,
         (A[2,4]*A[3,3]*A[4,1] - A[2,3]*A[3,4]*A[4,1] - A[2,4]*A[3,1]*A[4,3] + A[2,1]*A[3,4]*A[4,3] + A[2,3]*A[3,1]*A[4,4] - A[2,1]*A[3,3]*A[4,4]) / determinant,
         (A[2,2]*A[3,4]*A[4,1] - A[2,4]*A[3,2]*A[4,1] + A[2,4]*A[3,1]*A[4,2] - A[2,1]*A[3,4]*A[4,2] - A[2,2]*A[3,1]*A[4,4] + A[2,1]*A[3,2]*A[4,4]) / determinant,
         (A[2,3]*A[3,2]*A[4,1] - A[2,2]*A[3,3]*A[4,1] - A[2,3]*A[3,1]*A[4,2] + A[2,1]*A[3,3]*A[4,2] + A[2,2]*A[3,1]*A[4,3] - A[2,1]*A[3,2]*A[4,3]) / determinant),

        ((A[1,4]*A[3,3]*A[4,2] - A[1,3]*A[3,4]*A[4,2] - A[1,4]*A[3,2]*A[4,3] + A[1,2]*A[3,4]*A[4,3] + A[1,3]*A[3,2]*A[4,4] - A[1,2]*A[3,3]*A[4,4]) / determinant,
         (A[1,3]*A[3,4]*A[4,1] - A[1,4]*A[3,3]*A[4,1] + A[1,4]*A[3,1]*A[4,3] - A[1,1]*A[3,4]*A[4,3] - A[1,3]*A[3,1]*A[4,4] + A[1,1]*A[3,3]*A[4,4]) / determinant,
         (A[1,4]*A[3,2]*A[4,1] - A[1,2]*A[3,4]*A[4,1] - A[1,4]*A[3,1]*A[4,2] + A[1,1]*A[3,4]*A[4,2] + A[1,2]*A[3,1]*A[4,4] - A[1,1]*A[3,2]*A[4,4]) / determinant,
         (A[1,2]*A[3,3]*A[4,1] - A[1,3]*A[3,2]*A[4,1] + A[1,3]*A[3,1]*A[4,2] - A[1,1]*A[3,3]*A[4,2] - A[1,2]*A[3,1]*A[4,3] + A[1,1]*A[3,2]*A[4,3]) / determinant),

        ((A[1,3]*A[2,4]*A[4,2] - A[1,4]*A[2,3]*A[4,2] + A[1,4]*A[2,2]*A[4,3] - A[1,2]*A[2,4]*A[4,3] - A[1,3]*A[2,2]*A[4,4] + A[1,2]*A[2,3]*A[4,4]) / determinant,
         (A[1,4]*A[2,3]*A[4,1] - A[1,3]*A[2,4]*A[4,1] - A[1,4]*A[2,1]*A[4,3] + A[1,1]*A[2,4]*A[4,3] + A[1,3]*A[2,1]*A[4,4] - A[1,1]*A[2,3]*A[4,4]) / determinant,
         (A[1,2]*A[2,4]*A[4,1] - A[1,4]*A[2,2]*A[4,1] + A[1,4]*A[2,1]*A[4,2] - A[1,1]*A[2,4]*A[4,2] - A[1,2]*A[2,1]*A[4,4] + A[1,1]*A[2,2]*A[4,4]) / determinant,
         (A[1,3]*A[2,2]*A[4,1] - A[1,2]*A[2,3]*A[4,1] - A[1,3]*A[2,1]*A[4,2] + A[1,1]*A[2,3]*A[4,2] + A[1,2]*A[2,1]*A[4,3] - A[1,1]*A[2,2]*A[4,3]) / determinant),

        ((A[1,4]*A[2,3]*A[3,2] - A[1,3]*A[2,4]*A[3,2] - A[1,4]*A[2,2]*A[3,3] + A[1,2]*A[2,4]*A[3,3] + A[1,3]*A[2,2]*A[3,4] - A[1,2]*A[2,3]*A[3,4]) / determinant,
         (A[1,3]*A[2,4]*A[3,1] - A[1,4]*A[2,3]*A[3,1] + A[1,4]*A[2,1]*A[3,3] - A[1,1]*A[2,4]*A[3,3] - A[1,3]*A[2,1]*A[3,4] + A[1,1]*A[2,3]*A[3,4]) / determinant,
         (A[1,4]*A[2,2]*A[3,1] - A[1,2]*A[2,4]*A[3,1] - A[1,4]*A[2,1]*A[3,2] + A[1,1]*A[2,4]*A[3,2] + A[1,2]*A[2,1]*A[3,4] - A[1,1]*A[2,2]*A[3,4]) / determinant,
         (A[1,2]*A[2,3]*A[3,1] - A[1,3]*A[2,2]*A[3,1] + A[1,3]*A[2,1]*A[3,2] - A[1,1]*A[2,3]*A[3,2] - A[1,2]*A[2,1]*A[3,3] + A[1,1]*A[2,2]*A[3,3]) / determinant)
    )
end
# Matrix
# Catch-all methods for size-incompatible operands: raise a descriptive
# DimensionMismatch instead of a MethodError. The size-matched @generated
# methods below are more specific and win dispatch when the inner
# dimensions agree.
(*){T, M, N, O, K}(a::FixedMatrix{M, N, T}, b::FixedMatrix{O, K, T}) = throw(DimensionMismatch("$N != $O in $(typeof(a)) and $(typeof(b))"))
(*){T, M, N, O}(a::FixedMatrix{M, N, T}, b::FixedVector{O, T}) = throw(DimensionMismatch("$N != $O in $(typeof(a)) and $(typeof(b))"))
# Outer product: length-N column vector times 1xN row matrix -> NxN matrix.
@generated function *{T, N}(a::FixedVector{N, T}, b::FixedMatrix{1, N, T})
    expr = Expr(:tuple, [Expr(:tuple, [:(a[$i] * b[$j]) for i in 1:N]...) for j in 1:N]...)
    :( Mat($(expr)) )
end
# Matrix-vector product: one bilindot(row, vector) per row, fully unrolled.
@generated function *{T, M, N}(a::Mat{M, N, T}, b::Vec{N,T})
    expr = [:(bilindot(row(a, $i), b.(1))) for i=1:M]
    :( Vec($(expr...)) )
end
# Matrix-matrix product: entry (i,j) = bilindot(row(a,i), column(b,j)),
# fully unrolled over all M*R entries.
@generated function *{T, M, N, R}(a::Mat{M, N, T}, b::Mat{N, R, T})
    expr = Expr(:tuple, [Expr(:tuple, [:(bilindot(row(a, $i), column(b,$j))) for i in 1:M]...) for j in 1:R]...)
    :( Mat($(expr)) )
end
# Elementwise equality of two FixedVectorNoTuple values: sizes must match
# and every pair of elements must compare equal.
function (==)(a::FixedVectorNoTuple, b::FixedVectorNoTuple)
    size(a) == size(b) || return false
    @inbounds for idx = 1:length(a)
        a[idx] == b[idx] || return false
    end
    return true
end
# General FixedArray equality: compare the underlying element tuples.
(==)(a::FixedArray, b::FixedArray) = a.(1) == b.(1)
# A length-R vector equals an Rx1 one-column matrix when elements match.
(==){R, T, FSA <: FixedVector}(a::FSA, b::Mat{R, 1, T}) = a.(1) == column(b,1)
(==){R, T, FSA <: FixedVector}(a::Mat{R, 1, T}, b::FSA) = column(a,1) == b.(1)
# Elementwise equality against an arbitrary AbstractArray of the same size.
function (==)(a::FixedArray, b::AbstractArray)
    size(a) == size(b) || return false
    @inbounds for idx = 1:length(a)
        a[idx] == b[idx] || return false
    end
    return true
end
# Symmetric case: delegate to the FixedArray == AbstractArray method above.
(==)(a::AbstractArray, b::FixedArray) = b == a

# To support @test_approx_eq
Base.Test.approx_full(a::FixedArray) = a

# UniformScaling: J acts as the scalar J.λ for * and /.
*(J::Base.LinAlg.UniformScaling, A::FixedArray) = J.λ*A
*(A::FixedArray, J::Base.LinAlg.UniformScaling) = A*J.λ
/(A::FixedArray, J::Base.LinAlg.UniformScaling) = A/J.λ
# For +/- the scaling is materialized as J.λ * eye of the matching shape.
+{m, n, T}(A::Mat{m,n, T}, J::Base.LinAlg.UniformScaling) = A + J.λ*eye(Mat{m,n,T})
+{m, n, T}(J::Base.LinAlg.UniformScaling, A::Mat{m,n, T}) = A + J
-{m, n, T}(A::Mat{m,n, T}, J::Base.LinAlg.UniformScaling) = A + (-J)
-{m, n, T}(J::Base.LinAlg.UniformScaling, A::Mat{m,n, T}) = J.λ*eye(Mat{m,n,T}) - A
|
{"hexsha": "0c741cbe282dad1cb49e07f184929ab7770c1d95", "size": 12662, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ops.jl", "max_stars_repo_name": "mschauer/FixedSizeArrays.jl", "max_stars_repo_head_hexsha": "3642f02d94620f09f13e868a2759187b653986a7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ops.jl", "max_issues_repo_name": "mschauer/FixedSizeArrays.jl", "max_issues_repo_head_hexsha": "3642f02d94620f09f13e868a2759187b653986a7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-10T13:35:41.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-10T13:35:41.000Z", "max_forks_repo_path": "src/ops.jl", "max_forks_repo_name": "mschauer/FixedSizeArrays.jl", "max_forks_repo_head_hexsha": "3642f02d94620f09f13e868a2759187b653986a7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4280701754, "max_line_length": 161, "alphanum_fraction": 0.4947875533, "num_tokens": 6167}
|
from fnal_column_analysis_tools.lookup_tools.lookup_base import lookup_base
import numpy as np
from awkward.array.jagged import JaggedArray
from copy import deepcopy
import numba
from numpy import sqrt,log
from numpy import maximum as max
from numpy import minimum as min
def numbaize(fstr, varlist):
    """
    Convert function string to numba function

    Supports only simple math for now.

    Builds ``lambda <varlist>: <fstr>`` and jit-compiles it; free names in
    ``fstr`` (sqrt, log, max, min, ...) resolve against this module's
    globals, which re-export the needed numpy functions.

    SECURITY NOTE: ``eval`` executes arbitrary code, so ``fstr`` must come
    only from trusted correction text files, never from untrusted input.
    """
    lstr = "lambda %s: %s" % (",".join(varlist), fstr)
    func = eval(lstr)
    nfunc = numba.njit(func)
    return nfunc
@numba.njit
def masked_bin_eval(dim1_indices, dimN_bins, dimN_vals):
    """
    Find the bin index of each value in ``dimN_vals`` within the edge array
    ``dimN_bins[i]`` selected by the corresponding first-dimension bin index
    in ``dim1_indices`` (the secondary edges may differ per dim-1 bin).

    Returns an integer array shaped/typed like ``dim1_indices``.
    """
    dimN_indices = np.empty_like(dim1_indices)
    for i in np.unique(dim1_indices):
        mask = dim1_indices == i
        # searchsorted(side='right') returns the insertion point p, so p-1 is
        # the bin whose lower edge is <= value. An edge array of length L
        # defines L-1 bins, hence clamp into [0, L-2]; under/overflow values
        # land in the first/last bin.
        # Bug fix: the index was previously decremented twice (once before
        # the upper clamp and again before the lower clamp), shifting every
        # in-range lookup down by one bin.
        found = np.searchsorted(dimN_bins[i], dimN_vals[mask], side='right')
        dimN_indices[mask] = max(min(found - 1, len(dimN_bins[i]) - 2), 0)
    return dimN_indices
class jet_energy_corrector(lookup_base):
    """
    Evaluates a binned, formula-based jet energy correction.

    Constructor inputs (as produced by the JEC text-file converters):
      formula          -- math expression string over parameters + eval vars
      bins_and_orders  -- ({binname: edges}, [binname, ...]): binning
                          variables; the first is 1D, the rest are stored
                          per-first-dimension bin
      clamps_and_vars  -- (clamp_mins, clamp_maxs, [varname, ...]): the
                          evaluation variables and their per-bin clamps
      parms_and_orders -- ([parm arrays], [parmname, ...]): fitted
                          parameters, one array per parameter, indexed by bin
    """
    def __init__(self, formula, bins_and_orders, clamps_and_vars, parms_and_orders):
        super(jet_energy_corrector, self).__init__()
        self._dim_order = bins_and_orders[1]
        self._bins = bins_and_orders[0]
        self._eval_vars = clamps_and_vars[2]
        self._eval_clamp_mins = clamps_and_vars[0]
        self._eval_clamp_maxs = clamps_and_vars[1]
        self._parm_order = parms_and_orders[1]
        self._parms = parms_and_orders[0]
        self._formula_str = formula
        # jit-compiled formula taking (parameters..., eval variables...)
        self._formula = numbaize(formula, self._parm_order + self._eval_vars)
        # store the secondary-dimension edges as a list of 1D arrays
        # (one edge array per first-dimension bin)
        for binname in self._dim_order[1:]:
            binsaslists = self._bins[binname].tolist()
            self._bins[binname] = [np.array(bins) for bins in binsaslists]
        # get the jit to compile if we've got more than one bin dim
        if len(self._dim_order) > 1:
            masked_bin_eval(np.array([0]), self._bins[self._dim_order[1]], np.array([0.0]))
        # call signature: binned dims first, then any extra eval variables
        # (fix: loop variable renamed from `eval`, which shadowed the builtin)
        self._signature = deepcopy(self._dim_order)
        for eval_name in self._eval_vars:
            if eval_name not in self._signature:
                self._signature.append(eval_name)
        # positional-argument index for each binning / evaluation variable
        self._dim_args = {self._dim_order[i]: i for i in range(len(self._dim_order))}
        self._eval_args = {}
        for i, argname in enumerate(self._eval_vars):
            self._eval_args[argname] = i + len(self._dim_order)
            if argname in self._dim_args:
                self._eval_args[argname] = self._dim_args[argname]

    def _evaluate(self, *args):
        """Evaluate the correction for positional args in `self._signature` order."""
        bin_vals = {argname: args[self._dim_args[argname]] for argname in self._dim_order}
        eval_vals = {argname: args[self._eval_args[argname]] for argname in self._eval_vars}

        # lookup the bins that we care about
        dim1_name = self._dim_order[0]
        dim1_indices = np.searchsorted(self._bins[dim1_name], bin_vals[dim1_name], side='right')
        # An edge array of size S defines S-1 bins, so valid bin indices are
        # [0, S-2]. Bug fix: the upper clamp was previously S-1, which let
        # overflow values index one past the last parameter bin.
        dim1_indices = np.clip(dim1_indices - 1, 0, self._bins[dim1_name].size - 2)
        bin_indices = [dim1_indices]
        for binname in self._dim_order[1:]:
            bin_indices.append(masked_bin_eval(bin_indices[0], self._bins[binname], bin_vals[binname]))
        bin_tuple = tuple(bin_indices)

        # get clamp values and clip the inputs into each bin's valid range
        eval_values = []
        for eval_name in self._eval_vars:
            clamp_mins = self._eval_clamp_mins[eval_name][bin_tuple]
            clamp_maxs = self._eval_clamp_maxs[eval_name][bin_tuple]
            eval_values.append(np.clip(eval_vals[eval_name], clamp_mins, clamp_maxs))

        # gather per-bin parameter values and evaluate the compiled formula
        parm_values = [parm[bin_tuple] for parm in self._parms]
        return self._formula(*tuple(parm_values + eval_values))

    def __repr__(self):
        out = 'binned dims: %s\n' % (self._dim_order)
        out += 'eval vars  : %s\n' % (self._eval_vars)
        out += 'parameters : %s\n' % (self._parm_order)
        out += 'formula    : %s\n' % (self._formula_str)
        out += 'signature  : (%s)\n' % (','.join(self._signature))
        return out
|
{"hexsha": "b680e138784a8069ca21fc08ab94784935033174", "size": 4067, "ext": "py", "lang": "Python", "max_stars_repo_path": "fnal_column_analysis_tools/lookup_tools/jet_energy_corrector.py", "max_stars_repo_name": "fnavarro94/fnal-column-analysis-tools", "max_stars_repo_head_hexsha": "81cf9153194f8594484a3c63df4edd0ee80d6fff", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fnal_column_analysis_tools/lookup_tools/jet_energy_corrector.py", "max_issues_repo_name": "fnavarro94/fnal-column-analysis-tools", "max_issues_repo_head_hexsha": "81cf9153194f8594484a3c63df4edd0ee80d6fff", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fnal_column_analysis_tools/lookup_tools/jet_energy_corrector.py", "max_forks_repo_name": "fnavarro94/fnal-column-analysis-tools", "max_forks_repo_head_hexsha": "81cf9153194f8594484a3c63df4edd0ee80d6fff", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8105263158, "max_line_length": 109, "alphanum_fraction": 0.6624047209, "include": true, "reason": "import numpy,from numpy,import numba", "num_tokens": 1067}
|
from __future__ import division, print_function
import numpy
import numba
from math import floor
@numba.jit(nopython=True)
def interpolation_search(x, z):
    """Locate the segment of the sorted grid ``x`` that brackets ``z``.

    Returns an index ``j`` in ``[0, len(x) - 2]`` such that the interval
    ``x[j] .. x[j + 1]`` contains ``z``, estimating each probe position
    from the value itself (interpolation search) rather than the midpoint.
    """
    n = len(x)
    assert n > 1
    # Values below the second knot (or a two-point grid) belong to the
    # first segment; values at/above the second-to-last knot to the last.
    if z < x[1] or n == 2:
        return 0
    if z >= x[-2]:
        return n - 2
    lo = 0
    hi = n - 1
    while hi - lo > 1:
        # Estimate where z sits between x[lo] and x[hi].
        frac = (z - x[lo]) / (x[hi] - x[lo])
        probe = int(lo + floor((hi - lo) * frac))
        if z >= x[probe + 1]:
            lo = probe + 1
        elif z < x[probe]:
            hi = probe
        else:
            return probe
    return lo
@numba.jit(nopython=True)
def lerp(y, theta):
    """Blend the last-axis pair ``y[..., 0]`` / ``y[..., 1]`` by fraction ``theta``."""
    return y[..., 0] * (1 - theta) + y[..., 1] * theta
class interp1d(object):
    """
    Vectorized piecewise-linear 1D interpolation.  The segment lookup is
    done once in the constructor; each call with a new set of sampled
    values then reuses the cached indices and weights.

    Adapted from:
    Fast multithreaded linear interpolation, 1D and 2D - Ver. 3.4
    This versione: 15/06/2018
    @author: Marco Maffezzoli, Universita Bocconi
    http://didattica.unibocconi.it/mypage/upload/49183_20180615_035144_INTERPOLATION.PY
    """

    def __init__(self, x_new, x):
        # Cache, for every query point in x_new, the index of its bracketing
        # segment of the grid x and the fractional position within it.
        (self._index, self._theta) = self._locate(x_new, x)

    @staticmethod
    @numba.guvectorize("(i8[:],f8[:],f8[:],f8[:])", "(m),(m),(n)->(m)")
    def _linear(index, theta, y, y_new):
        # For each query j, linearly blend the two grid samples around it.
        for (j, (i, t)) in enumerate(zip(index, theta)):
            y_new[j] = lerp(y[i : i + 2], t)

    def __call__(self, y):
        """Interpolate sampled values ``y`` at the query points given to the
        constructor."""
        return self._linear(self._index, self._theta, y)

    # NOTE(review): not marked @staticmethod — numba.guvectorize returns a
    # plain ufunc-like object, so attribute access through the class still
    # works; confirm this is intentional.
    @numba.guvectorize("(f8[:],f8[:],i8[:],f8[:])", "(),(n)->(),()")
    def _locate(x_new, x, index, theta):
        index[0] = i = interpolation_search(x, x_new[0])
        theta[0] = (x_new[0] - x[i]) / (x[i + 1] - x[i])
|
{"hexsha": "deaa6ca49b757f01d2fc49e0a7b51af7e8c89296", "size": 1611, "ext": "py", "lang": "Python", "max_stars_repo_path": "foldedleastsquares/interpolation.py", "max_stars_repo_name": "martindevora/tls", "max_stars_repo_head_hexsha": "61811815d5b24dab5b28fb5a2263039711205541", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2019-01-09T11:20:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T18:39:20.000Z", "max_issues_repo_path": "foldedleastsquares/interpolation.py", "max_issues_repo_name": "martindevora/tls", "max_issues_repo_head_hexsha": "61811815d5b24dab5b28fb5a2263039711205541", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 99, "max_issues_repo_issues_event_min_datetime": "2018-10-17T07:49:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T14:50:12.000Z", "max_forks_repo_path": "foldedleastsquares/interpolation.py", "max_forks_repo_name": "martindevora/tls", "max_forks_repo_head_hexsha": "61811815d5b24dab5b28fb5a2263039711205541", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-01-28T21:06:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T23:30:59.000Z", "avg_line_length": 27.3050847458, "max_line_length": 87, "alphanum_fraction": 0.5412787089, "include": true, "reason": "import numpy,import numba", "num_tokens": 567}
|
import threading
import time
import numpy
from ..Player import *
from .Game import Game
from .GuiBase import GuiBase
class GameController(object):
    """
    Control the drawing logic of gobang and the moves of player1 and
    player2, whether human player or AI player.
    """

    def __init__(self, player1: Player, player2: Player, game: Game, gui: GuiBase):
        """Map each player to its chess color and keep game/gui references.

        player1 and player2 must hold opposite chess colors; gui may be
        None for a headless game.
        """
        assert player1.get_chess_color() != player2.get_chess_color()
        if player1.get_chess_color().is_black():
            self._player = {Chess.BLACK: player1, Chess.WHITE: player2}
        else:
            self._player = {Chess.BLACK: player2, Chess.WHITE: player1}
        self._game = game
        self._gui = gui
        self._thread = None
        self._exit = False

    def _move(self, x, y):
        # Draw first (the color to draw is the side about to move), then
        # commit the move to the game state.
        if self._gui is not None:
            self._gui.draw_chess(ChessLocation(x, y, self._game.get_chess_now()))
        self._game.move(x, y)

    def _function(self):
        """
        The game loop: poll the side to move for its next move until the
        game is won or stop() is requested.
        """
        # BUGFIX: the condition used to be
        # `not self._game.is_win() or self._exit`, which evaluates True
        # forever once _exit is set, so stop() could never end the loop.
        while not self._game.is_win() and not self._exit:
            ret = self._player[self._game.get_chess_now()].get_next(self._game.board)
            if ret is None:
                # No move available yet (e.g. waiting for a human click).
                time.sleep(0.01)
                continue
            x, y = ret
            self._move(x, y)
            self._game.is_win()
        self.stop()

    def start(self):
        """Redraw the board and (re)start the game loop on a worker thread."""
        self._refresh()
        self._exit = False
        # Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # long-standing replacement.
        if self._thread is None or not self._thread.is_alive():
            self._thread = threading.Thread(target=self._function)
            self._thread.start()

    def _refresh(self):
        # Redraw the whole scene: board background plus every placed chess.
        if self._gui is None:
            return
        self._gui.clear()
        self._gui.draw_board()
        self._gui.draw_chesses(self._game.board.get_order())

    def start_no_thread(self):
        """Run the game loop on the calling thread (blocks until finished)."""
        self._function()

    def stop(self):
        """Request termination of the game loop."""
        self._exit = True

    def is_stop(self):
        """Return True when the game loop is stopped or stopping."""
        return self._exit

    def back(self):
        """Undo the last move, then redraw.

        When the opponent is an AI, two moves are undone so the human
        regains the turn.
        """
        if self._player[self._game.get_chess_now().exchange()].is_auto():
            self._game.back2steps()
        else:
            self._game.back()
        # refresh: restarting also redraws, so only refresh directly when
        # the loop is still running
        if self.is_stop():
            self.start()
        else:
            self._refresh()

    def clear(self):
        """Reset the game to an empty board, then redraw."""
        self._game.clear()
        # refresh (the extra unconditional _refresh() the original did here
        # was redundant: both branches below already redraw)
        if self.is_stop():
            self.start()
        else:
            self._refresh()

    def _need_draw_scores(self) -> bool:
        # Scores are only meaningful when the side that just moved is an AI.
        return isinstance(self._player[self._game.get_chess_now().exchange()], PlayerAI)

    def _draw_scores(self):
        """
        Try to draw the AI's move scores onto the screen (no-op for humans).
        """
        player = self._player[self._game.get_chess_now().exchange()]
        if isinstance(player, PlayerAI):
            scores = player.get_scores()
            for chess_score in scores:
                x, y, score = chess_score.x, chess_score.y, chess_score.score
                # numpy.infty was removed in NumPy 2.0; numpy.inf is the
                # same value under its canonical name.
                if score == numpy.inf:
                    self._gui.draw_text(x, y, 'inf')
                else:
                    self._gui.draw_text(x, y, '%d' % score)

    def set_click_event(self, x, y):
        """
        Allocate the click event to the human player whose turn it is.
        """
        player = self._player[self._game.get_chess_now()]
        if isinstance(player, PlayerHuman):
            player.set_touched_coordinate(x, y)
|
{"hexsha": "dc94642156e28504b8493c32de1e8fac8d4b1f52", "size": 2788, "ext": "py", "lang": "Python", "max_stars_repo_path": "Helper/Controller/GameController.py", "max_stars_repo_name": "jingege315/gobang", "max_stars_repo_head_hexsha": "983a0ce34dc2120464f42441ef6b190ef2433f08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-20T07:04:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-23T14:12:15.000Z", "max_issues_repo_path": "Helper/Controller/GameController.py", "max_issues_repo_name": "jingege315/gobang_alphazero", "max_issues_repo_head_hexsha": "983a0ce34dc2120464f42441ef6b190ef2433f08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Helper/Controller/GameController.py", "max_forks_repo_name": "jingege315/gobang_alphazero", "max_forks_repo_head_hexsha": "983a0ce34dc2120464f42441ef6b190ef2433f08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6725663717, "max_line_length": 106, "alphanum_fraction": 0.6947632712, "include": true, "reason": "import numpy", "num_tokens": 770}
|
# -*- coding: utf-8 -*-
#%%
from sklearn.model_selection import GridSearchCV
import numpy as np
import pandas as pd
#from sklearn import linear_model
import matplotlib.pyplot as plt
import fitting as ft
import preprocessing as pp
from datetime import datetime
# Import GBDT
from sklearn.ensemble import GradientBoostingClassifier
# Import Random Forest
from sklearn.ensemble import RandomForestClassifier
# Import CART
from sklearn import tree
#%%
# Column names for the UCI "Adult" census data set (the raw file ships
# without a header row).
ADULT_COLUMNS = [
    "age",
    "workclass",
    "fnlwgt",
    "education",
    "education-num",
    "marital-status",
    "occupation",
    "relationship",
    "race",
    "sex",
    "capital-gain",
    "capital-loss",
    "hours-per-week",
    "native-country",
    "income",
]

data_ori = pd.read_csv("data/adult.data", low_memory=False, encoding=u'utf-8', header=None)
data_ori.columns = ADULT_COLUMNS
#%%
# Work on a small head sample while prototyping.
data_0 = data_ori.iloc[:100, :]

cat_features = ["workclass", "education", "marital-status", "occupation",
                "relationship", "race", "sex", "native-country"]
con_features = ["age", "fnlwgt", "education-num", "capital-gain",
                "capital-loss", "hours-per-week"]
label = ["income"]

# Preprocessing configuration consumed by pp.BuildFeatures.
params = {
    "con_features": con_features,
    "cat_features": cat_features,
    "label": label,
    "n_disc": 5,               # number of discretization bins
    "frac": 0.7,               # training fraction
    "sparse": False,
    "dropNaN": True,
    "way_disc": "equal_width",
}

X_norm, Y, onehot_names = pp.BuildFeatures(data_0, params)
dataset = pd.concat([X_norm, Y], axis=1)
features = list(X_norm.columns.values)
label = Y.columns.values[0]  # note: rebinds `label` from a list to a scalar name
frac = params["frac"]
X_train, Y_train, X_test, Y_test = ft.BuildDataSet(dataset, features, label, frac)
#%%
# Time the model-fitting section.
# BUGFIX: `tic` was referenced below (`toc - tic`) but never assigned
# anywhere in the script, so both elapsed-time prints raised NameError.
tic = datetime.now()

# Fit two GBDT and two random-forest configurations via the project helpers.
gbdt1 = ft.GBDT(X_train, Y_train, [300, 0.01, 3, 'auto'])
gbdt2 = ft.GBDT(X_train, Y_train, [5, 0.5, 5, 'auto'])
rf1 = ft.RF(X_train, Y_train, [200, 5])
rf2 = ft.RF(X_train, Y_train, [50, 2])
toc = datetime.now()
elapsed_time = toc - tic
print("fitting elapsed time: " + str(elapsed_time))

# Evaluate every model on the held-out split and keep the ROC-best one.
models = [("gbdt1", gbdt1), ("gbdt2", gbdt2),
          ("rf1", rf1), ("rf2", rf2)]
pr_plt, metrics_roc = ft.eval_roc(models, X_test, Y_test)
pr_plt.show()
pr_plt, metrics_pr = ft.eval_pr(models, X_test, Y_test)
pr_plt.show()
best = ft.BestModel(metrics_roc)
toc = datetime.now()
elapsed_time = toc - tic
print("total elapsed time: " + str(elapsed_time))
#%%
# Set the parameters by cross-validation
# NOTE(review): `tuned_parameters` (an SVC-style grid) is defined but never
# used below; `svc_parameters` is the grid actually passed to GridSearchCV.
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10, 100, 1000]},
                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

scores = ['precision', 'recall']

gbdt_parameters = [{'n_estimators' : [20, 100],
                   'learning_rate' : [0.05, 0.1],
                   'max_depth' : [1, 6],
                   'max_features' : ['auto']}]
# NOTE(review): this rebinding overwrites ['precision', 'recall'] above —
# only 'roc_auc' is ever used in this script.
scores = ['roc_auc']

#%%
from sklearn.svm import SVC
# A deliberately tiny SVC grid, kept small for a quick smoke test.
svc_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                     'C': [1, 10]}]
#scores = ['roc_auc']
score = 'roc_auc'
# Exhaustive 3-fold grid search maximizing ROC AUC; probability=True is
# required so the fitted SVC can emit the scores AUC needs.
clf = GridSearchCV(SVC( probability=True),
                   svc_parameters, cv=3,
                   scoring= score ) #'%s_macro' % score)

clf.fit(X_train, Y_train)

print("Best parameters set found on development set:")
bestModel = clf.best_estimator_
bestParam = clf.best_params_
print(bestParam)
#%%
def GBDT(X_train, Y_train, params=None, model_dump=False):
    """Grid-search a GradientBoostingClassifier maximizing ROC AUC.

    Runs a 5-fold exhaustive search over a fixed hyper-parameter grid and
    returns ``(best_estimator, best_params)``.  ``params`` and
    ``model_dump`` are accepted for interface compatibility but are
    currently unused.
    """
    search_space = [{'n_estimators': [50, 100, 300, 600],
                     'learning_rate': [0.05, 0.1, 0.5, 1],
                     'max_depth': [1, 6, 20],
                     'max_features': ['auto']}]

    searcher = GridSearchCV(GradientBoostingClassifier(),
                            search_space, cv=5,
                            scoring='roc_auc')
    searcher.fit(X_train, Y_train)

    print("Best parameters set found on development set:")
    return searcher.best_estimator_, searcher.best_params_
'''
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = Y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
'''
#%%
def f(x):
    """Return the consecutive pair ``(x, x + 1)``."""
    return (x, x + 1)

x1 = f(5)
|
{"hexsha": "8e87a54fa0ea5e20aaca3d833e789411c958b594", "size": 4545, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/gridsearch_test.py", "max_stars_repo_name": "rexzhang2014/Easi-ML", "max_stars_repo_head_hexsha": "5ff084b81b2c516d0ebea75f1dc0db1ebcb775db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-01-29T08:41:23.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-29T08:41:23.000Z", "max_issues_repo_path": "test/gridsearch_test.py", "max_issues_repo_name": "rexzhang2014/Easi-ML", "max_issues_repo_head_hexsha": "5ff084b81b2c516d0ebea75f1dc0db1ebcb775db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/gridsearch_test.py", "max_forks_repo_name": "rexzhang2014/Easi-ML", "max_forks_repo_head_hexsha": "5ff084b81b2c516d0ebea75f1dc0db1ebcb775db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-04-04T04:24:31.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-11T09:01:51.000Z", "avg_line_length": 25.8238636364, "max_line_length": 91, "alphanum_fraction": 0.6224422442, "include": true, "reason": "import numpy", "num_tokens": 1297}
|
import json
import os.path
import pickle
import traceback
from typing import List, Set, Tuple
import click
import mdtraj
import numpy
from nagl.utilities.toolkits import stream_from_file
from openff.toolkit.topology import Molecule
from openff.toolkit.utils import (
OpenEyeToolkitWrapper,
ToolkitRegistry,
UndefinedStereochemistryError,
)
from openff.utilities import temporary_cd
from qcelemental.models.common_models import DriverEnum, Model
from qcelemental.models.molecule import Molecule as QCMolecule
from qcelemental.models.procedures import (
OptimizationInput,
OptimizationResult,
QCInputSpecification,
)
from qcelemental.molutil import guess_connectivity
from qcengine import compute_procedure
from simtk import unit
from tqdm import tqdm
def _find_h_bonds(molecule: Molecule, conformer: unit.Quantity) -> Set[Tuple[int, int]]:
    """Return the (hydrogen index, acceptor index) hydrogen bonds present in
    a single conformer, detected with mdtraj's Baker-Hubbard criterion.
    """
    coordinates = conformer.value_in_unit(unit.nanometers).tolist()

    topology = mdtraj.Topology.from_openmm(molecule.to_topology().to_openmm())
    trajectory = mdtraj.Trajectory(
        numpy.array([coordinates]) * unit.nanometers, topology
    )

    # freq=0.0 keeps every triplet satisfying the geometric criterion in
    # this single frame.
    triplets = mdtraj.baker_hubbard(trajectory, freq=0.0, periodic=False)
    return {(h_index, acceptor_index) for _, h_index, acceptor_index in triplets}
def _validate_optimization(
    molecule: Molecule,
    initial_connectivity: Set[Tuple[int, int]],
    initial_h_bonds: Set[Tuple[int, int]],
    final_molecule: QCMolecule,
) -> bool:
    """Check that a QC minimization neither broke / formed covalent bonds nor
    changed the intramolecular hydrogen-bonding pattern.

    Returns True when the final structure is acceptable; otherwise prints a
    diagnostic and returns False.
    """
    smiles = molecule.to_smiles(explicit_hydrogens=False)

    final_connectivity = {
        tuple(sorted(pair))
        for pair in guess_connectivity(final_molecule.symbols, final_molecule.geometry)
    }

    if final_connectivity != initial_connectivity:
        print(
            f"connectivity of {smiles} changed - "
            f"old={initial_connectivity} new={final_connectivity}"
        )
        return False

    final_geometry = final_molecule.geometry.reshape(-1, 3) * unit.bohr
    final_h_bonds = _find_h_bonds(molecule, final_geometry)

    if final_h_bonds != initial_h_bonds:
        print(f"h-bonding has changed - old={initial_h_bonds} new={final_h_bonds}")
        return False

    return True
def _generate_conformers(
    smiles: str,
    n_conformers: int,
    qc_settings: List[Tuple[str, str, str]],
    n_processes: int,
    memory: int,
) -> unit.Quantity:
    """Generate up to ``n_conformers`` QC-minimized conformers for a molecule.

    Parameters
    ----------
    smiles
        The molecule to embed, as a SMILES pattern.
    n_conformers
        The maximum number of ELF-selected conformers to minimize.
    qc_settings
        (program, method, basis) triples; each conformer is minimized with
        each triple in turn, feeding the result of one level of theory into
        the next.
    n_processes
        The number of cores handed to the QC engine.
    memory
        The memory budget handed to qcengine's ``local_options``
        (presumably GiB — confirm against the CLI help).

    Returns
    -------
    The conformers that survived validation (unchanged connectivity and
    hydrogen-bond network) as a quantity in bohr.
    """
    toolkit_registry = ToolkitRegistry([OpenEyeToolkitWrapper()])

    # 1. Generate a diverse set of ELF conformers.
    try:
        off_molecule: Molecule = Molecule.from_smiles(smiles)
    except UndefinedStereochemistryError:
        # Undefined stereochemistry: pick a single concrete stereoisomer so
        # conformer generation can proceed.
        off_molecule: Molecule = Molecule.from_smiles(
            smiles, allow_undefined_stereo=True
        )

        stereoisomers = off_molecule.enumerate_stereoisomers(
            undefined_only=True, max_isomers=1
        )

        if len(stereoisomers) > 0:
            off_molecule = stereoisomers[0]

    # Over-generate, then let ELF pick a diverse, low-energy subset.
    off_molecule.generate_conformers(
        n_conformers=500,
        rms_cutoff=0.5 * unit.angstrom,
        toolkit_registry=toolkit_registry,
    )
    off_molecule.apply_elf_conformer_selection(toolkit_registry=toolkit_registry)

    # Reference bond set, as sorted atom-index pairs, for later validation.
    initial_connectivity = {
        tuple(sorted([bond.atom1_index, bond.atom2_index]))
        for bond in off_molecule.bonds
    }

    final_conformers = []

    for i, conformer in enumerate(
        tqdm(off_molecule.conformers[:n_conformers], desc="CONF")
    ):
        initial_h_bonds = _find_h_bonds(off_molecule, conformer)
        initial_molecule = off_molecule.to_qcschema(conformer=i)

        current_molecule = initial_molecule

        # 2. minimize the conformer using the requested QC settings.
        for program, method, basis in qc_settings:

            optimization_input = OptimizationInput(
                keywords={
                    "program": program,
                    "coordsys": "dlc",
                    "convergence_set": "GAU_LOOSE",
                    "maxiter": 300,
                },
                input_specification=QCInputSpecification(
                    model=Model(method=method, basis=basis),
                    driver=DriverEnum.gradient,
                ),
                initial_molecule=current_molecule,
            )

            # noinspection PyTypeChecker
            result: OptimizationResult = compute_procedure(
                optimization_input,
                "geometric",
                raise_error=True,
                local_options={
                    "ncores": n_processes,
                    "nnodes": 1,
                    "jobs_per_node": 1,
                    "memory": memory,
                },
            )

            # Chain the result into the next (higher) level of theory.
            final_molecule: QCMolecule = result.trajectory[-1].molecule
            current_molecule = final_molecule

        # 3. Discard conformers whose topology or H-bond pattern changed.
        is_valid = _validate_optimization(
            off_molecule, initial_connectivity, initial_h_bonds, final_molecule
        )

        if not is_valid:
            tqdm.write(f"conformer {i} did not validate")
            continue

        final_conformers.append(final_molecule.geometry.reshape(-1, 3))

    # QCElemental geometries are in bohr; attach the unit on return.
    return final_conformers * unit.bohr
@click.command()
@click.option(
    "--input",
    "input_path",
    help="The path to the input molecules. This should either be an SDF, a GZipped "
    "SDF, or a SMI file.",
    type=click.Path(exists=True, file_okay=True, dir_okay=False),
    required=True,
)
@click.option(
    "--output",
    "output_directory",
    help="The directory to store the output in.",
    type=click.Path(exists=False, file_okay=False, dir_okay=True),
    required=True,
)
@click.option(
    "--n-conformers",
    type=int,
    default=5,
    help="The maximum number of ELF conformers to generate per molecule.",
    show_default=True,
)
@click.option(
    "--qc-settings",
    help="Settings that describe the program, method, and basis to use when minimizing "
    "each conformer.",
    type=(str, str, str),
    default=("psi4", "hf", "6-31G*"),
    show_default=True,
)
@click.option(
    "--batch-size",
    type=int,
    default=128,
    help="The size of the batch to compute.",
    show_default=True,
)
@click.option(
    "--batch-idx",
    "batch_index",
    type=int,
    default=0,
    help="The (zero-based) index of the batch to compute.",
    show_default=True,
)
@click.option(
    "--n-processes",
    type=int,
    default=16,
    help="The number of processes to parallelize psi4 across.",
    show_default=True,
)
@click.option(
    "--memory",
    type=int,
    default=256,
    # FIX: help text said "memery".
    help="The maximum memory available to psi4 in GiB.",
    show_default=True,
)
def main(
    input_path: str,
    output_directory: str,
    n_conformers: int,
    qc_settings: Tuple[str, str, str],
    batch_size: int,
    batch_index: int,
    n_processes: int,
    memory: int,
):
    """Generate QC-minimized conformers for one batch of input molecules.

    Resumes from ``completed-<batch_index>.pkl`` when present, writes the
    successful (smiles, conformers) pairs back to that pickle, and records
    failures (with tracebacks) in ``failed-<batch_index>.json``.
    """
    smiles_list = [*stream_from_file(input_path, as_smiles=True)]
    # Select this worker's slice of the full molecule list.
    smiles_list = smiles_list[batch_index * batch_size : (batch_index + 1) * batch_size]

    qc_settings = [qc_settings]

    completed, failed = [], []

    os.makedirs(output_directory, exist_ok=True)

    with temporary_cd(output_directory):

        # Resume: drop molecules a previous run already finished.
        if os.path.isfile(f"completed-{batch_index}.pkl"):

            with open(f"completed-{batch_index}.pkl", "rb") as file:
                completed = pickle.load(file)

            tqdm.write(f"{len(completed)} SMILES already complete")

            for smiles, _ in completed:
                assert smiles in smiles_list
                smiles_list.remove(smiles)

        for smiles in tqdm(smiles_list, desc="SMILES"):

            try:
                conformers = _generate_conformers(
                    smiles, n_conformers, qc_settings, n_processes, memory
                )
                completed.append((smiles, conformers))
            except BaseException as e:
                # Record the failure with its traceback and keep going.
                failed.append(
                    (
                        smiles,
                        "\n".join(
                            traceback.format_exception(type(e), e, e.__traceback__)
                        ),
                    )
                )

        with open(f"completed-{batch_index}.pkl", "wb") as file:
            pickle.dump(completed, file)

        with open(f"failed-{batch_index}.json", "w") as file:
            json.dump(failed, file)


if __name__ == "__main__":
    main()
|
{"hexsha": "2af245003ce74282fac72f0b323e85f41a7f27f8", "size": 8363, "ext": "py", "lang": "Python", "max_stars_repo_path": "data-set-curation/qc-esp/generate-conformers.py", "max_stars_repo_name": "SimonBoothroyd/gnn-charge-models", "max_stars_repo_head_hexsha": "5342a175fac878629dcd93e617410b64447b4398", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data-set-curation/qc-esp/generate-conformers.py", "max_issues_repo_name": "SimonBoothroyd/gnn-charge-models", "max_issues_repo_head_hexsha": "5342a175fac878629dcd93e617410b64447b4398", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data-set-curation/qc-esp/generate-conformers.py", "max_forks_repo_name": "SimonBoothroyd/gnn-charge-models", "max_forks_repo_head_hexsha": "5342a175fac878629dcd93e617410b64447b4398", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7388316151, "max_line_length": 88, "alphanum_fraction": 0.6301566424, "include": true, "reason": "import numpy", "num_tokens": 1932}
|
import numpy as np

# Advent of Code 2021, day 7: find the alignment position minimizing fuel.
positions = np.loadtxt("input.txt", dtype=int, delimiter=",")

# Candidate alignment positions: every integer from 0 to the maximum crab
# position.  FIX: the original scanned 1..len(positions), which silently
# skips position 0 and any optimum beyond the number of crabs; results are
# unchanged whenever the optimum happens to fall in the old range.
candidates = np.arange(positions.max() + 1).reshape(-1, 1)

# Part 1: fuel is the absolute distance; broadcasting yields one row of
# per-crab costs per candidate position.
part1_fuel = np.abs(positions - candidates)
# Part 2: each extra step costs one more unit -> triangular number d*(d+1)/2.
part2_fuel = part1_fuel * (part1_fuel + 1) // 2

print("Part 1:", part1_fuel.sum(axis=1).min())
print("Part 2:", part2_fuel.sum(axis=1).min())
|
{"hexsha": "224082ab534f85ebfe5062be87315e6abb90f99a", "size": 347, "ext": "py", "lang": "Python", "max_stars_repo_path": "aoc-2021/day-07/day_7.py", "max_stars_repo_name": "bsamseth/advent-of-code-2018", "max_stars_repo_head_hexsha": "bdd3969e61fac3f1543c20983260aa9fda912e99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-03T17:09:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-03T17:09:13.000Z", "max_issues_repo_path": "aoc-2021/day-07/day_7.py", "max_issues_repo_name": "bsamseth/advent-of-code-2018", "max_issues_repo_head_hexsha": "bdd3969e61fac3f1543c20983260aa9fda912e99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-01T23:09:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T01:08:40.000Z", "max_forks_repo_path": "aoc-2021/day-07/day_7.py", "max_forks_repo_name": "bsamseth/advent-of-code", "max_forks_repo_head_hexsha": "65d40548057a86b6fda37aec8e7d5d473d627124", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6923076923, "max_line_length": 61, "alphanum_fraction": 0.6570605187, "include": true, "reason": "import numpy", "num_tokens": 119}
|
"""
    SemCor{S}

Handle to the SemCor corpus: holds the paths of every tagfile to be parsed.
"""
struct SemCor{S}
    filepaths::Vector{S}
end
"""
    SemCor(dirpath)

Build a `SemCor` corpus from a directory, locating the tagfiles whether
`dirpath` is the SemCor root (containing the `brown1`/`brown2` subcorpora),
a single subcorpus (containing a `tagfiles` directory), or a directory of
tagfiles itself.
"""
function SemCor(dirpath)
    @assert(isdir(dirpath), dirpath)
    entries = readdir(dirpath)
    if "brown1" ∈ entries && "brown2" ∈ entries
        tagdirs = joinpath.(dirpath, ["brown1", "brown2"], "tagfiles")
        paths = joinpath.(tagdirs[1], readdir(tagdirs[1]))
        append!(paths, joinpath.(tagdirs[2], readdir(tagdirs[2])))
    elseif "tagfiles" ∈ entries
        tagdir = joinpath(dirpath, "tagfiles")
        paths = joinpath.(tagdir, readdir(tagdir))
    else
        paths = joinpath.(dirpath, readdir(dirpath))
    end
    SemCor(paths)
end
# Default constructor: use the DataDeps-managed "SemCor 3.0" download.
SemCor() = SemCor(datadep"SemCor 3.0")
# Map human-friendly level names to indexing depth for
# MultiResolutionIterators: document/context -> 1, paragraph -> 2,
# sentence -> 3, word/token -> 4, character -> 5.
MultiResolutionIterators.levelname_map(::Type{SemCor}) = [
    :doc=>1, :contextfile=>1, :context=>1,
    :para=>2, :paragraph=>2,
    :sent=>3, :sentence=>3,
    :word=>4, :token=>4,
    :char=>5, :character=>5
    ]
"""
    parse_sense_annotated_word(line)

Parse a `<wf cmd=done ...>` SGML line carrying a WordNet sense annotation
into a `SenseAnnotatedWord`.  When several senses are listed (separated by
`;`), only the first is kept.
"""
function parse_sense_annotated_word(line::AbstractString)
    m = match(r"<wf cmd=done.* pos=(.*) lemma=(.*) wnsn=(.*) lexsn=(\d.*:\d*).*>(.*).*</wf>", line)
    pos, lemma, wnsn, lexsn, word = m.captures
    if ';' in wnsn
        # Discard extra senses beyond the first.
        wnsn = first(split(wnsn, ';'))
        lexsn = first(split(lexsn, ';'))
    end
    SenseAnnotatedWord(pos, lemma, parse(Int, wnsn), lexsn, word)
end
"Parse a POS-tagged (unsensed) `<wf>` line into a `PosTaggedWord`."
function parse_tagged_word(line::AbstractString)
    m = match(r"<wf cmd=.* pos=(.*).*>(.*)</wf>", line)
    PosTaggedWord(m.captures...)
end
"Parse a `<punc>` line into a `PosTaggedWord` tagged with the pseudo-POS \"PUNC\"."
function parse_punc(line::AbstractString)
    m = match(r"<punc>(.*)</punc>", line)
    PosTaggedWord("PUNC", first(m.captures))
end
"""
    parse_semcorfile(filename)

Parse a single SemCor tagfile into a `Document` of nested
paragraphs → sentences → `TaggedWord`s.
"""
function parse_semcorfile(filename)
    # `sent`/`para` are rebound by the callbacks below each time a new
    # <s>/<p> SGML tag is encountered, so they are declared up-front.
    local sent
    local para
    paras = @NestedVector(TaggedWord,3)()
    context = Document(intern(basename(filename)), paras)

    # structure callbacks: open a new paragraph / sentence container
    function new_paragraph(line)
        para = @NestedVector(TaggedWord,2)()
        push!(paras, para)
    end
    function new_sentence(line)
        sent = @NestedVector(TaggedWord,1)()
        push!(para, sent)
    end

    # word callbacks: append the parsed token to the current sentence
    get_tagged(line) = push!(sent, parse_tagged_word(line))
    get_sense_annotated(line) = push!(sent, parse_sense_annotated_word(line))
    get_punc(line) = push!(sent, parse_punc(line))

    # Parse: dispatch each line on its SGML prefix
    subparsers = [
        "<wf cmd=tag"=> get_tagged,
        "<wf cmd=ignore"=> get_tagged,
        # cmd=done lines only carry a sense when a lemma is present
        "<wf cmd=done"=> line -> contains(line,"lemma=") ? get_sense_annotated(line) : get_tagged(line),
        "<punc>"=> get_punc,
        "<context"=> ignore,
        "</context" => ignore,
        "<p" => new_paragraph,
        "</p" => ignore,
        "<s" => new_sentence,
        "</s" => ignore
    ]
    apply_subparsers(filename,subparsers)
    return context
end
"""
    load(corpus::SemCor, doc_buffersize=16)

Lazily parse every tagfile of `corpus`, yielding one `Document` per file
through a `Channel` buffered to `doc_buffersize` documents.
"""
function load(corpus::SemCor, doc_buffersize=16)
    Channel(;ctype=Document{@NestedVector(TaggedWord, 3), String}, csize=doc_buffersize) do ch
        for fn in corpus.filepaths
            doc = parse_semcorfile(fn)
            put!(ch, doc)
        end
    end
end
|
{"hexsha": "799cf382b4258cc173cab60699683884510f5afc", "size": 3077, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SemCor.jl", "max_stars_repo_name": "Ayushk4/CorpusLoaders.jl", "max_stars_repo_head_hexsha": "2bb7d7e0b4a04fc534f6e4900f4e9a2b04c30945", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SemCor.jl", "max_issues_repo_name": "Ayushk4/CorpusLoaders.jl", "max_issues_repo_head_hexsha": "2bb7d7e0b4a04fc534f6e4900f4e9a2b04c30945", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/SemCor.jl", "max_forks_repo_name": "Ayushk4/CorpusLoaders.jl", "max_forks_repo_head_hexsha": "2bb7d7e0b4a04fc534f6e4900f4e9a2b04c30945", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0283018868, "max_line_length": 115, "alphanum_fraction": 0.6018849529, "num_tokens": 877}
|
import math
import numpy as np
from tqdm import tqdm
from scalesim.scale_config import scale_config as cfg
class systolic_compute_is:
    def __init__(self):
        """Initialize all state with placeholder values; real values are
        installed later via set_params()."""
        # Params set by user
        self.config = cfg()
        self.ifmap_op_mat = np.zeros((1, 1))
        self.ofmap_op_mat = np.zeros((1, 1))
        self.filter_op_mat = np.zeros((1, 1))

        # Derived parameters (see set_params):
        # Sr = ifmap cols, Sc = ifmap rows, T = filter cols
        self.Sr = 0
        self.Sc = 0
        self.T = 0

        # Physical systolic-array dimensions from the config.
        self.arr_row = 0
        self.arr_col = 0

        # Number of folds needed to map the operands onto the array.
        self.row_fold = 1
        self.col_fold = 1

        # Generated matrices
        self.ifmap_op_mat_trans = np.zeros((1,1))
        self.ifmap_prefetch_matrix = np.zeros((1,1))
        self.filter_prefetch_matrix = np.zeros((1,1))

        self.ifmap_demand_matrix = np.zeros((1,1))
        self.ofmap_demand_matrix = np.zeros((1,1))
        self.filter_demand_matrix = np.zeros((1,1))

        # Generated metrics
        self.ifmap_reads = 0
        self.filter_reads = 0
        self.ofmap_writes = 0

        self.mapping_efficiency_per_fold = []
        self.compute_utility_per_fold = []

        # Flags guarding the staged computation order.
        self.params_set_flag = False
        self.prefetch_mat_ready_flag = False
        self.demand_mat_ready_flag = False
#
    def set_params(self,
                   config_obj=cfg(),
                   ifmap_op_mat = np.zeros((1,1)),
                   ofmap_op_mat = np.zeros((1,1)),
                   filter_op_mat = np.zeros((1,1))
                   ):
        """Install the operand matrices and derive the fold parameters.

        NOTE(review): the cfg() default is evaluated once at definition
        time and shared across all calls — confirm callers always pass an
        explicit config_obj.
        """
        self.config = config_obj
        self.ifmap_op_mat = ifmap_op_mat
        self.filter_op_mat = filter_op_mat
        self.ofmap_op_mat = ofmap_op_mat

        self.ifmap_op_mat_trans = np.transpose(self.ifmap_op_mat)

        # Inner (contraction) dimensions of the two operands must agree.
        ifmap_col = self.ifmap_op_mat.shape[1]
        filter_row= self.filter_op_mat.shape[0]

        assert ifmap_col == filter_row, "Dimension mismatch between operands"

        self.Sr = self.ifmap_op_mat.shape[1]
        self.Sc = self.ifmap_op_mat.shape[0]
        self.T = self.filter_op_mat.shape[1]

        self.arr_row, self.arr_col = self.config.get_array_dims()

        # Number of passes needed to cover each operand dimension with the
        # physical array.
        self.row_fold = math.ceil(self.Sr / self.arr_row)
        self.col_fold = math.ceil(self.Sc / self.arr_col)

        self.params_set_flag = True
#
    def create_prefetch_matrices(self):
        """Build the IFMAP and filter prefetch request matrices.

        Requires set_params() to have been called first."""
        assert self.params_set_flag, 'Parameters are not set'

        self.create_ifmap_prefetch_mat()
        self.create_filter_prefetch_mat()

        self.prefetch_mat_ready_flag = True
#
    def create_ifmap_prefetch_mat(self):
        """Assemble IFMAP prefetch requests, one slab per column fold,
        padded with -1 (null requests) where the array is under-utilized."""
        assert self.params_set_flag, 'Parameters are not set'

        for fc in range(self.col_fold):
            # Columns of the transposed IFMAP covered by this fold.
            start_col_idx = fc * self.arr_col
            end_col_idx = min(start_col_idx + self.arr_col, self.Sc)

            # Columns of the array left idle in this fold.
            delta = self.arr_col - (end_col_idx - start_col_idx)

            this_fold_prefetch = self.ifmap_op_mat_trans[:,start_col_idx: end_col_idx]

            #If there is under utilization, fill them with null requests
            if delta > 0:
                null_req_mat = np.ones((self.Sr, delta)) * -1
                this_fold_prefetch = np.concatenate((this_fold_prefetch, null_req_mat), axis=1)

            # Stack all folds vertically into one prefetch stream.
            if fc == 0:
                self.ifmap_prefetch_matrix = this_fold_prefetch
            else:
                self.ifmap_prefetch_matrix = np.concatenate((self.ifmap_prefetch_matrix, this_fold_prefetch), axis=0)

        # Note: ISSUE #15: no skewing happens in the IFMAP for IS so this issue does not apply.
#
    def create_filter_prefetch_mat(self):
        """Assemble filter prefetch requests per row fold, then linearize
        them in diagonal order to match the skewed demand pattern."""
        assert self.params_set_flag, 'Parameters are not set'

        for fr in range(self.row_fold):
            # Filter rows covered by this fold.
            row_start_id = fr * self.arr_row
            row_end_id = min(row_start_id + self.arr_row, self.Sr)

            # Rows of the array left idle in this fold.
            delta = self.arr_row - (row_end_id - row_start_id)

            this_fold_prefetch = self.filter_op_mat[row_start_id:row_end_id, :]
            this_fold_prefetch = np.transpose(this_fold_prefetch)

            # Pad idle array rows with null (-1) requests.
            if delta > 0:
                null_req_mat = np.ones((self.T, delta)) * -1
                this_fold_prefetch = np.concatenate((this_fold_prefetch, null_req_mat), axis=1)

            if fr == 0:
                self.filter_prefetch_matrix = this_fold_prefetch
            else:
                self.filter_prefetch_matrix = np.concatenate((self.filter_prefetch_matrix, this_fold_prefetch), axis=0)

        # Fixing ISSUE #15, #16
        # Roll out the matrices along the diagonal to account for temporal locality when there is a skew in demand
        M, N = self.filter_prefetch_matrix.shape
        num_elems = M * N
        num_diags = M + N
        prefetches = np.zeros((1, num_elems))
        idx = 0

        pbar = tqdm(total=M * N, disable=True)
        # print('DEBUG: Total = ' + str(num_elems) + ' Diags = ' + str(num_diags))
        # Walk anti-diagonals of the (M, N) matrix; within each diagonal,
        # emit entries from the highest valid row downward.
        for diag_id in range(num_diags):
            max_row_id = min(diag_id, M - 1)
            min_row_id = max(0, diag_id - N + 1)
            valid_rows = max_row_id - min_row_id + 1

            for offset in range(valid_rows):
                row_id = max_row_id - offset
                col_id = diag_id - row_id

                elem = self.filter_prefetch_matrix[row_id][col_id]
                prefetches[0, idx] = elem
                idx += 1
                pbar.update(1)

        pbar.close()

        # Flattened (1, M*N) stream in diagonal order.
        self.filter_prefetch_matrix = prefetches
#
def create_demand_matrices(self):
assert self.params_set_flag, 'Parameters are not set'
self.create_ifmap_demand_mat()
self.create_filter_demand_mat()
self.create_ofmap_demand_mat()
assert self.ifmap_demand_matrix.shape[0] == self.filter_demand_matrix.shape[0], 'IFMAP and Filter demands out of sync'
assert self.ofmap_demand_matrix.shape[0] == self.filter_demand_matrix.shape[0], 'OFMAP and Filter demands out of sync'
assert self.ifmap_demand_matrix.shape[1] == self.arr_col, 'IFMAP demands exceed the rows'
assert self.filter_demand_matrix.shape[1] == self.arr_row,'Filter demands exceed the cols'
assert self.ofmap_demand_matrix.shape[1] == self.arr_col, 'OFMAP demands exceed the cols'
self.demand_mat_ready_flag = True
#
    def create_ifmap_demand_mat(self):
        """Build the IFMAP demand matrix (one row per cycle, one column per
        systolic-array column) over all row/column folds.

        Each fold's slice of the transposed IFMAP operand matrix is padded
        with null (-1) requests for unmapped rows/columns, flipped vertically
        so the top element is pushed in last, and suffixed with null rows
        covering partial-sum generation and drain cycles.  Per-fold mapping
        efficiency and compute utilization are recorded as a side effect.
        """
        assert self.params_set_flag, 'Parameters are not set'

        # Cycles after each fold with no new IFMAP requests (compute + drain).
        inter_fold_gap_suffix = self.arr_row + self.arr_col + self.T - 2
        inter_fold_gap_suffix_mat = np.ones((inter_fold_gap_suffix, self.arr_col)) * -1

        for fc in range(self.col_fold):
            for fr in range(self.row_fold):
                # Fold slice boundaries, clipped to the operand size; the
                # deltas measure under-utilization of the array.
                row_start_id = fr * self.arr_row
                row_end_idx = min(row_start_id + self.arr_row, self.Sr)
                row_delta = self.arr_row - (row_end_idx - row_start_id)

                col_start_id = fc * self.arr_col
                col_end_idx = min(col_start_id + self.arr_col, self.Sc)
                col_delta = self.arr_col - (col_end_idx - col_start_id)

                # Indexing the cols with row start and row end idx are correct
                # See the comment on ifmap_prefetch generation
                this_fold_demand = self.ifmap_op_mat_trans[row_start_id:row_end_idx, col_start_id: col_end_idx]
                self.ifmap_reads += this_fold_demand.shape[0] * this_fold_demand.shape[1]

                # Take into account under utilization: pad unmapped columns,
                # then unmapped rows, with null (-1) requests.
                if col_delta > 0:
                    null_req_mat = np.ones((this_fold_demand.shape[0], col_delta)) * -1
                    this_fold_demand = np.concatenate((this_fold_demand, null_req_mat), axis=1)

                if row_delta > 0:
                    null_req_mat = np.ones((row_delta, self.arr_col)) * -1
                    this_fold_demand = np.concatenate((this_fold_demand, null_req_mat), axis=0)

                # The IFMAP elems are needed to be filled in reverse order to ensure that
                # top element is pushed in last to maintain alignment with the input elements
                this_fold_demand = np.flip(this_fold_demand, 0)

                # Account for the cycles for partial sum generation and accumulation
                this_fold_demand = np.concatenate((this_fold_demand, inter_fold_gap_suffix_mat), axis=0)

                # Calculate the mapping efficiency: fraction of MACs mapped.
                row_used = min(self.arr_row, row_end_idx - row_start_id)
                col_used = min(self.arr_col, col_end_idx - col_start_id)
                mac_used = row_used * col_used
                mapping_eff_this_fold = mac_used / (self.arr_row * self.arr_col)

                # Compute utilization: useful MAC cycles over total array cycles.
                cycles_this_fold = this_fold_demand.shape[0] + this_fold_demand.shape[1] - 1
                compute_cycles_this_fold = mac_used * self.T
                compute_util_this_fold = compute_cycles_this_fold / (self.arr_row * self.arr_col * cycles_this_fold)

                self.mapping_efficiency_per_fold.append(mapping_eff_this_fold)
                self.compute_utility_per_fold.append(compute_util_this_fold)

                if fr == 0 and fc == 0:
                    self.ifmap_demand_matrix = this_fold_demand
                else:
                    self.ifmap_demand_matrix = np.concatenate((self.ifmap_demand_matrix, this_fold_demand), axis=0)
        # Skew is not needed in IFMAP for IS
#
    def create_filter_demand_mat(self):
        """Build the filter demand matrix (one row per cycle, one column per
        systolic-array row) over all folds.

        Each fold is prefixed with null rows for the weight-load phase,
        suffixed with null rows for output drain, and skewed to model the
        systolic pipeline fill.
        """
        assert self.params_set_flag, 'Parameters are not set'

        # Null requests while weights are loaded into the array.
        inter_fold_gap_prefix = self.arr_row
        inter_fold_gap_prefix_mat = np.ones((inter_fold_gap_prefix, self.arr_row)) * -1

        # Null requests while the final outputs drain out.
        inter_fold_gap_suffix = self.arr_col - 1
        inter_fold_gap_suffix_mat = np.ones((inter_fold_gap_suffix, self.arr_row)) * -1

        for fc in range(self.col_fold):
            for fr in range(self.row_fold):
                row_start_id = fr * self.arr_row
                row_end_idx = min(row_start_id + self.arr_row, self.Sr)
                # Number of unmapped array rows in this fold.
                delta = self.arr_row - (row_end_idx - row_start_id)

                # Indexing the cols with row start and row end idx are correct
                # See the comment on ifmap_prefetch generation
                this_fold_demand = self.filter_op_mat[row_start_id: row_end_idx, :]
                this_fold_demand = np.transpose(this_fold_demand)
                self.filter_reads += this_fold_demand.shape[0] * this_fold_demand.shape[1]

                # Take into account under utilization: pad with null requests.
                if delta > 0:
                    null_req_mat = np.ones((self.T, delta)) * -1
                    this_fold_demand = np.concatenate((this_fold_demand, null_req_mat), axis=1)

                # Account for the cycles for weights to load
                this_fold_demand = np.concatenate((inter_fold_gap_prefix_mat, this_fold_demand), axis=0)

                # Account for the cycles for final output to drain out
                this_fold_demand = np.concatenate((this_fold_demand, inter_fold_gap_suffix_mat), axis=0)

                # Add skew to the filter demand matrix to reflect systolic
                # pipeline fill (the original comment wrongly said "IFMAP").
                this_fold_demand = skew_matrix(this_fold_demand)

                if fr == 0 and fc == 0:
                    self.filter_demand_matrix = this_fold_demand
                else:
                    self.filter_demand_matrix = np.concatenate((self.filter_demand_matrix, this_fold_demand), axis=0)
        # END of filter demand generation
#
def create_ofmap_demand_mat(self):
assert self.params_set_flag, 'Parameters are not set'
inter_fold_gap_prefix = 2 * self.arr_row - 1
inter_fold_gap_prefix_mat = np.ones((inter_fold_gap_prefix, self.arr_col)) * -1
for fc in range(self.col_fold):
for fr in range(self.row_fold):
col_start_id = fc * self.arr_col
col_end_idx = min(col_start_id + self.arr_col, self.Sc)
col_delta = self.arr_col - (col_end_idx - col_start_id)
this_fold_demand = self.ofmap_op_mat[col_start_id: col_end_idx, :]
this_fold_demand = np.transpose(this_fold_demand)
self.ofmap_writes = this_fold_demand.shape[0] * this_fold_demand.shape[1]
# Adding null requests when there is under utilization ie. no mapping along a few rows or cols
if col_delta > 0:
null_req_mat = np.ones((self.T, col_delta)) * -1
this_fold_demand = np.concatenate((this_fold_demand, null_req_mat), axis=1)
# Now add the prefix matrix
# These are the null demands to account for when the operands are streamed in
# and the OFMAPS are not ready
this_fold_demand = np.concatenate((inter_fold_gap_prefix_mat, this_fold_demand), axis=0)
# Add skew to the OFMAP demand matrix to reflect systolic pipeline fill
this_fold_demand = skew_matrix(this_fold_demand)
if fr == 0 and fc == 0:
self.ofmap_demand_matrix = this_fold_demand
else:
self.ofmap_demand_matrix = np.concatenate((self.ofmap_demand_matrix, this_fold_demand), axis=0)
# END of OFMAP demand generation
#
def get_ifmap_prefetch_mat(self):
if not self.prefetch_mat_ready_flag:
self.create_prefetch_matrices()
return self.ifmap_prefetch_matrix
#
def get_filter_prefetch_mat(self):
if not self.prefetch_mat_ready_flag:
self.create_prefetch_matrices()
return self.filter_prefetch_matrix
#
def get_prefetch_matrices(self):
if not self.prefetch_mat_ready_flag:
self.create_prefetch_matrices()
return self.ifmap_prefetch_matrix, self.filter_prefetch_matrix
#
def get_ifmap_demand_mat(self):
if not self.demand_mat_ready_flag:
self.create_demand_matrices()
return self.ifmap_demand_matrix
#
def get_filter_demand_mat(self):
if not self.demand_mat_ready_flag:
self.create_demand_matrices()
return self.filter_demand_matrix
#
def get_ofmap_demand_mat(self):
if not self.demand_mat_ready_flag:
self.create_demand_matrices()
return self.ofmap_demand_matrix
#
def get_demand_matrices(self):
if not self.demand_mat_ready_flag:
self.create_demand_matrices()
return self.ifmap_demand_matrix, self.filter_demand_matrix, self.ofmap_demand_matrix
#
def get_avg_mapping_efficiency(self):
assert self.demand_mat_ready_flag, 'Computes not ready yet'
agg = sum(self.mapping_efficiency_per_fold)
num = len(self.mapping_efficiency_per_fold)
avg_mapping_eff = agg / num
return avg_mapping_eff
#
def get_avg_compute_utilization(self):
assert self.demand_mat_ready_flag, 'Computes not ready yet'
agg = sum(self.compute_utility_per_fold)
num = len(self.compute_utility_per_fold)
avg_compute_util = agg / num
return avg_compute_util
#
def get_ifmap_requests(self):
assert self.demand_mat_ready_flag, 'Computes not ready yet'
return self.ifmap_reads
#
def get_filter_requests(self):
assert self.demand_mat_ready_flag, 'Computes not ready yet'
return self.filter_reads
#
def get_ofmap_requests(self):
assert self.demand_mat_ready_flag, 'Computes not ready yet'
return self.ofmap_writes
#
def skew_matrix(input_matrix_np):
    """Skew a demand matrix to model systolic pipeline fill/drain.

    Column ``c`` of the input is shifted down by ``c`` rows; vacated slots
    are filled with null requests (-1).  The result therefore has
    ``rows + cols - 1`` rows and the same number of columns.

    Improvement: the original built each column with repeated
    ``np.concatenate`` calls (quadratic in the output size); this version
    preallocates the output once and fills it in a single pass.

    :param input_matrix_np: 2D numpy array of demand addresses.
    :return: skewed 2D numpy array padded with -1.
    """
    rows = input_matrix_np.shape[0]
    cols = input_matrix_np.shape[1]

    if cols == 0:
        # Preserve the legacy degenerate-input behavior (loop never ran).
        return np.zeros((1, 1))

    out_matrix_np = -1 * np.ones((rows + cols - 1, cols))
    for c in range(cols):
        # Column c starts c cycles later than column 0.
        out_matrix_np[c:c + rows, c] = input_matrix_np[:, c]

    return out_matrix_np
|
{"hexsha": "c5531a3f81593868108e303adab0919df78ad2f0", "size": 16498, "ext": "py", "lang": "Python", "max_stars_repo_path": "version2/compute/systolic_compute_is.py", "max_stars_repo_name": "KangSK-KAIST/SCALE-Sim", "max_stars_repo_head_hexsha": "349dd7b7053ea5bac482183597d2c403ad061560", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "version2/compute/systolic_compute_is.py", "max_issues_repo_name": "KangSK-KAIST/SCALE-Sim", "max_issues_repo_head_hexsha": "349dd7b7053ea5bac482183597d2c403ad061560", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "version2/compute/systolic_compute_is.py", "max_forks_repo_name": "KangSK-KAIST/SCALE-Sim", "max_forks_repo_head_hexsha": "349dd7b7053ea5bac482183597d2c403ad061560", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1898148148, "max_line_length": 126, "alphanum_fraction": 0.6249242332, "include": true, "reason": "import numpy", "num_tokens": 3755}
|
__doc__ = """
"""
__author__ = "Rui Campos"
print("Importing `.materials.electron.makeGOS`")
from numpy import *
from scipy.optimize import fsolve
# Electron rest-mass energy in eV.
ELECTRON_REST_MASS = 0.51099895000e6
# Mean excitation energies (eV), indexed by Z-1 (see getData's return value).
#taken from PENELOPE
EXCITATION = [19.2, 41.8, 40.0, 63.7,
              76.0,81.0,82.0, 95.0, 115.0, 137.0, 149.0, 156.0, 166.0, 173.0, 173.0, 180.0, 174.0, 188.0, 190.0, 191.0, 216.0,
              233.0, 245.0, 257.0, 272.0, 286.0, 297.0, 311.0, 322.0,
              330.0, 334.0, 350.0, 347.0, 348.0, 343.0, 352.0, 363.0,
              366.0, 379.0, 393.0, 417.0, 424.0, 428.0, 441.0, 449.0,
              470.0, 470.0, 469.0, 488.0, 488.0, 487.0, 485.0, 491.0,
              482.0, 488.0, 491.0, 501.0, 523.0, 535.0, 546.0, 560.0,
              574.0, 580.0, 591.0, 614.0, 628.0, 650.0,
              658.0, 674.0, 684.0, 694.0, 705.0, 718.0, 727.0, 736.0, 746.0, 757.0, 790.0, 790.0, 800.0, 810.0, 823.0,
              823.0, 830.0, 825.0, 794.0, 827.0, 826.0, 841.0, 847.0, 878.0, 890.0, 902.0, 921.0, 934.0, 939.0, 952.0, 966.0, 980.0]
def getData(Z):
    """Read and parse the EADL data file for element Z.

    Returns ``(Aw, data, I)`` where ``Aw`` is the atomic weight from the file
    header, ``data`` maps EADL flag tuples ``(Yi, C, S, X1, Yo, I)`` to
    numeric tables, and ``I`` is the tabulated mean excitation energy
    ``EXCITATION[Z-1]``.
    """
    from MontyCarlo.settings import __montecarlo__
    from MontyCarlo.tools.data import getAxis, getTable

    # Resolve <package>/materials/EADL/<Z>.txt
    file_name = str(Z) + ".txt"
    file_path = __montecarlo__/'materials'/'EADL'/file_name
    del file_name
    path = str(file_path)
    del file_path

    # directory = str(__materials__)
    #path = directory + "\\EADL\\" + str(Z) + ".txt"
    with open(path, "r") as file:
        text = file.readlines()
    text = [line.strip('\n') for line in text]

    # Each EADL section ends with a terminator line; record the index of the
    # line right after each terminator as a section start.
    bookmarks = [0]
    for n, line in enumerate(text):
        if line == " 1":
            bookmarks += [n + 1]

    #gather all bookmarked text into a dict
    bookmark_ = bookmarks[0:-1]
    _ookmarks = bookmarks[1:]

    # File header: atomic number and atomic weight at fixed columns.
    line0 = text[0]
    Z = int(line0[0:3])
    Aw = float(line0[13:24])

    bookmarked_text = {}
    for i, j in zip(bookmark_, _ookmarks):
        # Each section starts with two header lines holding the flags.
        line1, line2 = text[i], text[i+1]

        #on line 1
        Yi = float(line1[7:9]) #particle identifier
        Yo = float(line1[10:12]) #secondary particle designator

        #on line 2
        C = float(line2[0:2]) #reaction descriptor
        I = float(line2[2:5]) #reaction property
        S = float(line2[5:8]) #reaction modifier
        X1 = float(line2[22:32]) #subshell designator

        flags = (Yi, C, S, X1, Yo, I)
        flags = tuple(map(int, flags))
        # Section body: everything between the headers and the terminator.
        bookmarked_text[flags] = text[i+2:j-1]

    # Parse each bookmarked section into a numeric table.
    data = {Id:getTable(bookmarked_text[Id]) for Id in bookmarked_text}
    return Aw, data, EXCITATION[Z-1]
class Molecule:
    """A molecule: a collection of `Atom` objects built from a chemical formula.

    The formula maps atomic number Z -> stoichiometric multiplicity x.
    Iterating over the molecule yields its atoms.
    """

    def __init__(self, formula, density):
        self.formula = formula
        self.density = density
        # Build one Atom per (Z, multiplicity) entry of the formula.
        self.ATOMS = [Atom(Zx, x) for Zx, x in formula.items()]

    def __repr__(self):
        pieces = [f" {atom.x}x{atom} \n" for atom in self]
        return "".join(pieces)

    def __iter__(self):
        return iter(self.ATOMS)

    def __getitem__(self, i):
        return self.ATOMS[i]
class Atom:
    """An atomic species with its shell structure read from the EADL data.

    Attributes:
        x: stoichiometric multiplicity inside the parent molecule.
        Z: atomic number.
        Aw: atomic weight (amu).
        I: mean excitation energy (eV).
        SHELLS: list of `Shell` objects, one per subshell.
    """
    SHELLS = []

    def __init__(self, Z, x):
        self.x = x
        self.Z = Z
        self.Aw, data, self.I = getData(Z)

        # Shell tables, keyed by EADL flag tuples.
        DATA = data[(0, 91, 0, 0, 0, 912)]
        DESIGNATORS = [int(design) for design in DATA[:, 0]]
        EL_NUMBERS = DATA[:, 1]
        BINDING_ENERGIES = data[(0, 91, 0, 0, 0, 913)][:, 1]

        # One Shell per subshell; binding energies scaled by 1e6
        # (presumably MeV -> eV — confirm against the EADL table units).
        self.SHELLS = [
            Shell(index=k,
                  i=DESIGNATORS[k],
                  fk=EL_NUMBERS[k],
                  Uk=BINDING_ENERGIES[k] * 1e6)
            for k in range(len(DESIGNATORS))
        ]

    def __repr__(self):
        repres = f"<Atom Z={self.Z}, Aw = {self.Aw} amu, I = {self.I} eV> \n"
        for shell in self:
            repres += f" {shell} \n"
        return repres

    def __iter__(self):
        yield from self.SHELLS

    def __len__(self):
        return len(self.SHELLS)

    def __getitem__(self, aslice):
        return self.SHELLS[aslice]

    def pop(self, i):
        return self.SHELLS.pop(i)
class Shell:
    """An atomic subshell (possibly an aggregate) used to build the GOS model.

    Attributes:
        i: subshell designator of the (first) underlying EADL shell.
        fk: oscillator strength / number of electrons in the shell.
        Uk: binding energy (eV).
        Wk: resonance energy (eV); assigned later by the model fit.
        INDEX / designators / BE: bookkeeping lists of the original shells
            this object aggregates (see `newShellFrom`).
        empty: used by ``__bool__``; True marks a conduction band with no
            electrons.  Bug fix: it now defaults to False at class level —
            previously ``bool(shell)`` raised AttributeError unless ``empty``
            had been assigned externally (it is only ever set on the
            conduction-band shell in `pyGOS`).
    """
    Wk = None
    empty = False

    def __init__(self, index = 0, i = 0, fk = 0, Uk = 0):
        self.INDEX = [index]
        self.BE = [Uk]
        self.designators = [i]
        self.i = i
        self.fk = fk
        self.Uk = Uk

    def __repr__(self):
        return f"<Shell #{self.i}, fk = {self.fk}, Uk = {self.Uk} eV, Wk = {self.Wk} eV>"

    def __bool__(self):
        return not self.empty
def select_shells(molecule):
    """
    Remove outer shells of all atoms in molecule. Skip hydrogen and helium.
    """
    removed = []
    for atom in molecule:
        # Atoms with fewer than two shells (H, He) keep their only shell.
        if len(atom) >= 2:
            # The outermost shell will contribute to plasmon oscillations.
            removed.append(atom.pop(-1))
    return removed
Na = 6.0221409e+23  # Avogadro's number [1/mol]
H_BAR = 6.5821e-16 #eV s
MASS_ELECTRON = 9.1094e-28 #g
ELECTRON_CHARGE = 4.8032e-10 #statC
from numpy import pi
def makeCB(molecule, selected_shells):
    """Build the conduction-band pseudo-shell from the selected outer shells.

    Side effects: stores the plasma energy ``omega`` (eV), the molecular
    number density ``N`` and the total electron count ``Ztot`` on the
    molecule for later use by the resonance fit.
    """
    # Molar mass of the compound, then the molecular number density.
    molar_mass = sum(atom.Aw * atom.x for atom in molecule)
    number_density = molecule.density * Na / molar_mass

    # Total number of electrons per molecule.
    total_Z = sum(atom.x * atom.Z for atom in molecule)

    # Plasma resonance energy (CGS units, as the constants above suggest).
    omega_sq = 4 * pi * number_density * total_Z * H_BAR**2 * ELECTRON_CHARGE**2 / MASS_ELECTRON
    plasma_energy = sqrt(omega_sq)

    # Conduction band carries the oscillator strength of the removed shells.
    cb_strength = sum(shell.fk for shell in selected_shells)
    cb = Shell(i=0, Uk=0, fk=cb_strength)
    cb.Wk = (cb_strength / total_Z) ** .5 * plasma_energy

    # Stash quantities needed later on the molecule.
    molecule.omega = plasma_energy
    molecule.N = number_density
    molecule.Ztot = total_Z
    return cb
def calculate_ressonance(molecule, cb):
    """Fit the scale factor ``a`` and assign a resonance energy Wk to every shell.

    Solves ``A + 0.5 * sum_k x*fk*ln((a*Uk)^2 + B*fk) = 0`` for ``a`` with
    ``fsolve`` so that the shell resonance energies reproduce the molecule's
    mean excitation energy (sum rule on Z*ln(I)).  Side effects: sets
    ``molecule.ZlnI``, ``molecule.I``, ``molecule.a`` and ``shell.Wk`` on every
    shell; prints the fitted ``a`` and the relative sum-rule error.
    """
    from scipy.optimize import fsolve
    from numpy import exp, log

    # Sum rule: Z*ln(I) accumulated over all atoms, then the mean excitation I.
    ZlnI = sum(atom.x*atom.Z*log(atom.I) for atom in molecule)
    I = exp(ZlnI/molecule.Ztot)
    molecule.ZlnI = ZlnI
    molecule.I = I

    # Conduction-band contribution enters only when the cb is non-empty.
    A = cb.fk*log(cb.Wk) - ZlnI if cb else -ZlnI
    B = 2*molecule.omega**2 / 3 / molecule.Ztot

    def eqn(a):
        # Residual of the sum rule for a given scale factor a.
        _sum = 0
        for atom in molecule:
            from_atom = 0
            for shell in atom:
                val = (a*shell.Uk)**2 + B*shell.fk
                from_atom += shell.fk * log(val)
            _sum += atom.x*from_atom
            #val = (a*shell.Uk)**2 + B*shell.fk*atom.x
            #_sum += atom.x*shell.fk*log( (a*shell.Uk)**2 + B*shell.fk*atom.x )
            #_sum += atom.x*( shell.fk*log( (a*shell.Uk)**2 + B*shell.fk*atom.x )
        return A +.5*_sum

    res = fsolve(eqn, exp(1/2))
    a = res[0]
    molecule.a = a
    print(f" > a = {a}")

    # Resonance energy of each shell from the fitted scale factor.
    omega = molecule.omega
    Ztot = molecule.Ztot
    for atom in molecule:
        for shell in atom:
            shell.Wk = ((shell.Uk*a)**2 + 2/3 * shell.fk * omega**2 / Ztot)**.5

    # Verify the sum rule with the newly assigned Wk values.
    _ZlnI = cb.fk*log(cb.Wk) if cb else 0
    for atom in molecule:
        for shell in atom:
            _ZlnI += atom.x*shell.fk*log(shell.Wk)
    err = (ZlnI - _ZlnI)/ZlnI *100
    print(" err = ", err, "%")
    return res[0]
from numpy import array
def makeF(molecule, cb):
    """Build the oscillator sum F(L) and the flat fk/Wk arrays.

    Returns ``(F, fk, Wk)`` where
    ``F(L) = omega^2 / Ztot * sum_k fk / (Wk^2 + L^2)``, summed over every
    shell of every atom plus the conduction band ``cb``.
    """
    # Flatten all shells of all atoms, then append the conduction band.
    all_shells = [shell for atom in molecule for shell in atom]
    all_shells.append(cb)

    fk = array([shell.fk for shell in all_shells])
    Wk = array([shell.Wk for shell in all_shells])

    omega = molecule.omega
    Ztot = molecule.Ztot

    def F(L):
        return omega**2 / Ztot * (fk / (Wk**2 + L**2)).sum()

    return F, fk, Wk
def makeDelta(molecule, fk, Wk):
    """Build the closure ``delta(L, beta2)`` for this molecule.

    ``delta(L, beta2) = sum_k fk*ln(1 + L^2/Wk^2)/Ztot
                        - L^2/omega^2 * (1 - beta2)``
    (NOTE(review): this has the shape of a density-effect correction —
    confirm against the model's reference.)
    """
    omega = molecule.omega
    Ztot = molecule.Ztot

    def delta(L, beta2):
        oscillator_term = (fk * log(1 + L**2 / Wk**2)).sum() / Ztot
        kinematic_term = L**2 / omega**2 * (1 - beta2)
        return oscillator_term - kinematic_term

    return delta
def getLfrom(F, beta2):
    """Solve ``F(L) = 1 - beta2`` for L (initial guess 5) and return the root."""
    root = fsolve(lambda L: F(L) - (1 - beta2), 5)
    return root[0]
def calculate_ZlnI(molecule, cb):
    """Accumulate ``sum_k fk*ln(Wk)`` over all shells plus the conduction band.

    The conduction-band term is skipped when ``cb`` is falsy (empty band).
    NOTE(review): unlike the check inside `calculate_ressonance`, the
    per-shell terms here are not weighted by the atomic multiplicity
    ``atom.x`` — confirm whether that is intentional.
    """
    total = cb.fk * log(cb.Wk) if cb else 0
    for atom in molecule:
        for shell in atom:
            total += shell.fk * log(shell.Wk)
    return total
def newShellFrom(group):
    """Merge a list of shells into a single effective shell.

    The merged shell carries the summed oscillator strength, the mean binding
    energy (bookkeeping only) and ``Wk = exp(sum fk*ln(Wk) / sum fk)``.
    Returns a list (empty, the unchanged single-element group, or one merged
    shell) so the result can be concatenated directly onto a shell list.
    """
    if not group:
        return []
    if len(group) == 1:
        return group

    total_fk = 0
    weighted_lnW = 0
    Uk_accum = 0
    for member in group:
        Uk_accum += member.Uk
        total_fk += member.fk
        weighted_lnW += member.fk * log(member.Wk)
    mean_Uk = Uk_accum / len(group)  # Uk is not needed, kept for bookkeeping

    merged = Shell(i=group[0].i, fk=total_fk, Uk=mean_Uk)
    merged.Wk = exp(weighted_lnW / total_fk)

    # Track which original shells went into this merged shell.
    merged.designators = []
    merged.INDEX = []
    merged.BE = []
    for member in group:
        merged.designators += member.designators
        merged.INDEX += member.INDEX
        merged.BE += member.BE

    return [merged]
def newMolecule(molecule):
    """Regroup each atom's shells into inner shells and merged outer shells.

    Shells bound below 1 keV are treated as "outer" and merged into a single
    effective shell (see `newShellFrom`); inner shells are kept individually
    (K) or merged per major shell (L, M, ...).  The Z thresholds decide which
    major shells can straddle the 1 keV boundary.  Mutates ``atom.SHELLS`` in
    place for every atom of the molecule.

    Bug fix: the single-inner-shell branches previously did
    ``newSHELLS += innerL[0]`` (and ``innerM[0]``), which tries to iterate a
    Shell object and raises TypeError; they now append the shell instead.
    """
    for atom in molecule:
        if atom.Z == 1:
            continue

        if atom.Z <= 27:
            # Only the K shell binds above 1 keV: keep it, merge the rest.
            newSHELLS = [atom[0]]
            newSHELLS += newShellFrom(atom[1:])
            atom.SHELLS = newSHELLS
            continue

        if atom.Z <= 51:
            # L shells may straddle the 1 keV threshold.
            newSHELLS = [atom[0]]
            innerL, outerL = [], []
            for L in atom[1:4]:
                if L.Uk < 1e3:
                    outerL.append(L)
                else:
                    innerL.append(L)
            if len(innerL) == 1:
                newSHELLS.append(innerL[0])  # was: newSHELLS += innerL[0] (TypeError)
            else:
                newSHELLS += newShellFrom(innerL)
            newSHELLS += newShellFrom(outerL + atom[4:])
            atom.SHELLS = newSHELLS
            continue

        if atom.Z <= 84:
            # L shells are all inner here; M shells may straddle the threshold.
            newSHELLS = [atom[0]]
            newSHELLS += newShellFrom(atom[1:4])
            innerM, outerM = [], []
            for M in atom[4:9]:
                if M.Uk < 1e3:
                    outerM.append(M)
                else:
                    innerM.append(M)
            if len(innerM) == 1:
                newSHELLS.append(innerM[0])  # was: newSHELLS += innerM[0] (TypeError)
            else:
                newSHELLS += newShellFrom(innerM)
            newSHELLS += newShellFrom(outerM + atom[9:])
            atom.SHELLS = newSHELLS
            continue

        # Z > 84: K, L and M are all inner; N shells may straddle the threshold.
        newSHELLS = [atom[0]]
        newSHELLS += newShellFrom(atom[1:4])
        newSHELLS += newShellFrom(atom[4:9])
        inner, outer = [], []
        for shell in atom[9:]:
            if shell.Uk < 1e3:
                outer.append(shell)
            else:
                inner.append(shell)
        newSHELLS += newShellFrom(inner)
        newSHELLS += newShellFrom(outer)
        atom.SHELLS = newSHELLS
def pyGOS(formula, density):
    """Build the GOS model for a compound with the given formula and density.

    Returns ``(molecule, cb, delta1)`` where ``cb`` is the conduction-band
    shell and ``delta1(beta2)`` evaluates the delta correction at a given
    ``beta2`` by first solving for the corresponding L.
    """
    from builtins import sum  # shadow numpy's star-imported sum locally

    molecule = Molecule(formula, density)
    plasmon_shells = select_shells(molecule)
    cb = makeCB(molecule, plasmon_shells)
    cb.empty = plasmon_shells == []

    calculate_ressonance(molecule, cb)

    F, fk, Wk = makeF(molecule, cb)
    delta = makeDelta(molecule, fk, Wk)
    delta1 = lambda beta2: delta(getLfrom(F, beta2), beta2)
    #we have F and delta! getLfrom

    # Compare sum(fk*ln(Wk)) before and after regrouping the shells.
    old = calculate_ZlnI(molecule, cb)
    newMolecule(molecule)
    new = calculate_ZlnI(molecule, cb)

    formula.log.add_paragraph(f"Ressonance energy of cb shell: {cb.Wk}")
    for atom in molecule:
        formula.log.add_paragraph(f"-----> Z = {atom.Z}")
        for shell in atom:
            formula.log.add_paragraph(f"------------> Wk = {shell.Wk}")

    #print(old, new)
    #print( (old - new)/old * 100, "%" )
    return molecule, cb, delta1
|
{"hexsha": "1c80b75c1bb973ddb9be56679e7ab99a6a454c58", "size": 13219, "ext": "py", "lang": "Python", "max_stars_repo_path": "MontyCarlo/materials/electron/makeGOS.py", "max_stars_repo_name": "ruicamposcolabpt/MontyCarlo", "max_stars_repo_head_hexsha": "8f9e7af78f010f44fda81a4ab064e32421a205f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MontyCarlo/materials/electron/makeGOS.py", "max_issues_repo_name": "ruicamposcolabpt/MontyCarlo", "max_issues_repo_head_hexsha": "8f9e7af78f010f44fda81a4ab064e32421a205f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MontyCarlo/materials/electron/makeGOS.py", "max_forks_repo_name": "ruicamposcolabpt/MontyCarlo", "max_forks_repo_head_hexsha": "8f9e7af78f010f44fda81a4ab064e32421a205f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.818359375, "max_line_length": 124, "alphanum_fraction": 0.506165368, "include": true, "reason": "from numpy,from scipy", "num_tokens": 4013}
|
# Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
from typing import Any, Dict, Optional, Sequence, Tuple
import numpy as np
import pytest
import tiledb
from tiledb.cf import AttrMetadata, DimMetadata, Group, GroupSchema, from_netcdf
from tiledb.cf.netcdf_engine import NetCDF4ConverterEngine
netCDF4 = pytest.importorskip("netCDF4")
class ConvertNetCDFBase:
    """Base class for NetCDF converter tests of NetCDF files with a single group.

    Parameters:
        name: Short descriptive name for naming NetCDF file.
        dimension_args: Arguments to use as input for creating NetCDF dimensions.
        variable_kwargs: Keyword argurments to use as input for creating NetCDF
            variables.
        variable_data: Map for NetCDF variable name to variable data.
        variable_metadata: Map from NetCDF variable name to a dictionary of metadata.
        group_metadata: A dictionary of metadata for the NetCDF group.
        attr_to_var_map: Map from TileDB attribute name to NetCDF variable name when
            converting with ``coords_to_dims=False``.
    """

    name = "base"
    dimension_args: Sequence[Tuple[str, Optional[int]]] = []
    variable_kwargs: Sequence[Dict[str, Any]] = []
    variable_data: Dict[str, np.ndarray] = {}
    variable_metadata: Dict[str, Dict[str, Any]] = {}
    group_metadata: Dict[str, Any] = {}
    attr_to_var_map: Dict[str, str] = {}

    @pytest.fixture(scope="class")
    def netcdf_file(self, tmpdir_factory):
        """Returns a NetCDF file created from the group variables.

        Use group variables to create a NetCDF file with a single root group.
        """
        filepath = tmpdir_factory.mktemp("input_file").join(f"{self.name}.nc")
        with netCDF4.Dataset(filepath, mode="w") as dataset:
            if self.group_metadata:
                dataset.setncatts(self.group_metadata)
            for dim_args in self.dimension_args:
                dataset.createDimension(*dim_args)
            for var_kwargs in self.variable_kwargs:
                variable = dataset.createVariable(**var_kwargs)
                if variable.name in self.variable_data:
                    variable[...] = self.variable_data[variable.name]
                if variable.name in self.variable_metadata:
                    variable.setncatts(self.variable_metadata[variable.name])
        return filepath

    def check_attrs(self, group_uri):
        """Checks the values in each attribute match the values from the NetCDF
        variable it was copied from.
        """
        with Group(group_uri) as group:
            for attr_name, var_name in self.attr_to_var_map.items():
                with group.open_array(attr=attr_name) as array:
                    nonempty_domain = array.nonempty_domain()
                    result = array.multi_index[nonempty_domain]
                    # Bug fix: the failure message was previously attached as
                    # a trailing tuple element after the call (a no-op); pass
                    # it through err_msg so it is shown on failure.
                    np.testing.assert_equal(
                        result[attr_name],
                        self.variable_data[var_name],
                        err_msg=f"unexpected values for attribute '{attr_name}'",
                    )

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_from_netcdf(self, netcdf_file, tmpdir, collect_attrs):
        """Integration test for `from_netcdf_file` function call."""
        uri = str(tmpdir.mkdir("output").join(self.name))
        from_netcdf(netcdf_file, uri, coords_to_dims=False, collect_attrs=collect_attrs)
        self.check_attrs(uri)

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_converter_from_netcdf(self, netcdf_file, tmpdir, collect_attrs):
        """Integration test for converting using `from_file` and `convert_to_group`."""
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file, coords_to_dims=False, collect_attrs=collect_attrs
        )
        uri = str(tmpdir.mkdir("output").join(self.name))
        assert isinstance(repr(converter), str)
        converter.convert_to_group(uri)
        self.check_attrs(uri)

    def test_converter_html_repr(self, netcdf_file):
        """Test generated HTML is valid."""
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        try:
            tidylib = pytest.importorskip("tidylib")
            html_summary = converter._repr_html_()
            _, errors = tidylib.tidy_fragment(html_summary)
        except OSError:
            pytest.skip("unable to import libtidy backend")
        assert not bool(errors), str(errors)
class TestConverterSimpleNetCDF(ConvertNetCDFBase):
    """NetCDF conversion test cases for a simple NetCDF file.

    Dimensions:
        row (8)
    Variables:
        float x1(row)
    """

    name = "simple1"
    dimension_args = (("row", 8),)
    variable_kwargs = (
        {"varname": "x1", "datatype": np.float64, "dimensions": ("row",)},
    )
    # x1 holds 8 evenly spaced samples in [1.0, 4.0].
    variable_data = {"x1": np.linspace(1.0, 4.0, 8)}
    attr_to_var_map = {"x1": "x1"}
    def test_convert_dense_with_non_netcdf_dims(self, netcdf_file, tmpdir):
        """Test converting the NetCDF variable 'x1' into a TileDB array with
        an extra non-NetCDF dimension."""
        uri = str(tmpdir.mkdir("output").join("dense_assigned_dim_values"))
        converter = NetCDF4ConverterEngine()
        dim_dtype = np.dtype("uint32")
        # Hand-built converter: one non-NetCDF shared dim "col" plus the
        # NetCDF-backed "row" dim from the input file.
        converter.add_shared_dim("col", domain=(0, 4), dtype=dim_dtype)
        with netCDF4.Dataset(netcdf_file) as netcdf_group:
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["row"], dtype=dim_dtype
            )
            converter.add_array_converter("array", ("row", "col"))
            converter.add_var_to_attr_converter(netcdf_group.variables["x1"], "array")
            # "col" has no NetCDF source, so its index is pinned to 2 at
            # conversion time via assigned_dim_values.
            converter.convert_to_array(
                uri, input_netcdf_group=netcdf_group, assigned_dim_values={"col": 2}
            )
        with tiledb.open(uri) as array:
            nonempty_domain = array.nonempty_domain()
            data = array[:, 2]
        # All data must land on the col == 2 slice of the dense array.
        assert nonempty_domain == ((0, 7), (2, 2))
        tiledb_array = data["x1"]
        original = self.variable_data["x1"]
        np.testing.assert_equal(tiledb_array, original)
def test_append_to_group(self, netcdf_file, tmpdir):
"""Tests adding the arrays from the converter to an existing group."""
uri = str(tmpdir.mkdir("output").join("append_example"))
tiledb.group_create(uri)
schema = tiledb.ArraySchema(
domain=tiledb.Domain(tiledb.Dim("d0", dtype=np.int32, domain=(0, 4))),
attrs=[tiledb.Attr("a2", dtype=np.float64)],
)
tiledb.Array.create(f"{uri}/original_array", schema)
converter = NetCDF4ConverterEngine.from_file(netcdf_file)
converter.convert_to_group(uri, append=True)
group_schema = GroupSchema.load(uri)
assert set(group_schema.keys()) == {"original_array", "array0"}
def test_append_to_group_bad_name_error(self, netcdf_file, tmpdir):
"""Tests raising error when appending to group with an array name that is in
the NetCDF4ConverterEngine."""
uri = str(tmpdir.mkdir("output").join("append_example"))
tiledb.group_create(uri)
schema = tiledb.ArraySchema(
domain=tiledb.Domain(tiledb.Dim("d0", dtype=np.int32, domain=(0, 4))),
attrs=[tiledb.Attr("a2", dtype=np.float64)],
)
tiledb.Array.create(f"{uri}/array0", schema)
converter = NetCDF4ConverterEngine.from_file(netcdf_file)
with pytest.raises(ValueError):
converter.convert_to_group(uri, append=True)
def test_convert_to_sparse_array(self, netcdf_file, tmpdir):
uri = str(tmpdir.mkdir("output").join("sparse_example"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
for array_creator in converter.array_creators():
array_creator.sparse = True
converter.convert_to_group(uri)
with tiledb.cf.Group(uri) as group:
with group.open_array(attr="x1") as array:
data = array[:]
index = np.argsort(data["row"])
x1 = data["x1"][index]
expected = np.linspace(1.0, 4.0, 8)
np.testing.assert_equal(x1, expected)
def test_no_collect_tiles_by_var(self, netcdf_file):
converter = NetCDF4ConverterEngine.from_file(
netcdf_file,
coords_to_dims=False,
collect_attrs=False,
tiles_by_var={"x1": (2,)},
)
group_schema = converter.to_schema()
tiles = tuple(dim.tile for dim in group_schema["x1"].domain)
assert tiles == (2,)
def test_no_collect_tiles_by_dims(self, netcdf_file):
converter = NetCDF4ConverterEngine.from_file(
netcdf_file,
coords_to_dims=False,
collect_attrs=False,
tiles_by_dims={("row",): (2,)},
)
group_schema = converter.to_schema()
tiles = tuple(dim.tile for dim in group_schema["x1"].domain)
assert tiles == (2,)
@pytest.mark.parametrize("collect_attrs", (True, False))
def test_coords_filter(self, netcdf_file, collect_attrs):
"""Tests all arrays have coords_filters set to desired value."""
filters = tiledb.FilterList([tiledb.ZstdFilter(7)])
converter = NetCDF4ConverterEngine.from_file(
netcdf_file, collect_attrs=collect_attrs, coords_filters=filters
)
for array_creator in converter.array_creators():
assert array_creator.coords_filters == filters
    @pytest.mark.parametrize("collect_attrs", (True, False))
    def test_offsets_filter(self, netcdf_file, collect_attrs):
        """Tests all arrays have offsets_filters set to the desired value.

        (Docstring fixed: it previously said ``coords_filters``, copied from
        the test above.)
        """
        filters = tiledb.FilterList([tiledb.ZstdFilter(7)])
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file, collect_attrs=collect_attrs, offsets_filters=filters
        )
        for array_creator in converter.array_creators():
            assert array_creator.offsets_filters == filters
    @pytest.mark.parametrize("collect_attrs", (True, False))
    def test_attrs_filter(self, netcdf_file, collect_attrs):
        """Tests all attributes have attrs_filters applied as the desired value.

        (Docstring fixed: it previously said ``coords_filters``, copied from
        the test above.)
        """
        filters = tiledb.FilterList([tiledb.ZstdFilter(7)])
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file, collect_attrs=collect_attrs, attrs_filters=filters
        )
        for array_creator in converter.array_creators():
            for attr_creator in array_creator:
                assert attr_creator.filters == filters
def test_rename_array(self, netcdf_file):
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
converter.get_array_creator("array0").name = "A1"
names = {array_creator.name for array_creator in converter.array_creators()}
assert names == set(["A1"])
def test_rename_attr(self, netcdf_file):
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
converter.get_array_creator_by_attr("x1").attr_creator("x1").name = "y1"
attr_names = {
attr_creator.name for attr_creator in next(converter.array_creators())
}
assert attr_names == set(["y1"])
def test_rename_dim(self, netcdf_file):
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
converter.get_shared_dim("row").name = "col"
dim_names = {shared_dim.name for shared_dim in converter.shared_dims()}
assert dim_names == set(["col"])
def test_non_netcdf_attr(self, netcdf_file, tmpdir):
"""Tests converting a NetCDF file with an external attribute."""
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
converter.add_attr_creator("a1", "array0", np.float64)
a1_data = np.linspace(-1.0, 1.0, 8, dtype=np.float64)
uri = str(tmpdir.mkdir("non-netcdf-attr-test"))
converter.convert_to_group(uri, assigned_attr_values={"a1": a1_data})
with Group(uri) as group:
with group.open_array(attr="a1") as array:
result = array[:]
np.testing.assert_equal(a1_data, result)
def test_non_netcdf_attr_missing_data_error(self, netcdf_file, tmpdir):
"""Tests error converting a NetCDF file with an external attribute when data is
missing."""
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
converter.add_attr_creator("a1", "array0", np.float64)
uri = str(tmpdir.mkdir("non-netcdf-attr-test"))
with pytest.raises(KeyError):
converter.convert_to_group(uri)
def test_non_netcdf_attr_bad_shape_error(self, netcdf_file, tmpdir):
    """Tests error converting a NetCDF file with an external attribute when the
    size of the provided data is mismatched."""
    engine = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
    engine.add_attr_creator("a1", "array0", np.float64)
    # Only 4 values are provided for the target array -> TileDB rejects the write.
    wrong_sized = np.linspace(-1.0, 1.0, 4, dtype=np.float64)
    output_uri = str(tmpdir.mkdir("non-netcdf-attr-test"))
    with pytest.raises(tiledb.libtiledb.TileDBError):
        engine.convert_to_group(output_uri, assigned_attr_values={"a1": wrong_sized})
def test_inject_dim_creator(self, netcdf_file):
    """Tests injecting a dimension into a NetCDF4ConverterArray."""
    engine = NetCDF4ConverterEngine.from_file(netcdf_file)
    engine.add_shared_dim("dim0", domain=(0, 4), dtype=np.uint32)
    target = engine.get_array_creator("array0")
    # Insert the new dimension at position 0, ahead of the NetCDF-backed dims.
    target.domain_creator.inject_dim_creator("dim0", 0)
    observed = tuple(dim_creator.name for dim_creator in target.domain_creator)
    assert observed == ("dim0", "row")
def test_inject_dim_mismatch_attr_dims_error(self, netcdf_file):
    """Tests that injecting a NetCDF-backed dimension that conflicts with the
    dimensions of the array's existing attributes raises a ``ValueError``."""
    converter = NetCDF4ConverterEngine.from_file(netcdf_file)
    # Diskless, in-memory dataset: provides a NetCDF dimension without I/O.
    with netCDF4.Dataset("inmemory", mode="w", diskless=True) as dataset:
        dim = dataset.createDimension("dim0", 10)
        converter.add_dim_to_dim_converter(dim)
    array_converter = converter.get_array_creator("array0")
    with pytest.raises(ValueError):
        array_converter.domain_creator.inject_dim_creator("dim0", 0)
def test_remove_dim_needed_by_attr_error(self, netcdf_file):
    """Tests raising an error when attempting to remove a dimension in a
    NetCDF4ConverterArray that is needed by an AttrCreator."""
    engine = NetCDF4ConverterEngine.from_file(netcdf_file)
    target = engine.get_array_creator("array0")
    # "row" backs at least one attribute, so removal must be rejected.
    with pytest.raises(ValueError):
        target.domain_creator.remove_dim_creator("row")
def test_bad_array_name_error(self, netcdf_file):
    """Adding an array under an already-used name raises a ``ValueError``."""
    engine = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
    with pytest.raises(ValueError):
        # "array0" already exists in the converter.
        engine.add_array_converter("array0", tuple())
class TestConvertNetCDFSimpleCoord1(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with a coordinate variable.

    Dimensions:
        x (4)
    Variables:
        real x (x)
        real y (x)
    """

    # Fixture description consumed by the base class to build the NetCDF file.
    name = "simple_coord_1"
    group_metadata = {"name": name}
    dimension_args = (("x", 4),)
    variable_kwargs = (
        {"varname": "x", "datatype": np.float64, "dimensions": ("x",)},
        {"varname": "y", "datatype": np.float64, "dimensions": ("x",)},
    )
    variable_data = {
        "x": np.array([2.0, 5.0, -1.0, 4.0]),
        "y": np.array([4.0, 25.0, 1.0, 16.0]),
    }
    variable_metadata = {
        "x": {"description": "x array"},
        "y": {"description": "y array"},
    }
    attr_to_var_map = {"x.data": "x", "y": "y"}

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_convert_coordinate(self, netcdf_file, tmpdir, collect_attrs):
        # Converting the coordinate to a dimension yields a sparse array; the
        # dimension domain must be opened up before conversion.
        uri = str(tmpdir.mkdir("output").join("coordinate_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
        )
        shared_x = converter.get_shared_dim("x")
        shared_x.domain = (None, None)
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(attr="y") as array:
                schema = array.schema
                assert schema.sparse
                data = array[:]
                attr_meta = AttrMetadata(array.meta, "y")
                assert (
                    attr_meta["description"] == "y array"
                ), "attribute metadata not correctly copied."
                dim_meta = DimMetadata(array.meta, "x")
                assert (
                    dim_meta["description"] == "x array"
                ), "dim metadata not correctly copied."
                # Sort by coordinate before comparing against expected values.
                index = np.argsort(data["x"])
                x = data["x"][index]
                y = data["y"][index]
                np.testing.assert_equal(x, np.array([-1.0, 2.0, 4.0, 5.0]))
                np.testing.assert_equal(y, np.array([1.0, 4.0, 16.0, 25.0]))

    def test_convert_to_array(self, netcdf_file, tmpdir):
        # Same conversion, but targeting a single TileDB array (not a group).
        uri = str(tmpdir.mkdir("output").join("array_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=True,
        )
        shared_x = converter.get_shared_dim("x")
        shared_x.domain = (None, None)
        converter.convert_to_array(uri)
        with tiledb.open(uri, attr="y") as array:
            schema = array.schema
            assert schema.sparse
            data = array[:]
            metadata_name = array.meta["name"]
            assert metadata_name == self.name
            # Sort by coordinate before comparing against expected values.
            index = np.argsort(data["x"])
            x = data["x"][index]
            y = data["y"][index]
            np.testing.assert_equal(x, np.array([-1.0, 2.0, 4.0, 5.0]))
            np.testing.assert_equal(y, np.array([1.0, 4.0, 16.0, 25.0]))

    @pytest.mark.parametrize(
        "collect_attrs, array_name", [(True, "array0"), (False, "y")]
    )
    def test_coordinate_tiles_by_var(self, netcdf_file, collect_attrs, array_name):
        # Per-variable tile extents apply to the coordinate dimension.
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
            tiles_by_var={"y": (100.0,)},
        )
        domain_creator = converter.get_array_creator(array_name).domain_creator
        tile = domain_creator.dim_creator(0).tile
        assert tile == 100.0

    @pytest.mark.parametrize(
        "collect_attrs, array_name", [(True, "array0"), (False, "y")]
    )
    def test_coordinate_tiles_by_dims(self, netcdf_file, collect_attrs, array_name):
        # Per-dimension-tuple tile extents apply to the coordinate dimension.
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
            tiles_by_dims={("x",): (100.0,)},
        )
        domain_creator = converter.get_array_creator(array_name).domain_creator
        tile = domain_creator.dim_creator(0).tile
        assert tile == 100.0

    def test_convert_coordinate_domain_not_set_error(self, netcdf_file):
        # A coordinate dimension without an assigned domain cannot produce a
        # valid schema.
        converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=True)
        with pytest.raises(ValueError):
            converter.to_schema()
class TestConvertNetCDFMultiCoords(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with two coordinate
    variables over separate dimensions.

    Dimensions:
        x (2)
        y (2)
    Variables:
        real x (x)
        real y (y)
        real z (x, y)
    """

    name = "multicoords"
    dimension_args = (("x", 2), ("y", 2))
    variable_kwargs = (
        {"varname": "x", "datatype": np.float64, "dimensions": ("x",)},
        {"varname": "y", "datatype": np.float64, "dimensions": ("y",)},
        {"varname": "z", "datatype": np.float64, "dimensions": ("x", "y")},
    )
    variable_data = {
        "x": np.array([2.0, 5.0]),
        "y": np.array([-1.0, 4.0]),
        "z": np.array([[4.0, 25.0], [1.0, 16.0]]),
    }
    attr_to_var_map = {"x.data": "x", "y.data": "y", "z": "z"}

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_convert_coordinate(self, netcdf_file, tmpdir, collect_attrs):
        # Both coordinates become sparse dimensions; their domains must be
        # opened up before conversion.
        uri = str(tmpdir.mkdir("output").join("coordinate_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
        )
        shared_x = converter.get_shared_dim("x")
        shared_x.domain = (None, None)
        shared_y = converter.get_shared_dim("y")
        shared_y.domain = (None, None)
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(attr="z") as array:
                schema = array.schema
                assert schema.sparse
                data = array[:]
                # Each cell is an (x, y, z) triple.
                result = tuple(zip(data["x"], data["y"], data["z"]))
                expected = (
                    (2.0, -1.0, 4.0),
                    (2.0, 4.0, 25.0),
                    (5.0, -1.0, 1.0),
                    (5.0, 4.0, 16.0),
                )
                assert result == expected
class TestConvertNetCDFCoordWithTiles(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with a coordinate variable
    where the data variable has NetCDF chunking set.

    Dimensions:
        index (4)
    Variables:
        int index (index)
        real y (index)
    """

    name = "coord_with_chunks"
    dimension_args = (("index", 4),)
    variable_kwargs = (
        {
            "varname": "index",
            "datatype": np.int32,
            "dimensions": ("index",),
        },
        {
            "varname": "y",
            "datatype": np.float64,
            "dimensions": ("index",),
            "chunksizes": (4,),
        },
    )
    variable_data = {
        "index": np.array([1, 2, 3, 4]),
        "y": np.array([4.0, 25.0, 1.0, 16.0]),
    }
    attr_to_var_map = {"index.data": "index", "y": "y"}

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_convert_coordinate(self, netcdf_file, tmpdir, collect_attrs):
        uri = str(tmpdir.mkdir("output").join("coordinate_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
        )
        # Coordinate dimensions need an explicit domain before conversion.
        index_dim = converter.get_shared_dim("index")
        index_dim.domain = (1, 4)
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(attr="y") as array:
                schema = array.schema
                assert schema.sparse
                data = array[:]
                # Sort by coordinate before comparing against fixture data.
                index_order = np.argsort(data["index"])
                index = data["index"][index_order]
                y = data["y"][index_order]
                np.testing.assert_equal(index, self.variable_data["index"])
                np.testing.assert_equal(y, self.variable_data["y"])
class TestConvertNetCDFUnlimitedDim(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with an unlimited dimension.

    Dimensions:
        row (None)
        col (4)
    Variables:
        uint16 x (row)
        uint16 y (col)
        uint16 data (row, col)
    """

    name = "simple_unlim_dim"
    # Size None -> "row" is an unlimited NetCDF dimension.
    dimension_args = (("row", None), ("col", 4))
    variable_kwargs = [
        {
            "varname": "data",
            "datatype": np.dtype("uint16"),
            "dimensions": ("row", "col"),
        },
        {"varname": "x", "datatype": np.dtype("uint16"), "dimensions": ("row",)},
        {"varname": "y", "datatype": np.dtype("uint16"), "dimensions": ("col",)},
    ]
    variable_data = {
        "data": np.array(
            ([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16])
        ),
        "x": np.array([1, 2, 3, 4]),
        "y": np.array([5, 6, 7, 8]),
    }
    attr_to_var_map = {"data": "data", "x": "x", "y": "y"}

    def test_convert_dense_with_non_netcdf_dims(self, netcdf_file, tmpdir):
        """Test converting the NetCDF variable 'data' into a dense TileDB array
        with an extra non-NetCDF dimension whose value is assigned at copy time."""
        uri = str(tmpdir.mkdir("output").join("dense_assigned_dim_values"))
        converter = NetCDF4ConverterEngine()
        dim_dtype = np.dtype("uint32")
        converter.add_shared_dim("extra", domain=(0, 4), dtype=dim_dtype)
        with netCDF4.Dataset(netcdf_file) as netcdf_group:
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["row"], dtype=dim_dtype
            )
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["col"], dtype=dim_dtype
            )
            # Array domain is (row, extra, col); "extra" has no NetCDF backing.
            converter.add_array_converter("array", ("row", "extra", "col"))
            converter.add_var_to_attr_converter(netcdf_group.variables["data"], "array")
            converter.convert_to_array(
                uri, input_netcdf_group=netcdf_group, assigned_dim_values={"extra": 2}
            )
        with tiledb.open(uri) as array:
            nonempty_domain = array.nonempty_domain()
            data = array[:, 2, :]
            # All data was written at extra == 2.
            assert nonempty_domain == ((0, 3), (2, 2), (0, 3))
            tiledb_array = data["data"]
            original = self.variable_data["data"]
            np.testing.assert_equal(tiledb_array, original)

    def test_convert_sparse_with_non_netcdf_dims(self, netcdf_file, tmpdir):
        """Test converting the NetCDF variable 'data' into a sparse TileDB array
        with an extra non-NetCDF dimension whose value is assigned at copy time."""
        uri = str(tmpdir.mkdir("output").join("dense_assigned_dim_values"))
        converter = NetCDF4ConverterEngine()
        dim_dtype = np.dtype("uint32")
        converter.add_shared_dim("extra", domain=(0, 4), dtype=dim_dtype)
        with netCDF4.Dataset(netcdf_file) as netcdf_group:
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["row"], dtype=dim_dtype
            )
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["col"], dtype=dim_dtype
            )
            converter.add_array_converter("array", ("row", "extra", "col"), sparse=True)
            converter.add_var_to_attr_converter(netcdf_group.variables["data"], "array")
            converter.convert_to_array(
                uri, input_netcdf_group=netcdf_group, assigned_dim_values={"extra": 2}
            )
        with tiledb.open(uri) as array:
            nonempty_domain = array.nonempty_domain()
            data = array[:, 2, :]
            assert nonempty_domain == ((0, 3), (2, 2), (0, 3))
            tiledb_array = data["data"]
            # Sparse reads return a flat array of cell values.
            original = self.variable_data["data"].reshape(-1)
            np.testing.assert_equal(tiledb_array, original)

    def test_copy_missing_dim(self, netcdf_file, tmpdir):
        """Test that copying raises a ``KeyError`` when the non-NetCDF dimension
        'extra' is given no assigned value."""
        uri = str(tmpdir.mkdir("output").join("dense_assigned_dim_values"))
        converter = NetCDF4ConverterEngine()
        dim_dtype = np.dtype("uint32")
        converter.add_shared_dim("extra", domain=(0, 4), dtype=dim_dtype)
        with netCDF4.Dataset(netcdf_file) as netcdf_group:
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["row"], dtype=dim_dtype
            )
            converter.add_dim_to_dim_converter(
                netcdf_group.dimensions["col"], dtype=dim_dtype
            )
            converter.add_array_converter("array", ("row", "extra", "col"))
            converter.add_var_to_attr_converter(netcdf_group.variables["data"], "array")
            # No value supplied for "extra" -> conversion must fail.
            with pytest.raises(KeyError):
                converter.convert_to_array(uri, input_netcdf_group=netcdf_group)
class TestConvertUnpackVariables(ConvertNetCDFBase):
    """NetCDF conversion test cases for a packed NetCDF variable.

    Dimensions:
        x (4)
    Variables:
        int x (x), packed with scale_factor=0.5 and add_offset=1.0
        real y (x)
    """

    name = "simple_coord_1"
    group_metadata = {"name": name}
    dimension_args = (("x", 4),)
    variable_kwargs = (
        {"varname": "x", "datatype": np.int32, "dimensions": ("x",)},
        {"varname": "y", "datatype": np.float64, "dimensions": ("x",)},
    )
    variable_data = {
        "x": np.array([1, 2, 3, 4]),
        "y": np.array([0.0, 0.5, 1.0, 1.5]),
    }
    # CF packing convention: unpacked = packed * scale_factor + add_offset.
    variable_metadata = {
        "x": {"scale_factor": np.float64(0.5), "add_offset": np.float64(1.0)},
    }
    attr_to_var_map = {"x.data": "x", "y": "y"}

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_unpack_to_attr(self, netcdf_file, tmpdir, collect_attrs):
        """Tests NetCDF variable is correctly unpacked when mapping to a TileDB
        attribute."""
        uri = str(tmpdir.mkdir("output").join("coordinate_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            collect_attrs=collect_attrs,
            unpack_vars=True,
        )
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(attr="x.data") as array:
                x = array[:]
                meta = AttrMetadata(array.meta, "x.data")
                # Packing metadata must not be copied since values were unpacked.
                assert "scale_factor" not in meta
                assert "add_offset" not in meta
                assert x.dtype == np.float64
                # [1, 2, 3, 4] * 0.5 + 1.0
                np.testing.assert_equal(x, np.array([1.5, 2.0, 2.5, 3.0]))

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_unpack_to_dim(self, netcdf_file, tmpdir, collect_attrs):
        """Tests NetCDF variable is correctly unpacked when mapping to a TileDB
        dimension."""
        uri = str(tmpdir.mkdir("output").join("coordinate_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=True,
            collect_attrs=collect_attrs,
            unpack_vars=True,
        )
        # The coordinate dim needs a domain/tile wide enough for unpacked values.
        shared_x = converter.get_shared_dim("x")
        shared_x.domain = (-10.0, 10.0)
        shared_x.tile = 2.0
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(attr="y") as array:
                data = array[:]
                meta = DimMetadata(array.meta, "x")
                assert "scale_factor" not in meta
                assert "add_offset" not in meta
                x = data["x"]
                y = data["y"]
                assert x.dtype == np.float64
                np.testing.assert_equal(x, np.array([1.5, 2.0, 2.5, 3.0]))
                np.testing.assert_equal(y, np.array([0.0, 0.5, 1.0, 1.5]))
class TestConvertNetCDFMultipleScalarVariables(ConvertNetCDFBase):
    """NetCDF conversion test cases for NetCDF with multiple scalar variables.

    Variables:
        int32 x () = [1]
        int32 y () = [5]
    """

    name = "scalar_variables"
    variable_kwargs = [
        {"varname": "x", "datatype": np.dtype("int32")},
        {"varname": "y", "datatype": np.dtype("int32")},
    ]
    variable_data = {
        "x": np.array([1]),
        "y": np.array([5]),
    }
    attr_to_var_map = {"x": "x", "y": "y"}

    def test_sparse_array(self, netcdf_file, tmpdir):
        # Conversion of scalar variables must also work with sparse arrays.
        uri = str(tmpdir.mkdir("output").join("sparse_scalar_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file, coords_to_dims=False, collect_attrs=False
        )
        for array_creator in converter.array_creators():
            array_creator.sparse = True
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            with group.open_array(array="scalars") as array:
                data = array[0]
                np.testing.assert_equal(data["x"], self.variable_data["x"])
                np.testing.assert_equal(data["y"], self.variable_data["y"])

    def test_scalar_assigned_dim_value(self, netcdf_file, tmpdir):
        """Tests setting the value for a scalar dimension when converting NetCDF
        scalars to TileDB."""
        uri = str(tmpdir.mkdir("output").join("scalar_assign_value_example"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        # Rename the auto-generated scalar dimension and give it a real domain.
        scalar_dim = converter.get_shared_dim("__scalars")
        scalar_dim.name = "dim0"
        scalar_dim.domain = (0, 10)
        converter.convert_to_group(uri, assigned_dim_values={"dim0": 1})
        with tiledb.cf.Group(uri) as group:
            with group.open_array(array="array0") as array:
                # Scalar values were written at index 1 of "dim0".
                data = array[1:2]
                np.testing.assert_equal(data["x"], self.variable_data["x"])
                np.testing.assert_equal(data["y"], self.variable_data["y"])

    def test_inject_dim(self, netcdf_file, tmpdir):
        """Tests converting NetCDF scalar dimensions to TileDB with an extra
        dimension added."""
        uri = str(tmpdir.mkdir("output").join("scalar_extra_dim_example"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        converter.add_shared_dim(dim_name="t", domain=(1, 8), dtype=np.dtype("uint64"))
        array_creator = converter.get_array_creator("array0")
        array_creator.domain_creator.inject_dim_creator("t", 1)
        converter.convert_to_group(uri, assigned_dim_values={"t": 1})
        with tiledb.cf.Group(uri) as group:
            with group.open_array(array="array0") as array:
                # Scalar values were written at t == 1.
                data = array[:, 1]
                np.testing.assert_equal(data["x"], self.variable_data["x"])
                np.testing.assert_equal(data["y"], self.variable_data["y"])
class TestConvertNetCDFMatchingChunks(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with two variables over the
    same dimensions with the same chunksizes.

    Dimensions:
        row (8)
        col (8)
    Variables:
        int32 x1(row, col) with chunksizes (4, 4)
        int32 x2(row, col) with chunksizes (4, 4)
    """

    name = "matching_chunks"
    dimension_args = [("row", 8), ("col", 8)]
    variable_kwargs = [
        {
            "varname": "x1",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
            "chunksizes": (4, 4),
        },
        {
            "varname": "x2",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
            "chunksizes": (4, 4),
        },
    ]
    variable_data = {
        "x1": np.arange(64).reshape(8, 8),
        "x2": np.arange(64, 128).reshape(8, 8),
    }
    attr_to_var_map = {"x1": "x1", "x2": "x2"}

    def test_tile(self, netcdf_file):
        # Matching NetCDF chunk sizes are adopted as the TileDB tile extents.
        converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
        group_schema = converter.to_schema()
        tiles = tuple(dim.tile for dim in group_schema["array0"].domain)
        assert tiles == (4, 4)

    def test_collect_attrs_tiles_by_dims(self, netcdf_file):
        # Explicit tiles_by_dims takes precedence over the NetCDF chunk sizes.
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            tiles_by_dims={("row", "col"): (2, 4)},
            coords_to_dims=False,
        )
        group_schema = converter.to_schema()
        tiles = tuple(dim.tile for dim in group_schema["array0"].domain)
        assert tiles == (2, 4)
class TestConvertNetCDFMismatchingChunks(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with two variables over the same
    dimensions and different chunksizes.

    Dimensions:
        row (8)
        col (8)
    Variables:
        int32 x1 (row, col) with chunksizes (4, 4)
        int32 x2 (row, col) with chunksizes (2, 2)
    """

    name = "mismatching_chunks"
    dimension_args = (("row", 8), ("col", 8))
    variable_kwargs = [
        {
            "varname": "x1",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
            "chunksizes": (4, 4),
        },
        {
            "varname": "x2",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
            "chunksizes": (2, 2),
        },
    ]
    variable_data = {
        "x1": np.arange(64).reshape(8, 8),
        "x2": np.arange(64, 128).reshape(8, 8),
    }
    attr_to_var_map = {"x1": "x1", "x2": "x2"}

    def test_tile(self, netcdf_file):
        # Conflicting NetCDF chunk sizes cannot both be adopted; the tile
        # extents fall back to the full dimension size.
        converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
        group_schema = converter.to_schema()
        tiles = tuple(dim.tile for dim in group_schema["array0"].domain)
        assert tiles == (8, 8)

    @pytest.mark.parametrize("collect_attrs", [True, False])
    def test_convert_sparse_arrays(self, tmpdir, netcdf_file, collect_attrs):
        """Tests conversion to sparse arrays with and without attribute
        collection."""
        uri = str(tmpdir.mkdir("output").join("sparse_multidim_example"))
        converter = NetCDF4ConverterEngine.from_file(
            netcdf_file,
            coords_to_dims=False,
            # Bug fix: the parametrized flag was previously not forwarded, so
            # both parameter values exercised the same (default) code path.
            collect_attrs=collect_attrs,
        )
        for array_creator in converter.array_creators():
            array_creator.sparse = True
        converter.convert_to_group(uri)
        with tiledb.cf.Group(uri) as group:
            # Opening by attribute works whether or not attrs were collected
            # into a single array.
            with group.open_array(attr="x1") as array:
                x1_result = array[:, :]["x1"]
                x1_expected = np.arange(64, dtype=np.int32)
                np.testing.assert_equal(x1_result, x1_expected)
            with group.open_array(attr="x2") as array:
                x2_result = array[:, :]["x2"]
                x2_expected = np.arange(64, 128, dtype=np.int32)
                np.testing.assert_equal(x2_result, x2_expected)
class TestConvertNetCDFSingleVariableChunk(ConvertNetCDFBase):
    """NetCDF conversion test cases for a NetCDF file with two variables: one with
    the chunksize defined and the other with no chunksize specified.

    Dimensions:
        row (8)
        col (8)
    Variables:
        int32 x1 (row, col) with chunksize=(4,4)
        int32 x2 (row, col)
    """

    name = "single_chunk_variable"
    dimension_args = (("row", 8), ("col", 8))
    variable_kwargs = (
        {
            "varname": "x1",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
            "chunksizes": (4, 4),
        },
        {
            "varname": "x2",
            "datatype": np.int32,
            "dimensions": ("row", "col"),
        },
    )
    variable_data = {
        "x1": np.arange(64).reshape(8, 8),
        "x2": np.arange(64, 128).reshape(8, 8),
    }
    attr_to_var_map = {"x1": "x1", "x2": "x2"}

    def test_tile_from_single_variable_chunks(self, netcdf_file):
        # The one variable that defines chunks determines the tile extents.
        converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=False)
        group_schema = converter.to_schema()
        tiles = tuple(dim.tile for dim in group_schema["array0"].domain)
        assert tiles == (4, 4)
class TestConverterNetCDFVariabelWithFill(ConvertNetCDFBase):
    # NOTE(review): class name misspells "Variable" as "Variabel"; kept as-is so
    # existing test-selection expressions keep working.
    """NetCDF conversion test cases for NetCDF variables with explicitly set fill
    value.

    Dimensions:
        t (4)
    Variables:
        int64 x(t) with fill = -1
        float64 a (scalar) with fill = 0.0
    """

    name = "test_fill_values"
    dimension_args = (("t", 4),)
    variable_kwargs = (
        {"varname": "x", "datatype": np.int64, "dimensions": ("t",), "fill_value": -1},
        {
            "varname": "a",
            "datatype": np.float64,
            "dimensions": tuple(),
            "fill_value": 0.0,
        },
    )
    variable_data = {"x": np.array((-1, 2, -1, -1)), "a": np.array([0.0])}
    attr_to_var_map = {"x": "x", "a": "a"}

    @pytest.mark.parametrize("sparse", [True, False])
    def test_change_fill(self, netcdf_file, tmpdir, sparse):
        """Test changing the fill value for a standard NetCDF variable."""
        uri = str(tmpdir.mkdir("output").join(self.name))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        x_array = converter.get_array_creator_by_attr("x")
        x_array.sparse = sparse
        # Cells holding the NetCDF fill (-1) read back with the new fill (0).
        x_array.attr_creator("x").fill = 0
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="x") as array:
                result = array.multi_index[:]["x"]
                expected = np.array((0, 2, 0, 0))
                np.testing.assert_equal(result, expected)

    @pytest.mark.parametrize("sparse", [True, False])
    def test_change_fill_scalar(self, netcdf_file, tmpdir, sparse):
        """Test changing the fill value for a NetCDF scalar variable."""
        uri = str(tmpdir.mkdir("output").join(self.name))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        # Test change fill for standard variable
        scalar_array = converter.get_array_creator_by_attr("a")
        scalar_array.sparse = sparse
        scalar_array.attr_creator("a").fill = np.nan
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="a") as array:
                result = array.multi_index[:]["a"]
                expected = np.array([np.nan])
                np.testing.assert_equal(result, expected)

    def test_fill_value_not_in_metadata(self, netcdf_file, tmpdir):
        """Test that NetCDF attribute `_FillValue` is not copied to TileDB."""
        uri = str(tmpdir.mkdir("output").join(self.name))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="x") as array:
                assert "_FillValue" not in array.meta
def test_virtual_from_netcdf(group1_netcdf_file, tmpdir):
    """Converting with use_virtual_groups=True writes flat ``<uri>_<path>`` arrays."""
    out_uri = str(tmpdir.mkdir("output").join("virtual1"))
    from_netcdf(
        group1_netcdf_file,
        out_uri,
        coords_to_dims=False,
        use_virtual_groups=True,
        collect_attrs=False,
    )
    x = np.linspace(-1.0, 1.0, 8)
    y = np.linspace(-1.0, 1.0, 4)
    # Root-level variable
    with tiledb.open(f"{out_uri}_x1", attr="x1") as array:
        np.testing.assert_equal(array[:], x)
    # Variables from group 3
    expected_by_attr = {
        "A1": np.outer(y, y),
        "A2": np.zeros((4, 4), dtype=np.float64),
        "A3": np.identity(4, dtype=np.int32),
    }
    for attr_name, expected in expected_by_attr.items():
        with tiledb.open(f"{out_uri}_group3_{attr_name}", attr=attr_name) as array:
            np.testing.assert_equal(array[:, :], expected)
def test_virtual_from_file(simple2_netcdf_file, tmpdir):
    """A converter built from a file path can write a virtual group."""
    out_uri = str(tmpdir.mkdir("output").join("virtual2"))
    engine = NetCDF4ConverterEngine.from_file(
        str(simple2_netcdf_file.filepath),
        coords_to_dims=False,
    )
    engine.convert_to_virtual_group(out_uri)
    schema = tiledb.ArraySchema.load(f"{out_uri}_array0")
    assert isinstance(schema, tiledb.ArraySchema)
    with tiledb.open(out_uri) as array:
        assert array.meta["name"] == "simple2"
def test_virtual_from_group(simple2_netcdf_file, tmpdir):
    """A converter built from an open NetCDF group can write a virtual group."""
    out_uri = str(tmpdir.mkdir("output").join("virtual3"))
    with netCDF4.Dataset(simple2_netcdf_file.filepath, mode="r") as dataset:
        engine = NetCDF4ConverterEngine.from_group(dataset, coords_to_dims=False)
        engine.convert_to_virtual_group(out_uri, input_netcdf_group=dataset)
    schema = tiledb.ArraySchema.load(f"{out_uri}_array0")
    assert isinstance(schema, tiledb.ArraySchema)
    with tiledb.open(out_uri) as array:
        assert array.meta["name"] == "simple2"
def test_group_metadata(tmpdir):
    """Group-level NetCDF attributes are copied into TileDB group metadata."""
    nc_path = str(tmpdir.mkdir("data").join("test_group_metadata.nc"))
    with netCDF4.Dataset(nc_path, mode="w") as dataset:
        dataset.setncattr("name", "Group metadata example")
        dataset.setncattr("array", [0.0, 1.0, 2.0])
    out_uri = str(tmpdir.mkdir("output").join("test_group_metadata"))
    from_netcdf(nc_path, out_uri, coords_to_dims=False)
    with Group(out_uri) as group:
        assert group.meta["name"] == "Group metadata example"
        # List-valued NetCDF attributes come back as tuples.
        assert group.meta["array"] == (0.0, 1.0, 2.0)
def test_group_metadata_no_copy(tmpdir):
    """With copy_metadata=False no NetCDF group attributes are copied."""
    nc_path = str(tmpdir.mkdir("data").join("test_group_metadata.nc"))
    with netCDF4.Dataset(nc_path, mode="w") as dataset:
        dataset.setncattr("name", "Group metadata example")
        dataset.setncattr("array", [0.0, 1.0, 2.0])
    out_uri = str(tmpdir.mkdir("output").join("test_group_metadata"))
    from_netcdf(nc_path, out_uri, coords_to_dims=False, copy_metadata=False)
    with Group(out_uri) as group:
        for key in ("name", "array"):
            assert key not in group.meta
def test_variable_metadata(tmpdir):
    """Variable-level NetCDF attributes are copied into TileDB attr metadata."""
    nc_path = str(tmpdir.mkdir("data").join("test_variable_metadata.nc"))
    with netCDF4.Dataset(nc_path, mode="w") as dataset:
        dataset.createDimension("row", 4)
        variable = dataset.createVariable("x1", np.float64, ("row",))
        variable[:] = np.array([1.0, 2.0, 3.0, 4.0])
        for key, value in (
            ("fullname", "Example variable"),
            ("array", [1, 2]),
            ("singleton", [1.0]),
        ):
            variable.setncattr(key, value)
    out_uri = str(tmpdir.mkdir("output").join("test_variable_metadata"))
    from_netcdf(nc_path, out_uri, coords_to_dims=False)
    with Group(out_uri) as group:
        with group.open_array(attr="x1") as array:
            attr_meta = AttrMetadata(array.meta, "x1")
            assert attr_meta is not None
            assert attr_meta["fullname"] == "Example variable"
            assert attr_meta["array"] == (1, 2)
            # Single-element lists are unwrapped to scalars.
            assert attr_meta["singleton"] == 1.0
def test_variable_metadata_no_copy(tmpdir):
    """With copy_metadata=False no NetCDF variable attributes are copied."""
    nc_path = str(tmpdir.mkdir("data").join("test_variable_metadata.nc"))
    with netCDF4.Dataset(nc_path, mode="w") as dataset:
        dataset.createDimension("row", 4)
        variable = dataset.createVariable("x1", np.float64, ("row",))
        variable[:] = np.array([1.0, 2.0, 3.0, 4.0])
        for key, value in (
            ("fullname", "Example variable"),
            ("array", [1, 2]),
            ("singleton", [1.0]),
        ):
            variable.setncattr(key, value)
    out_uri = str(tmpdir.mkdir("output").join("test_variable_metadata"))
    from_netcdf(nc_path, out_uri, coords_to_dims=False, copy_metadata=False)
    with Group(out_uri) as group:
        with group.open_array(attr="x1") as array:
            attr_meta = AttrMetadata(array.meta, "x1")
            assert attr_meta is not None
            for key in ("fullname", "array", "singleton"):
                assert key not in attr_meta
def test_nested_groups(tmpdir, group1_netcdf_file):
    """Tests converting a NetCDF file with nested groups into a matching
    hierarchy of TileDB groups."""
    root_uri = str(tmpdir.mkdir("output").join("test_example_group1"))
    from_netcdf(group1_netcdf_file, root_uri, coords_to_dims=False)
    x = np.linspace(-1.0, 1.0, 8)
    y = np.linspace(-1.0, 1.0, 4)
    # Test root
    with Group(root_uri) as group:
        with group.open_array(attr="x1") as array:
            x1 = array[:]
    np.testing.assert_equal(x1, x)
    # Test group 1
    with Group(root_uri + "/group1") as group:
        with group.open_array(attr="x2") as array:
            x2 = array[:]
    np.testing.assert_equal(x2, 2.0 * x)
    # Test group 2
    with Group(root_uri + "/group1/group2") as group:
        with group.open_array(attr="y1") as array:
            y1 = array[:]
    np.testing.assert_equal(y1, y)
    # Test group 3: its three attributes share a single array.
    with tiledb.open(root_uri + "/group3/array0") as array:
        array0 = array[:, :]
    A1 = array0["A1"]
    A2 = array0["A2"]
    A3 = array0["A3"]
    np.testing.assert_equal(A1, np.outer(y, y))
    np.testing.assert_equal(A2, np.zeros((4, 4), dtype=np.float64))
    np.testing.assert_equal(A3, np.identity(4, dtype=np.int32))
def test_variable_fill(tmpdir):
    """Test converting a NetCDF variable with the _FillValue NetCDF attribute set."""
    nc_path = str(tmpdir.mkdir("sample_netcdf").join("test_fill.nc"))
    with netCDF4.Dataset(nc_path, mode="w") as dataset:
        dataset.createDimension("row", 4)
        dataset.createVariable("x1", np.dtype("int64"), ("row",), fill_value=-1)
        engine = NetCDF4ConverterEngine.from_group(dataset, coords_to_dims=False)
        # The NetCDF fill value is carried over to the attr creator.
        assert engine._registry.get_attr_creator("x1").fill == -1
def test_collect_attrs_tile_by_var(simple2_netcdf_file):
    """Tile extents given per-variable apply to the collected array's domain."""
    engine = NetCDF4ConverterEngine.from_file(
        simple2_netcdf_file.filepath,
        tiles_by_var={"x1": (4,)},
        coords_to_dims=False,
    )
    schema = engine.to_schema()
    observed = tuple(dim.tile for dim in schema["array0"].domain)
    assert observed == (4,)
def test_copy_no_var_error(tmpdir, simple1_netcdf_file, simple2_netcdf_file):
    """Copying from a NetCDF file missing an expected variable raises KeyError.

    The converter is built from ``simple2`` but the copy reads ``simple1``,
    which does not provide all the variables the converter expects.
    """
    converter = NetCDF4ConverterEngine.from_file(
        simple2_netcdf_file.filepath,
        coords_to_dims=False,
    )
    # Bug fix: removed leftover debug `print(converter)`.
    uri = str(tmpdir.mkdir("output").join("test_copy_error"))
    converter.create_group(uri)
    with pytest.raises(KeyError):
        converter.copy_to_group(uri, input_file=simple1_netcdf_file.filepath)
def test_mismatched_netcdf_dims():
    """A variable whose NetCDF dims don't match the target array dims is rejected."""
    with netCDF4.Dataset("example.nc", mode="w", diskless=True) as dataset:
        x_dim = dataset.createDimension("x")
        y_dim = dataset.createDimension("y")
        var = dataset.createVariable("value", np.float64, ("y", "x"))
        engine = NetCDF4ConverterEngine()
        for nc_dim in (x_dim, y_dim):
            engine.add_dim_to_dim_converter(nc_dim)
        # The target array uses ("x", "y") while the variable is ("y", "x").
        engine.add_array_converter("array", ("x", "y"))
        with pytest.raises(ValueError):
            engine.add_var_to_attr_converter(array_name="array", ncvar=var)
|
{"hexsha": "0e36bb1600af478dd4ead56a25517472f8c33572", "size": 49118, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/netcdf_engine/test_netcdf4_converter_engine.py", "max_stars_repo_name": "TileDB-Inc/TileDB-CF-Py", "max_stars_repo_head_hexsha": "9aab0fe9ba7346a1846c7458a5d08b123dcf90a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-06-07T16:51:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T12:48:00.000Z", "max_issues_repo_path": "tests/netcdf_engine/test_netcdf4_converter_engine.py", "max_issues_repo_name": "TileDB-Inc/TileDB-CF-Py", "max_issues_repo_head_hexsha": "9aab0fe9ba7346a1846c7458a5d08b123dcf90a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 72, "max_issues_repo_issues_event_min_datetime": "2021-04-28T21:49:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T13:58:11.000Z", "max_forks_repo_path": "tests/netcdf_engine/test_netcdf4_converter_engine.py", "max_forks_repo_name": "TileDB-Inc/TileDB-CF-Py", "max_forks_repo_head_hexsha": "9aab0fe9ba7346a1846c7458a5d08b123dcf90a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-11T16:33:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T20:31:12.000Z", "avg_line_length": 41.1718357083, "max_line_length": 88, "alphanum_fraction": 0.6229488171, "include": true, "reason": "import numpy", "num_tokens": 11975}
|
# -*- coding: utf-8 -*-
#
# Test proper functionality of Syncopy's `BaseData` class + helpers
#
# Builtin/3rd party package imports
import os
import tempfile
import h5py
import time
import pytest
import numpy as np
from numpy.lib.format import open_memmap
from memory_profiler import memory_usage
# Local imports
from syncopy.datatype import AnalogData
import syncopy.datatype as spd
from syncopy.datatype.base_data import VirtualData
from syncopy.shared.errors import SPYValueError, SPYTypeError
from syncopy.tests.misc import is_win_vm, is_slurm_node
# Construct decorators for skipping certain tests
# skip_in_vm: skips tests that behave unreliably inside a Windows virtual machine
skip_in_vm = pytest.mark.skipif(is_win_vm(), reason="running in Win VM")
# skip_in_slurm: skips tests that must not run on a SLURM cluster node
skip_in_slurm = pytest.mark.skipif(is_slurm_node(), reason="running on cluster node")
class TestVirtualData():
    """Tests for `VirtualData` (virtual column-wise concatenation of 2D memmaps).

    NOTE(review): the memory assertions rely on `memory_usage` from
    `memory_profiler` and use heuristic thresholds, not exact values.
    """

    # Allocate test-dataset
    nChannels = 5
    nSamples = 30
    data = np.arange(1, nChannels * nSamples + 1).reshape(nSamples, nChannels)

    def test_alloc(self):
        # Constructor input validation + shape bookkeeping (M rows, N total cols)
        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "vdat")
            np.save(fname, self.data)
            dmap = open_memmap(fname + ".npy")

            # illegal type
            with pytest.raises(SPYTypeError):
                VirtualData({})

            # 2darray expected
            d3 = np.ones((2, 3, 4))
            np.save(fname + "3", d3)
            d3map = open_memmap(fname + "3.npy")
            with pytest.raises(SPYValueError):
                VirtualData([d3map])

            # rows/cols don't match up
            with pytest.raises(SPYValueError):
                VirtualData([dmap, dmap.T])

            # check consistency of VirtualData object
            # (stacking the same memmap vk times: rows unchanged, cols multiply)
            for vk in range(2, 6):
                vdata = VirtualData([dmap] * vk)
                assert vdata.dtype == dmap.dtype
                assert vdata.M == dmap.shape[0]
                assert vdata.N == vk * dmap.shape[1]

            # Delete all open references to file objects b4 closing tmp dir
            del dmap, vdata, d3map

    def test_retrieval(self):
        # Indexing must behave like a horizontal concatenation of the sources
        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "vdat.npy")
            fname2 = os.path.join(tdir, "vdat2.npy")
            np.save(fname, self.data)
            np.save(fname2, self.data * 2)
            dmap = open_memmap(fname)
            dmap2 = open_memmap(fname2)

            # ensure stacking is performed correctly
            vdata = VirtualData([dmap, dmap2])
            assert np.array_equal(vdata[:, :self.nChannels], self.data)
            assert np.array_equal(vdata[:, self.nChannels:], 2 * self.data)
            assert np.array_equal(vdata[:, 0].flatten(), self.data[:, 0].flatten())
            assert np.array_equal(vdata[:, self.nChannels].flatten(), 2 * self.data[:, 0].flatten())
            assert np.array_equal(vdata[0, :].flatten(),
                                  np.hstack([self.data[0, :], 2 * self.data[0, :]]))
            # three-source stacking: first/second/third column banks
            vdata = VirtualData([dmap, dmap2, dmap])
            assert np.array_equal(vdata[:, :self.nChannels], self.data)
            assert np.array_equal(vdata[:, self.nChannels:2 * self.nChannels], 2 * self.data)
            assert np.array_equal(vdata[:, 2 * self.nChannels:], self.data)
            assert np.array_equal(vdata[:, 0].flatten(), self.data[:, 0].flatten())
            assert np.array_equal(vdata[:, self.nChannels].flatten(),
                                  2 * self.data[:, 0].flatten())
            assert np.array_equal(vdata[0, :].flatten(),
                                  np.hstack([self.data[0, :], 2 * self.data[0, :], self.data[0, :]]))

            # illegal indexing type
            with pytest.raises(SPYTypeError):
                vdata[{}, :]

            # queried indices out of bounds
            with pytest.raises(SPYValueError):
                vdata[:, self.nChannels * 3]
            with pytest.raises(SPYValueError):
                vdata[self.nSamples * 2, 0]

            # Delete all open references to file objects b4 closing tmp dir
            del dmap, dmap2, vdata

    @skip_in_vm
    @skip_in_slurm
    def test_memory(self):
        # NOTE(review): thresholds are tuned for a ~38 MB on-disk array
        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "vdat.npy")
            data = np.ones((1000, 5000))  # ca. 38.2 MB
            np.save(fname, data)
            del data
            dmap = open_memmap(fname)

            # allocation of VirtualData object must not consume memory
            mem = memory_usage()[0]
            vdata = VirtualData([dmap, dmap, dmap])
            assert np.abs(mem - memory_usage()[0]) < 1

            # test consistency and efficacy of clear method:
            # clearing must not change values (data is reloaded from disk),
            # but the second clear must actually release >100 MB of caches
            vd = vdata[:, :]
            vdata.clear()
            assert np.array_equal(vd, vdata[:, :])
            mem = memory_usage()[0]
            vdata.clear()
            assert (mem - memory_usage()[0]) > 100

            # Delete all open references to file objects b4 closing tmp dir
            del dmap, vdata
# Test BaseData methods that work identically for all regular classes
class TestBaseData():
    """Tests of `BaseData` functionality shared by all Syncopy data classes.

    Each test iterates over `classes` and exercises the respective class via
    `getattr(spd, dclass)`, so all concrete containers share the same checks.
    """

    # Allocate test-datasets for AnalogData, SpectralData, SpikeData and EventData objects
    nChannels = 10
    nSamples = 30
    nTrials = 5
    nFreqs = 15
    nSpikes = 50
    data = {}
    trl = {}

    # Generate 2D array simulating an AnalogData array
    # trl columns: [start, stop, offset, trialinfo] per trial
    data["AnalogData"] = np.arange(1, nChannels * nSamples + 1).reshape(nSamples, nChannels)
    trl["AnalogData"] = np.vstack([np.arange(0, nSamples, 5),
                                   np.arange(5, nSamples + 5, 5),
                                   np.ones((int(nSamples / 5), )),
                                   np.ones((int(nSamples / 5), )) * np.pi]).T

    # Generate a 4D array simulating a SpectralData array
    data["SpectralData"] = np.arange(1, nChannels * nSamples * nTrials * nFreqs + 1).reshape(nSamples, nTrials, nFreqs, nChannels)
    trl["SpectralData"] = trl["AnalogData"]

    # Use a fixed random number generator seed to simulate a 2D SpikeData array
    seed = np.random.RandomState(13)
    data["SpikeData"] = np.vstack([seed.choice(nSamples, size=nSpikes),
                                   seed.choice(nChannels, size=nSpikes),
                                   seed.choice(int(nChannels/2), size=nSpikes)]).T
    trl["SpikeData"] = trl["AnalogData"]

    # Use a simple binary trigger pattern to simulate EventData
    data["EventData"] = np.vstack([np.arange(0, nSamples, 5),
                                   np.zeros((int(nSamples / 5), ))]).T
    data["EventData"][1::2, 1] = 1
    trl["EventData"] = trl["AnalogData"]

    # Define data classes to be used in tests below
    classes = ["AnalogData", "SpectralData", "SpikeData", "EventData"]

    # Allocation to `data` property is tested with all members of `classes`
    def test_data_alloc(self):
        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "dummy.npy")
            hname = os.path.join(tdir, "dummy.h5")

            for dclass in self.classes:

                # attempt allocation with random file
                with open(fname, "w") as f:
                    f.write("dummy")
                # with pytest.raises(SPYValueError):
                #     getattr(spd, dclass)(fname)

                # allocation with HDF5 file
                h5f = h5py.File(hname, mode="w")
                h5f.create_dataset("dummy", data=self.data[dclass])
                h5f.close()
                # dummy = getattr(spd, dclass)(filename=hname)
                # assert np.array_equal(dummy.data, self.data[dclass])
                # assert dummy.filename == hname
                # del dummy

                # allocation using HDF5 dataset directly
                dset = h5py.File(hname, mode="r+")["dummy"]
                dummy = getattr(spd, dclass)(data=dset)
                assert np.array_equal(dummy.data, self.data[dclass])
                assert dummy.mode == "r+", dummy.data.file.mode
                del dummy

                # # allocation with memmaped npy file
                # np.save(fname, self.data[dclass])
                # dummy = getattr(spd, dclass)(filename=fname)
                # assert np.array_equal(dummy.data, self.data[dclass])
                # assert dummy.filename == fname
                # del dummy

                # allocation using memmap directly
                np.save(fname, self.data[dclass])
                mm = open_memmap(fname, mode="r")
                dummy = getattr(spd, dclass)(data=mm)
                assert np.array_equal(dummy.data, self.data[dclass])
                assert dummy.mode == "r"

                # attempt assigning data to read-only object
                with pytest.raises(SPYValueError):
                    dummy.data = self.data[dclass]

                # allocation using array + filename
                del dummy, mm
                dummy = getattr(spd, dclass)(data=self.data[dclass], filename=fname)
                assert dummy.filename == fname
                assert np.array_equal(dummy.data, self.data[dclass])
                del dummy

                # attempt allocation using HDF5 dataset of wrong shape
                h5f = h5py.File(hname, mode="r+")
                del h5f["dummy"]
                dset = h5f.create_dataset("dummy", data=np.ones((self.nChannels,)))
                with pytest.raises(SPYValueError):
                    getattr(spd, dclass)(data=dset)

                # # attempt allocation using illegal HDF5 file
                del h5f["dummy"]
                h5f.create_dataset("dummy1", data=self.data[dclass])
                # FIXME: unused: h5f.create_dataset("dummy2", data=self.data[dclass])
                h5f.close()
                # with pytest.raises(SPYValueError):
                #     getattr(spd, dclass)(hname)

                # allocate with valid dataset of "illegal" file
                dset = h5py.File(hname, mode="r")["dummy1"]
                dummy = getattr(spd, dclass)(data=dset, filename=fname)

                # attempt data access after backing file of dataset has been closed
                dset.file.close()
                with pytest.raises(SPYValueError):
                    dummy.data[0, ...]

                # attempt allocation with HDF5 dataset of closed file
                with pytest.raises(SPYValueError):
                    getattr(spd, dclass)(data=dset)

                # attempt allocation using memmap of wrong shape
                np.save(fname, np.ones((self.nChannels,)))
                with pytest.raises(SPYValueError):
                    getattr(spd, dclass)(data=open_memmap(fname))

                # brief pause so Windows releases file handles before reuse
                time.sleep(0.01)
                del dummy

    # Assignment of trialdefinition array is tested with all members of `classes`
    def test_trialdef(self):
        for dclass in self.classes:
            dummy = getattr(spd, dclass)(self.data[dclass],
                                         trialdefinition=self.trl[dclass])
            # trl columns map onto sampleinfo (start/stop), _t0 and trialinfo
            assert np.array_equal(dummy.sampleinfo, self.trl[dclass][:, :2])
            assert np.array_equal(dummy._t0, self.trl[dclass][:, 2])
            assert np.array_equal(dummy.trialinfo.flatten(), self.trl[dclass][:, 3])

    # Test ``clear`` with `AnalogData` only - method is independent from concrete data object
    @skip_in_vm
    def test_clear(self):
        with tempfile.TemporaryDirectory() as tdir:
            fname = os.path.join(tdir, "dummy.npy")
            data = np.ones((5000, 1000))  # ca. 38.2 MB
            np.save(fname, data)
            del data
            dmap = open_memmap(fname)

            # test consistency and efficacy of clear method:
            # values survive a clear (reloaded from disk) and the second
            # clear must release a measurable amount of memory (>30 MB)
            dummy = AnalogData(dmap)
            data = np.array(dummy.data)
            dummy.clear()
            assert np.array_equal(data, dummy.data)
            mem = memory_usage()[0]
            dummy.clear()
            time.sleep(1)
            assert np.abs(mem - memory_usage()[0]) > 30

            # Delete all open references to file objects b4 closing tmp dir
            del dmap, dummy

    # Test ``_gen_filename`` with `AnalogData` only - method is independent from concrete data object
    def test_filename(self):
        # ensure we're salting sufficiently to create at least `numf`
        # distinct pseudo-random filenames in `__storage__`
        numf = 1000
        dummy = AnalogData()
        fnames = []
        for k in range(numf):
            fnames.append(dummy._gen_filename())
        assert np.unique(fnames).size == numf

    # Object copying is tested with all members of `classes`
    def test_copy(self):

        # test shallow copy of data arrays (hashes must match up, since
        # shallow copies are views in memory)
        for dclass in self.classes:
            dummy = getattr(spd, dclass)(self.data[dclass],
                                         trialdefinition=self.trl[dclass])
            dummy2 = dummy.copy()
            assert dummy.filename == dummy2.filename
            assert hash(str(dummy.data)) == hash(str(dummy2.data))
            assert hash(str(dummy.sampleinfo)) == hash(str(dummy2.sampleinfo))
            assert hash(str(dummy._t0)) == hash(str(dummy2._t0))
            assert hash(str(dummy.trialinfo)) == hash(str(dummy2.trialinfo))

        # test shallow + deep copies of memmaps + HDF5 files
        with tempfile.TemporaryDirectory() as tdir:
            for dclass in self.classes:
                fname = os.path.join(tdir, "dummy.npy")
                hname = os.path.join(tdir, "dummy.h5")
                np.save(fname, self.data[dclass])
                h5f = h5py.File(hname, mode="w")
                h5f.create_dataset("dummy", data=self.data[dclass])
                h5f.close()
                mm = open_memmap(fname, mode="r")

                # hash-matching of shallow-copied memmap
                dummy = getattr(spd, dclass)(data=mm, trialdefinition=self.trl[dclass])
                dummy2 = dummy.copy()
                assert dummy.filename == dummy2.filename
                assert hash(str(dummy.data)) == hash(str(dummy2.data))
                assert hash(str(dummy.sampleinfo)) == hash(str(dummy2.sampleinfo))
                assert hash(str(dummy._t0)) == hash(str(dummy2._t0))
                assert hash(str(dummy.trialinfo)) == hash(str(dummy2.trialinfo))

                # test integrity of deep-copy: new backing file, equal contents
                dummy3 = dummy.copy(deep=True)
                assert dummy3.filename != dummy.filename
                assert np.array_equal(dummy.trialdefinition, dummy3.trialdefinition)
                assert np.array_equal(dummy.data, dummy3.data)
                assert np.array_equal(dummy._t0, dummy3._t0)
                assert np.array_equal(dummy.trialinfo, dummy3.trialinfo)
                assert np.array_equal(dummy.sampleinfo, dummy3.sampleinfo)

                # hash-matching of shallow-copied HDF5 dataset
                # NOTE(review): `h5py.File(hname)` uses h5py's default file
                # mode, which changed across h5py versions — confirm pin
                dummy = getattr(spd, dclass)(data=h5py.File(hname)["dummy"],
                                             trialdefinition=self.trl[dclass])
                dummy2 = dummy.copy()
                assert dummy.filename == dummy2.filename
                assert hash(str(dummy.data)) == hash(str(dummy2.data))
                assert hash(str(dummy.sampleinfo)) == hash(str(dummy2.sampleinfo))
                assert hash(str(dummy._t0)) == hash(str(dummy2._t0))
                assert hash(str(dummy.trialinfo)) == hash(str(dummy2.trialinfo))

                # test integrity of deep-copy
                dummy3 = dummy.copy(deep=True)
                assert dummy3.filename != dummy.filename
                assert np.array_equal(dummy.sampleinfo, dummy3.sampleinfo)
                assert np.array_equal(dummy._t0, dummy3._t0)
                assert np.array_equal(dummy.trialinfo, dummy3.trialinfo)
                assert np.array_equal(dummy.data, dummy3.data)

                # Delete all open references to file objects b4 closing tmp dir
                del mm, dummy, dummy2, dummy3
                time.sleep(0.01)

                # remove file for next round
                os.unlink(hname)
|
{"hexsha": "fa3ffeb7f62cc9006a3a1e49763c78b7c3f4e0aa", "size": 16193, "ext": "py", "lang": "Python", "max_stars_repo_path": "syncopy/tests/test_basedata.py", "max_stars_repo_name": "KatharineShapcott/syncopy", "max_stars_repo_head_hexsha": "7b24eda65cf752e395538db5260cd3075029081f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-01-18T10:10:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T09:49:09.000Z", "max_issues_repo_path": "syncopy/tests/test_basedata.py", "max_issues_repo_name": "KatharineShapcott/syncopy", "max_issues_repo_head_hexsha": "7b24eda65cf752e395538db5260cd3075029081f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 106, "max_issues_repo_issues_event_min_datetime": "2020-10-26T11:13:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T09:34:30.000Z", "max_forks_repo_path": "syncopy/tests/test_basedata.py", "max_forks_repo_name": "KatharineShapcott/syncopy", "max_forks_repo_head_hexsha": "7b24eda65cf752e395538db5260cd3075029081f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-10-24T14:29:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T12:13:48.000Z", "avg_line_length": 43.2967914439, "max_line_length": 130, "alphanum_fraction": 0.5630828136, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3699}
|
import json
import yaml
import copy
import numpy as np
from . import log_util
from .deprecated import deprecated
logger = log_util.get_logger()
def json_or_yaml(filename):
    """
    Guess whether ``filename`` contains JSON or YAML content.

    This function would be obsolete when pyyaml supports yaml 1.2
    With yaml 1.2 pyyaml can also read json files

    :param filename: path of the file to inspect
    :return: ``"json"`` if the file contains at least one standalone comma,
        ``"yaml"`` otherwise
    """
    import re
    from pathlib import Path
    commas = re.compile(r',(?=(?![\"]*[\s\w\?\.\"\!\-\_]*,))(?=(?![^\[]*\]))')
    """
    Find all commas which are standalone
    - not between quotes - comments, answers
    - not between brackets - lists
    """
    file_path = Path(filename)
    # read_text() reads and closes the file; the original opened a handle
    # via file_path.open('r').read() and never closed it (resource leak)
    signs = commas.findall(file_path.read_text())
    return "json" if len(signs) > 0 else "yaml"
def load_dict_from_json_file(filename):
    """Load a JSON file and return the parsed object (usually a dict).

    :param filename: path of the JSON file
    :return: the deserialized object
    """
    # context manager closes the handle (the original leaked it via
    # json.load(open(filename, 'r')))
    with open(filename, 'r') as fp:
        return json.load(fp)
def load_dict_from_yaml_file(filename):
    """Load a YAML file via ``yaml.safe_load`` and return the parsed object.

    :param filename: path of the YAML file
    :return: the deserialized object (usually a dict)
    """
    # context manager closes the handle (the original leaked it via
    # yaml.safe_load(open(filename, 'r')))
    with open(filename, 'r') as fp:
        return yaml.safe_load(fp)
def load_dict_from_json_str(string):
    """Parse a JSON document given as a string.

    :param string: JSON text
    :return: the deserialized object (usually a dict)
    """
    return json.loads(string)
class NPJSONEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalars and arrays.

    NumPy integers/floats become plain Python ``int``/``float`` and
    ndarrays become (nested) lists; anything else is delegated to the
    base-class handler.
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def save_to_json_file(obj, filename, sort_keys=False, indent=4):
    """Serialize ``obj`` to a JSON file using :class:`NPJSONEncoder`.

    Non-dict objects are saved via their ``__dict__``.

    :param obj: dict or arbitrary object to save
    :param filename: destination path
    :param sort_keys: passed through to ``json.dump``
    :param indent: passed through to ``json.dump``
    """
    if not isinstance(obj, dict):
        obj = obj.__dict__
    # context manager guarantees flush/close; the original passed an open()
    # handle directly to json.dump and never closed it
    with open(filename, 'w') as fp:
        json.dump(obj, fp, indent=indent, sort_keys=sort_keys, cls=NPJSONEncoder)
def save_to_yaml_file(obj, filename):
    """Serialize ``obj`` to a YAML file.

    Non-dict objects are saved via their ``__dict__``.

    :param obj: dict or arbitrary object to save
    :param filename: destination path
    """
    if not isinstance(obj, dict):
        obj = obj.__dict__
    # context manager guarantees flush/close; the original passed an open()
    # handle directly to yaml.dump and never closed it
    with open(filename, 'w') as fp:
        yaml.dump(obj, fp)
class ObjDict(dict):
    """A ``dict`` whose entries are also reachable as attributes.

    Attribute access maps one-to-one onto item access, so a missing
    attribute raises ``KeyError`` (not ``AttributeError``).

    TODO: Create an init method that converts nested dicts in this object.
    """

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def __str__(self):
        return self.json_dumps()

    def json_dumps(self, sort_keys=False, indent=2):
        """Render this mapping as a JSON string (NumPy-aware)."""
        return json.dumps(self, indent=indent, sort_keys=sort_keys, cls=NPJSONEncoder)

    def save_to_json_file(self, filename, sort_keys=False, indent=2):
        """Write this mapping to a JSON file; returns self for chaining."""
        save_to_json_file(self, filename, sort_keys, indent)
        return self

    def save_to_yaml_file(self, filename):
        """Write this mapping to a YAML file; returns self for chaining."""
        save_to_yaml_file(self, filename)
        return self

    @staticmethod
    def load_from_file(filename):
        """Load JSON or YAML, auto-detected via ``json_or_yaml``."""
        if json_or_yaml(filename) == "json":
            return ObjDict.load_from_json_file(filename)
        return ObjDict.load_from_yaml_file(filename)

    @staticmethod
    def load_from_json_file(filename):
        """Build an ObjDict from a JSON file."""
        return ObjDict(load_dict_from_json_file(filename))

    @staticmethod
    def load_from_yaml_file(filename):
        """Build an ObjDict from a YAML file."""
        return ObjDict(load_dict_from_yaml_file(filename))

    def deepcopy(self):
        """Return a deep copy of this mapping."""
        return copy.deepcopy(self)
@deprecated("Use ObjDict")
class DictObj(object):
    '''
    Dictionaries that also work like objects

    Usage:
    1. Create by passing a dictionary or nothing.
        mydict = DictObj(d=a_dict)
    2. If created empty, you can populate using one of the load methods:
        load_from_dict
        load_from_json_file
        load_from_json_str
    '''

    def __init__(self, d=None):
        # Wraps the given dict directly (no copy!) as this object's namespace
        if isinstance(d, dict):
            self.__dict__ = d
        # This code is commented out since the str could be a JSON string or file hence use new methods:
        # load_from_json_file or load_from_json_str
        #elif isinstance(d, str): # d is filename # could be JSON String
        #    self.__dict__ = load_dict_from_json(d)
        else:
            logger.info('Making empty DictObj because parameters passed is not a dict')

    def __getitem__(self, key):
        return self.__dict__[key]

    # Return the value for key if key is in the dictionary, else default.
    # If default is not given, it defaults to None
    def get(self, key, default=None):
        return self.__dict__.get(key,default)

    # If key is in the dictionary, return its value.
    # If key is not in, insert key with a value of default and return default.
    def setdefault(self, key,default=None):
        return self.__dict__.setdefault(key,default)

    def pop(self, key, default=None):
        # Remove `key` and return its value, or `default` if absent
        return self.__dict__.pop(key, default)

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]

    def __contains__(self, key):
        return key in self.__dict__

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)

    def __str__(self):
        # JSON rendering of the namespace (see dumps_json)
        return self.dumps_json()

    def update(self, obj):
        # Merge keys from a dict or another DictObj; returns self for chaining
        if isinstance(obj, dict):
            self.__dict__.update(obj)
        else:
            self.__dict__.update(obj.__dict__)
        return self

    def save_to_json_file(self, filename, sort_keys=False, indent=4):
        # Delegates to the module-level save_to_json_file; returns self
        save_to_json_file(self, filename, sort_keys, indent)
        return self

    def save_to_yaml_file(self, filename):
        # Delegates to the module-level save_to_yaml_file; returns self
        save_to_yaml_file(self, filename)
        return self

    def load_from_dict(self, d):
        # Replace the namespace with `d` (no copy); returns self
        self.__dict__ = d
        return self

    def load_from_file(self, filename):
        # Dispatch on the detected file format (see json_or_yaml)
        filetype = json_or_yaml(filename)
        if filetype == "json":
            self.load_from_json_file(filename)
        elif filetype == "yaml":
            self.load_from_yaml_file(filename)
        return self

    def load_from_json_file(self, filename):
        self.__dict__ = load_dict_from_json_file(filename)
        return self

    def load_from_yaml_file(self, filename):
        self.__dict__ = load_dict_from_yaml_file(filename)
        return self

    def load_from_json_str(self, string):
        self.__dict__ = load_dict_from_json_str(string)
        return self

    def dumps_json(self, sort_keys=False, indent=4):
        # NumPy-aware JSON serialization of the namespace
        obj = self.__dict__
        return json.dumps(obj, indent=indent, sort_keys=sort_keys, cls = NPJSONEncoder)

    def deepcopy(self):
        return copy.deepcopy(self)

    def values(self):
        return list(self.__dict__.values())

    def keys(self):
        return list(self.__dict__.keys())
|
{"hexsha": "f2270ef84bea58417889db3f4209f5316dda10f9", "size": 6355, "ext": "py", "lang": "Python", "max_stars_repo_path": "ezai_util/dict.py", "max_stars_repo_name": "armando-fandango/ezai_util", "max_stars_repo_head_hexsha": "261a0b752a66ac0357e6576b7113ac8e2fff5396", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ezai_util/dict.py", "max_issues_repo_name": "armando-fandango/ezai_util", "max_issues_repo_head_hexsha": "261a0b752a66ac0357e6576b7113ac8e2fff5396", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ezai_util/dict.py", "max_forks_repo_name": "armando-fandango/ezai_util", "max_forks_repo_head_hexsha": "261a0b752a66ac0357e6576b7113ac8e2fff5396", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1513761468, "max_line_length": 104, "alphanum_fraction": 0.6546026751, "include": true, "reason": "import numpy", "num_tokens": 1486}
|
# coding:utf-8
import logging
from xml import sax
from math import radians, cos, sin, asin, sqrt
import networkx as nx
class NodePoint(object):
    """A single OSM node: a latitude/longitude pair in decimal degrees."""

    def __init__(self, lat, lon):
        self.lat, self.lon = lat, lon

    def dist(self, other):
        """Great-circle distance in meters from this node to ``other``."""
        return gc_dist(self.lat, self.lon, other.lat, other.lon)
class Way(object):
    """An OSM way: id, ordered node references, raw tags, and a lazily
    derived accessibility profile (``None`` until computed)."""

    def __init__(self, id):
        self.id = id
        self.node_ids = []
        self.tags = {}
        self.accessibility = None

    def update_accessibility(self):
        """(Re)derive the accessibility profile from the collected tags."""
        self.accessibility = WayAccessibility(self.tags)
class WayMap(object):
    """Container for parsed OSM data: node id -> NodePoint, way id -> Way."""

    def __init__(self):
        self.nodes = {}
        self.ways = {}

    def to_networkx(self):
        """Build a directed routing graph.

        Nodes carry lat/lon attributes; one edge is added per way segment
        and traversable direction, weighted by great-circle distance and
        annotated with per-transport accessibility levels.
        """
        G = nx.DiGraph()
        for way in self.ways.values():
            # skip ways that are not traversable in either direction
            if not (way.accessibility.direct_accessible() or way.accessibility.reverse_accessible()):
                continue
            # consecutive node-id pairs form the way's segments
            for n1_id, n2_id in zip(way.node_ids, way.node_ids[1:]):
                n1 = self.nodes[n1_id]
                n2 = self.nodes[n2_id]
                node_dist = n1.dist(n2)
                # Nodes
                # NOTE(review): `G.node` is the pre-2.0 networkx attribute;
                # newer releases spell it `G.nodes` — confirm pinned version
                for n_id in (n1_id, n2_id):
                    if n_id not in G.node:
                        G.add_node(n_id,
                                   lat=self.nodes[n_id].lat,
                                   lon=self.nodes[n_id].lon)
                # Edges
                if way.accessibility.direct_accessible():
                    G.add_edge(n1_id, n2_id,
                               way=way,
                               dist=node_dist,
                               car=way.accessibility.car_direct,
                               bike=way.accessibility.bike_direct,
                               foot=way.accessibility.foot)
                if way.accessibility.reverse_accessible():
                    G.add_edge(n2_id, n1_id,
                               way=way,
                               dist=node_dist,
                               car=way.accessibility.car_reverse,
                               bike=way.accessibility.bike_reverse,
                               foot=way.accessibility.foot)
        return G

    def streets(self):
        """Map street name -> one of the ways carrying that name.

        Later ways with the same name overwrite earlier ones.
        """
        street_names = {}
        for way in self.ways.values():
            if 'highway' in way.tags and 'name' in way.tags:
                street_names[way.tags['name']] = way
        return street_names
class WayParserHandler(sax.handler.ContentHandler):
    """SAX handler collecting OSM <node> and <way> elements into a WayMap."""

    def __init__(self):
        self.data = WayMap()       # accumulated parse result
        self.in_elem = ""          # "way" while inside a <way> element
        self.current_way = None    # Way currently being filled

    def startElement(self, name, attrs):
        if name == "node":
            self.data.nodes[int(attrs['id'])] = NodePoint(float(attrs['lat']), float(attrs['lon']))
        elif name == "way":
            self.current_way = Way(int(attrs['id']))
            self.in_elem = "way"
        elif self.in_elem == "way":
            # children of the current <way>: node references and tags
            if name == "nd":
                self.current_way.node_ids.append(int(attrs['ref']))
            elif name == "tag":
                self.current_way.tags[attrs['k']] = attrs['v']
        # self.in_elem = name

    def endElement(self, name):
        # finalize the way once its closing tag is reached: derive its
        # accessibility, register it, and reset the parse state
        if name == 'way' and self.current_way:
            self.current_way.update_accessibility()
            self.data.ways[self.current_way.id] = self.current_way
            self.current_way = None
            self.in_elem = None
def gc_dist(lat1, lon1, lat2, lon2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees).
    The distance is returned in meters.
    """
    RADIUS_OF_EARTH_IN_KM = 6367
    # work in radians
    rlon1, rlat1, rlon2, rlat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))
    # haversine formula: http://en.wikipedia.org/wiki/Haversine_formula
    sin_half_dlat = sin((rlat2 - rlat1) / 2)
    sin_half_dlon = sin((rlon2 - rlon1) / 2)
    a = sin_half_dlat ** 2 + cos(rlat1) * cos(rlat2) * sin_half_dlon ** 2
    central_angle = 2 * asin(sqrt(a))
    return RADIUS_OF_EARTH_IN_KM * central_angle * 1000
class WayAccessibility(object):
    """Derives car/bike/foot accessibility of an OSM way from its tags.

    The code here is adapted from osm4routing
    https://github.com/Tristramg/osm4routing

    Attributes hold symbolic access levels (e.g. 'car_residential',
    'bike_lane', 'foot_allowed') or the matching '*_forbidden' value.
    ``None`` means "not yet determined" until ``update_unknowns`` runs.
    """

    def __init__(self, attrs):
        """:param attrs: mapping of OSM tag key -> value for one way"""
        self.bike_direct = None
        self.foot = None
        self.car_direct = None
        self.bike_reverse = None
        self.car_reverse = None
        for key, val in attrs.items():
            self.update_accessibility(key, val)
        self.update_unknowns()

    def direct_accessible(self, transport='any'):
        """True if the way is traversable in tag order by ``transport``
        ('car', 'bike', 'foot' or 'any')."""
        if transport == 'car':
            return self.car_direct != 'car_forbidden'
        elif transport == 'bike':
            return self.bike_direct != 'bike_forbidden'
        elif transport == 'foot':
            return self.foot != 'foot_forbidden'
        else:
            return (self.car_direct != 'car_forbidden') or (self.bike_direct != 'bike_forbidden') \
                or (self.foot != 'foot_forbidden')

    def reverse_accessible(self, transport='any'):
        """True if the way is traversable against tag order by ``transport``
        ('car', 'bike', 'foot' or 'any')."""
        if transport == 'car':
            return self.car_reverse != 'car_forbidden'
        elif transport == 'bike':
            return self.bike_reverse != 'bike_forbidden'
        elif transport == 'foot':
            return self.foot != 'foot_forbidden'
        else:
            return (self.car_reverse != 'car_forbidden') or (self.bike_reverse != 'bike_forbidden') \
                or (self.foot != 'foot_forbidden')

    def update_unknowns(self):
        """Resolve still-unknown (None) fields: reverse direction inherits
        the direct value; anything still unknown becomes forbidden."""
        if not self.car_reverse and self.car_direct:
            self.car_reverse = self.car_direct
        if not self.bike_reverse and self.bike_direct:
            # BUG FIX: the original assigned self.bike_reverse to itself,
            # leaving it None so two-way streets ended up 'bike_forbidden'
            # in the reverse direction
            self.bike_reverse = self.bike_direct
        if not self.car_direct:
            self.car_direct = 'car_forbidden'
        if not self.bike_direct:
            self.bike_direct = 'bike_forbidden'
        if not self.car_reverse:
            self.car_reverse = 'car_forbidden'
        if not self.bike_reverse:
            self.bike_reverse = 'bike_forbidden'
        if not self.foot:
            self.foot = 'foot_forbidden'

    def update_accessibility(self, key, val):
        """Fold a single OSM tag (key/value) into the accessibility state."""
        if key == 'highway':
            if val in ("cycleway", "path", "footway", "steps", "pedestrian"):
                self.bike_direct = 'bike_track'
                self.foot = 'foot_allowed'
            elif val in ('primary', 'primary_link'):
                self.car_direct = 'car_primary'
                self.foot = 'foot_allowed'
                self.bike_direct = 'bike_allowed'
            elif val == "secondary":
                self.car_direct = 'car_secondary'
                self.foot = 'foot_allowed'
                self.bike_direct = 'bike_allowed'
            elif val == "tertiary":
                self.car_direct = 'car_tertiary'
                self.foot = 'foot_allowed'
                self.bike_direct = 'bike_allowed'
            elif val in ("unclassified", "residential", "living_street", "road", "service", "track"):
                self.car_direct = 'car_residential'
                self.foot = 'foot_allowed'
                self.bike_direct = 'bike_allowed'
            elif val in ("motorway", "motorway_link"):
                self.car_direct = 'car_motorway'
                self.foot = 'foot_forbidden'
                self.bike_direct = 'bike_forbidden'
            elif val in ("trunk", "trunk_link"):
                self.car_direct = 'car_trunk'
                self.foot = 'foot_forbidden'
                self.bike_direct = 'bike_forbidden'
        elif key == "pedestrian" or key == "foot":
            if val in ("yes", "designated", "permissive"):
                self.foot = 'foot_allowed'
            elif val == "no":
                self.foot = 'foot_forbidden'
            else:
                logging.info("Unhandled key-value pair {}={} ".format(key, val))
        # http://wiki.openstreetmap.org/wiki/Cycleway
        ##// http://wiki.openstreetmap.org/wiki/Map_Features#Cycleway
        elif key == "cycleway":
            if val in ("lane", "yes", "true", "lane_in_the_middle"):
                self.bike_direct = 'bike_lane'
            elif val == "track":
                self.bike_direct = 'bike_track'
            elif val == "opposite_lane":
                self.bike_reverse = 'bike_lane'
            elif val == "opposite_track":
                self.bike_reverse = 'bike_track'
            elif val == "opposite":
                self.bike_reverse = 'bike_allowed'
            elif val == "share_busway":
                self.bike_direct = 'bike_busway'
            elif val == "lane_left":
                self.bike_reverse = 'bike_lane'
            else:
                # unknown cycleway value: fall back to a plain lane
                self.bike_direct = 'bike_lane'
        elif key == "bicycle":
            if val in ("yes" , "permissive" , "destination" , "designated" , "private" , "true"):
                self.bike_direct = 'bike_allowed'
            elif val in ("no", "true"):
                # NOTE: "true" here is unreachable (already matched above)
                self.bike_direct = 'bike_forbidden'
            else:
                logging.info("Unhandled key-value pair {}={} ".format(key, val))
        elif key == "busway":
            if val in ("yes" , "track" , "lane"):
                self.bike_direct = 'bike_busway'
            elif val in ("opposite_lane", "opposite_track"):
                self.bike_reverse = 'bike_busway'
            else:
                self.bike_direct = 'bike_busway'
        elif key == "oneway":
            if val in ("yes", "true", "1"):
                self.car_reverse = 'car_forbidden'
                if self.bike_reverse is None:
                    self.bike_reverse = 'bike_forbidden'
        elif key == "junction":
            if val == "roundabout":
                self.car_reverse = 'car_forbidden'
                if self.bike_reverse is None:
                    self.bike_reverse = 'bike_forbidden'
def read_way_network(filename):
    """Parse an OSM XML file and return its routable graph.

    :param filename: path of the OSM XML file
    :return: directed graph as produced by ``WayMap.to_networkx``
    """
    parser_handler = WayParserHandler()
    sax.parse(filename, parser_handler)
    return parser_handler.data.to_networkx()
# print ways
# for id, way in ways.ways.items()[0:10]:
# print way.node_ids
# print way.tags
# print "direct accessible", way.accessibility.direct_accessible()
# print "reverse accessible", way.accessibility.reverse_accessible()
# G = to_networkx(ways)
# street_names = ways.streets()
# # Find from
# from_name = u'Hollænderdybet'
# to_name = u'Bergthorasgade'
#
# from_node_id = street_names[from_name].node_ids[0]
# to_node_id = street_names[to_name].node_ids[0]
#
# print u"Finding shortest path from {} ({}) to {} ({})".format(from_name, from_node_id, to_name, to_node_id)
# print nx.shortest_path(G, from_node_id, to_node_id, 'dist')
# # #print handler.notecount
# # #return handler.ergebnis
# #
|
{"hexsha": "beb77f11d2112a2af047c9e41e1c921405b12715", "size": 10776, "ext": "py", "lang": "Python", "max_stars_repo_path": "osm.py", "max_stars_repo_name": "Falli0o/map", "max_stars_repo_head_hexsha": "2f1ac57e984d68e2656c547db885ad71fe76f4ba", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "osm.py", "max_issues_repo_name": "Falli0o/map", "max_issues_repo_head_hexsha": "2f1ac57e984d68e2656c547db885ad71fe76f4ba", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "osm.py", "max_forks_repo_name": "Falli0o/map", "max_forks_repo_head_hexsha": "2f1ac57e984d68e2656c547db885ad71fe76f4ba", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8006644518, "max_line_length": 109, "alphanum_fraction": 0.5402746845, "include": true, "reason": "import networkx", "num_tokens": 2454}
|
[STATEMENT]
lemma subst_cv_subst_vb_switch:
shows "(c[bv::=b']\<^sub>c\<^sub>b)[x::=v[bv::=b']\<^sub>v\<^sub>b]\<^sub>c\<^sub>v = c[x::=v]\<^sub>c\<^sub>v[bv::=b']\<^sub>c\<^sub>b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (c[bv::=b']\<^sub>c\<^sub>b)[x::=v[bv::=b']\<^sub>v\<^sub>b]\<^sub>c\<^sub>v = c[x::=v]\<^sub>c\<^sub>v[bv::=b']\<^sub>c\<^sub>b
[PROOF STEP]
by(nominal_induct c rule:c.strong_induct, auto simp add: subst_cev_subst_vb_switch c.fresh)
|
{"llama_tokens": 244, "file": "MiniSail_BTVSubstTypingL", "length": 1}
|
# -*- coding: utf-8 -*-
from nltk.tokenize import RegexpTokenizer
from gensim.matutils import unitvec
from xml.dom import minidom
import numpy as np
# Numeric codes for the entailment labels found in the XML corpus
# (consumed by `read_xml` below)
str_to_entailment = {'none': 0,
                     'entailment': 1,
                     'paraphrase': 2}
# Inverse mapping: numeric code -> label string
entailment_to_str = {v: k for k, v in str_to_entailment.items()}

# Portuguese stop words
stop_words = [
    'de', 'a', 'o', 'que', 'e', 'do', 'da', 'em', 'um', 'para', 'com', 'uma',
    'os', 'no', 'se', 'na', 'por', 'mais', 'as', 'dos', 'como', 'mas', 'ao',
    'das', 'à', 'seu', 'sua', 'ou', 'quando', 'muito', 'já', 'também', 'só',
    'pelo', 'pela', 'até', 'isso', 'entre', 'depois', 'sem', 'mesmo', 'aos',
    'seus', 'quem', 'nas', 'me', 'esse', 'essa', 'num', 'nem', 'suas', 'meu',
    'às', 'minha', 'numa', 'pelos', 'qual', 'lhe', 'deles', 'essas', 'esses',
    'pelas', 'este', 'dele', 'tu', 'te', 'vocês', 'vos', 'lhes', 'meus',
    'minhas', 'teu', 'tua', 'teus', 'tuas', 'nosso', 'nossa', 'nossos', 'nossas',
    'dela', 'delas', 'esta', 'estes', 'estas', 'aquele', 'aquela', 'aqueles',
    'aquelas', 'isto', 'aquilo',
    'então', 'dai', 'daí', 'dum', 'duma', 'nesse', 'nisso', 'pois', 'assim'
]

# POS tags treated as content words (verbs, participles, nouns, adverbs,
# adjectives). NOTE(review): looks like a Portuguese tagset — confirm which
content_words_tags = ['VAUX', 'V',
                      'PCP',
                      'N', 'NPROP',
                      'ADV', 'PDEN', 'PREP+ADV',
                      'ADJ']
def tokenize(text):
    """
    Script for tokenizing Portuguese text according to the Universal
    Dependencies (UD) tokenization standards. This script was not created by
    the UD team; it was based on observation of the corpus.

    https://gist.github.com/erickrf/d699b1a8c09249c36f74eaaa94690ccb

    :param text: raw input text (str)
    :return: list of token strings
    """
    # verbose (?x) + unicode (?u) regex; alternation order is significant
    tokenizer_regexp = r'''(?ux)
    # the order of the patterns is important!!
    # more structured patterns come first
    [a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)+|    # emails
    (?:https?://)?\w{2,}(?:\.\w{2,})+(?:/\w+)*|                  # URLs
    (?:[\#@]\w+)|                     # Hashtags and twitter user names
    (?:[^\W\d_]\.)+|                  # one letter abbreviations, e.g. E.U.A.
    (?:[DSds][Rr][Aa]?)\.|            # common abbreviations such as dr., sr., sra., dra.
    (?:\B-)?\d+(?:[:.,]\d+)*(?:-?\w)*|
        # numbers in format 999.999.999,999, possibly followed by hyphen and alphanumerics
        # \B- avoids picks as F-14 as a negative number
    \.{3,}|                           # ellipsis or sequences of dots
    \w+|                              # alphanumerics
    -+|                               # any sequence of dashes
    \S                                # any non-space character
    '''
    tokenizer = RegexpTokenizer(tokenizer_regexp)
    return tokenizer.tokenize(text)
def cosine_similarity(vec1, vec2):
    """Cosine similarity: dot product of the two unit-normalized vectors.

    :param vec1: first vector
    :param vec2: second vector
    :return: similarity in [-1, 1]
    """
    unit1 = unitvec(vec1)
    unit2 = unitvec(vec2)
    return np.dot(unit1, unit2)
def get_answers(df_data, model, labels_with_sents):
    """Compare every sentence in ``df_data`` against each reference sentence.

    :param df_data: DataFrame with a 'sentence' column
    :param model: classifier exposing ``predict([[sent_a, sent_b]])``
    :param labels_with_sents: mapping label -> reference sentence
    :return: (n_rows, n_labels) array of binary predictions (0 = class 0,
        1 = any other class)
    """
    predictions = []
    for idx in df_data.index:
        current = df_data.loc[idx, 'sentence']
        row = [
            0 if model.predict([[current, reference]])[0] == 0 else 1
            for reference in labels_with_sents.values()
        ]
        predictions.append(row)
    return np.asarray(predictions)
def read_xml(xml_path):
    """Parse an RTE-style XML file into (pairs, labels).

    Every ``<pair>`` element contributes one ``[text, hypothesis]`` pair
    (stripped contents of its ``<t>`` and ``<h>`` children) and one integer
    label, obtained by mapping the pair's lower-cased ``entailment``
    attribute through the module-level ``str_to_entailment`` dict.

    Returns a numpy array of string pairs and a plain list of labels.
    """
    document = minidom.parse(xml_path)
    texts = []
    hypotheses = []
    labels = []
    for pair in document.getElementsByTagName('pair'):
        texts.append(pair.getElementsByTagName('t')[0].childNodes[0].data.strip())
        hypotheses.append(pair.getElementsByTagName('h')[0].childNodes[0].data.strip())
        labels.append(str_to_entailment[pair.attributes['entailment'].value.lower()])
    paired = [[text, hypothesis] for text, hypothesis in zip(texts, hypotheses)]
    return np.asarray(paired), labels
|
{"hexsha": "6cde620059cba1f143df380513c3e2671a4daccf", "size": 3959, "ext": "py", "lang": "Python", "max_stars_repo_path": "anaadementia/utils.py", "max_stars_repo_name": "lbsantos/ANAA-Dementia", "max_stars_repo_head_hexsha": "fb1b013f0de45526283d06291e99b70bf858a19d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "anaadementia/utils.py", "max_issues_repo_name": "lbsantos/ANAA-Dementia", "max_issues_repo_head_hexsha": "fb1b013f0de45526283d06291e99b70bf858a19d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "anaadementia/utils.py", "max_forks_repo_name": "lbsantos/ANAA-Dementia", "max_forks_repo_head_hexsha": "fb1b013f0de45526283d06291e99b70bf858a19d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5054945055, "max_line_length": 90, "alphanum_fraction": 0.5610002526, "include": true, "reason": "import numpy", "num_tokens": 1201}
|
# -*- coding: utf-8 -*-
# Krzysztof Joachimiak 2018
# sciquence: Time series & sequences in Pythonn
#
# Binarizers
# Author: Krzysztof Joachimiak
#
# License: MIT
import sys
sys.path.append("..")
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
import copy
#from sciquence.utils.docstring import inherit_docstring
#@inherit_docstring
class ClasswiseBinarizer(BaseEstimator, TransformerMixin):
    '''
    Binarize each column of a matrix against its own threshold.

    Useful in tagging tasks where every class (one column of the input)
    needs an independently tuned decision threshold.

    Parameters
    ----------
    thresholds: list of float or numpy.ndarray
        One binarization threshold per class/column.

    Examples
    --------
    >>> X_binarized = ClasswiseBinarizer(thresholds=[.5, .4, .3]).transform(X)
    '''

    def __init__(self, thresholds):
        # TODO: axis?
        self.thresholds = thresholds

    def fit(self, X, y=None):
        '''No-op; present only for scikit-learn API compatibility.'''
        return self

    def transform(self, X, y=None, copy=False):
        '''
        Binarize ``X`` columnwise: entry (i, j) becomes 1.0 when
        ``X[i, j] >= self.thresholds[j]`` and 0.0 otherwise.

        Parameters
        ----------
        X: numpy.ndarray
            Matrix of probabilities/scores.
        y: None
            Ignored; present for API compatibility.
        copy: bool
            Ignored here — the comparison always allocates a new array.

        Returns
        -------
        numpy.ndarray
            Float array of zeros and ones, same shape as ``X``.
        '''
        above_threshold = X >= self.thresholds
        return above_threshold.astype(float)
def binarize_classwise(X, thresholds):
    '''
    Binarize a matrix columnwise, one threshold per column.

    Functional counterpart of ClasswiseBinarizer: entry (i, j) of the
    result is 1.0 when ``X[i, j] >= thresholds[j]`` and 0.0 otherwise.

    Parameters
    ----------
    X: numpy.ndarray
        Matrix of probabilities/scores.
    thresholds: list of float or numpy.ndarray
        One binarization threshold per column.

    Returns
    -------
    numpy.ndarray
        Float array of zeros and ones, same shape as ``X``.

    Examples
    --------
    >>> binarize_classwise(X, [.5, .4, .3])
    '''
    return np.greater_equal(X, thresholds).astype(float)
## TODO: ClasswiseMeanBinarizer
if __name__ == '__main__':
    # Smoke test: binarize a random 10x3 matrix and show input vs. output.
    # Note: the original used Python-2 `print X` statements, which make the
    # whole module a SyntaxError under Python 3; the parenthesized form
    # works under both Python 2 and 3.
    X = np.random.rand(10, 3)
    print(X)

    # Binarizing
    bX = ClasswiseBinarizer(thresholds=[.5, .4, .3]).transform(X)
    print(bX)
|
{"hexsha": "015b4df28dae5dd25ded6e5f1f9ef738275e312e", "size": 3864, "ext": "py", "lang": "Python", "max_stars_repo_path": "sciquence/postprocessing/binarizer.py", "max_stars_repo_name": "krzjoa/sciquence", "max_stars_repo_head_hexsha": "6a5f758c757200fffeb0fdc9206462f1f89e2444", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-10-23T17:59:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T03:01:30.000Z", "max_issues_repo_path": "sciquence/postprocessing/binarizer.py", "max_issues_repo_name": "krzjoa/sciquence", "max_issues_repo_head_hexsha": "6a5f758c757200fffeb0fdc9206462f1f89e2444", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-08-25T19:24:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-05T12:16:10.000Z", "max_forks_repo_path": "sciquence/postprocessing/binarizer.py", "max_forks_repo_name": "krzjoa/sciquence", "max_forks_repo_head_hexsha": "6a5f758c757200fffeb0fdc9206462f1f89e2444", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-02-28T09:47:53.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-25T19:24:16.000Z", "avg_line_length": 25.932885906, "max_line_length": 82, "alphanum_fraction": 0.5287267081, "include": true, "reason": "import numpy", "num_tokens": 1370}
|
import pytest
import ast
from codemodel.type_validation.type_validation import *
class TestTypeValidation(object):
    """Unit tests for the TypeValidation registry."""

    def setup(self):
        # Fresh registry backed by the standard validator factory.
        self.validation = TypeValidation(
            [StandardValidatorFactory(STANDARD_TYPES_DICT)]
        )

    def test_register(self):
        # Registering appends the factory to an initially empty list.
        empty_validation = TypeValidation([])
        assert empty_validation.factories == []
        std_factory = StandardValidatorFactory(STANDARD_TYPES_DICT)
        empty_validation.register(std_factory)
        assert empty_validation.factories == [std_factory]

    def test_getitem(self):
        # Lookup lazily builds a validator, then caches it per type string.
        assert self.validation._validators == {}
        first = self.validation['int']
        assert self.validation._validators == {'int': first}
        second = self.validation['int']
        assert second is first  # idempotent: cached instance is reused

    def test_getitem_error(self):
        # Unknown type strings raise KeyError.
        with pytest.raises(KeyError):
            self.validation['foo']
class ValidatorTester(object):
    """Shared harness for validator tests.

    Subclasses must set ``type_str``, ``good_values``, ``bad_values`` and
    ``obj`` in their ``setup`` before calling ``super().setup()``.
    """

    def setup(self):
        self.factory = StandardValidatorFactory(STANDARD_TYPES_DICT)
        self.validator = self.factory.create(self.type_str)

    def test_validator(self):
        # Accepts every good value, rejects every bad one.
        for candidate in self.good_values:
            assert self.validator.validate(candidate)
        for candidate in self.bad_values:
            assert not self.validator.validate(candidate)

    def test_raises_error(self):
        # Validating an already-typed object is a TypeError.
        with pytest.raises(TypeError):
            self.validator.validate(self.obj)

    def test_is_valid(self):
        assert self.validator.is_valid(self.obj)

    def test_factory_is_my_type(self):
        assert self.factory.is_my_type(self.type_str)

    def test_factory_bad_type(self):
        assert not self.factory.is_my_type("foo")
class TestIntValidation(ValidatorTester):
    """Validator checks for the 'int' type string."""

    def setup(self):
        self.type_str = 'int'
        self.good_values = ["5"]
        self.bad_values = ["5.1", "Five"]
        self.obj = 5
        super().setup()

    def test_ast(self):
        # "5" should become an ast.Num node carrying the integer 5.
        node = self.validator.to_ast("5")
        assert isinstance(node, ast.Num)
        assert node.n == ast.Num(5).n
class TestFloatValidation(ValidatorTester):
    """Validator checks for the 'float' type string (ints also accepted)."""

    def setup(self):
        self.type_str = 'float'
        self.good_values = ["5", "5.1"]
        self.bad_values = ["Five"]
        self.obj = 5.1
        super().setup()

    def test_ast(self):
        # "5.1" should become an ast.Num node carrying the float 5.1.
        node = self.validator.to_ast("5.1")
        assert isinstance(node, ast.Num)
        assert node.n == ast.Num(5.1).n
class TestStringValidation(ValidatorTester):
    """Validator checks for the 'str' type string — everything validates."""

    def setup(self):
        self.type_str = 'str'
        self.good_values = ["5", "5.1", "Five"]
        self.bad_values = []
        self.obj = "Five"
        super().setup()

    def test_ast(self):
        # "Five" should become an ast.Str node carrying the same text.
        node = self.validator.to_ast("Five")
        assert isinstance(node, ast.Str)
        assert node.s == ast.Str("Five").s

    def test_raises_error(self):
        pass  # strings never fail validation, so no TypeError to check
class TestBoolValidation(ValidatorTester):
    """Validator checks for bools, which use the dedicated BoolValidator."""

    def setup(self):
        # BoolValidator doubles as its own factory, so no create() here.
        self.factory = BoolValidator()
        self.validator = self.factory
        self.type_str = 'bool'
        self.good_values = [True, False]
        try:
            import numpy as np
        except ImportError:
            pass  # numpy is optional; skip its bool scalars when absent
        else:
            self.good_values.extend([np.True_, np.False_])
        self.bad_values = ['True', 'False']
        self.obj = True

    def test_ast(self):
        # AST for bools changes in various Pythons; ast.Name,
        # ast.NameConstant, ast.Constant. So we ask for whatever this Python
        # version gives.
        expected = ast.parse('True').body[0].value
        assert self.validator.to_ast(True).value == expected.value

    def test_raises_error(self):
        pass  # bool validation never raises

    def test_create(self):
        assert self.factory.create('bool') == self.validator

    def test_to_instance(self):
        for inst in (True, False):
            assert self.validator.to_instance(inst) == inst
|
{"hexsha": "8beaef7cf98bc4ebb4546eaf1d3f8677793ce071", "size": 4037, "ext": "py", "lang": "Python", "max_stars_repo_path": "codemodel/tests/type_validation/test_type_validation.py", "max_stars_repo_name": "dwhswenson/codemodel", "max_stars_repo_head_hexsha": "f881f81054eaa89e93469135ad8c2ae33ea06841", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-11T19:38:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T12:16:09.000Z", "max_issues_repo_path": "codemodel/tests/type_validation/test_type_validation.py", "max_issues_repo_name": "dwhswenson/codemodel", "max_issues_repo_head_hexsha": "f881f81054eaa89e93469135ad8c2ae33ea06841", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-11-11T18:25:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-19T11:55:18.000Z", "max_forks_repo_path": "codemodel/tests/type_validation/test_type_validation.py", "max_forks_repo_name": "dwhswenson/codemodel", "max_forks_repo_head_hexsha": "f881f81054eaa89e93469135ad8c2ae33ea06841", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6838235294, "max_line_length": 76, "alphanum_fraction": 0.6252167451, "include": true, "reason": "import numpy", "num_tokens": 857}
|
import numpy as np
import random
def create_batches(X_train, Y_train, batch_size, classes_num=10):
    """Build class-balanced mini-batches with label smoothing.

    Groups training samples by class, then draws an equal number of samples
    per class (batch_size // classes_num) into every batch; any remainder
    of a batch is filled with uniformly random samples from the whole set.
    Labels are one-hot vectors softened with label smoothing (factor 0.1).

    Fixes vs. the original: Python-2 ``xrange`` replaced with ``range``
    (NameError under Python 3), and ``random.sample`` is now given a
    ``range`` object instead of a numpy array (Python 3 requires the
    population to be a Sequence).

    Parameters:
        X_train: array-like of images, each of shape (28, 28, 1).
        Y_train: one-hot labels, shape (len(X_train), classes_num).
        batch_size: samples per batch.
        classes_num: number of classes (default 10).

    Returns:
        (batches_X, batches_Y): lists of float32 arrays of shapes
        (batch_size, 28, 28, 1) and (batch_size, classes_num).
    """
    # Bucket the sample images by their class index.
    per_class = [[] for _ in range(classes_num)]
    for i in range(len(X_train)):
        label = np.argmax(Y_train[i])  # index of the 1 in the one-hot row
        per_class[label].append(X_train[i])

    smoothing_factor = 0.1  # label smoothing strength
    s = int(batch_size / classes_num)  # samples taken from each class per batch
    no_of_batches = int(len(X_train) / batch_size)

    # Independent shuffled index order for every class bucket.
    shuffled_indices_per_class = []
    for i in range(classes_num):
        order = np.arange(len(per_class[i]))
        np.random.shuffle(order)
        shuffled_indices_per_class.append(order)

    batches_X = []
    batches_Y = []
    for i in range(no_of_batches):
        # Randomize which class fills which slot of the batch.
        shuffled_class_indices = np.arange(classes_num)
        np.random.shuffle(shuffled_class_indices)

        batch_Y = np.zeros((batch_size, classes_num), np.float32)
        batch_X = np.zeros((batch_size, 28, 28, 1), np.float32)

        for index in range(classes_num):
            class_index = shuffled_class_indices[index]
            for j in range(s):
                # Take the next s unseen samples of this class for batch i.
                batch_X[(index * s) + j] = per_class[class_index][
                    shuffled_indices_per_class[class_index][i * s + j]]
                batch_Y[(index * s) + j][class_index] = 1
                # Soften the one-hot target (label smoothing).
                batch_Y[(index * s) + j] = (1 - smoothing_factor) * batch_Y[
                    (index * s) + j] + smoothing_factor / classes_num

        # rs: random samples needed to fill the batch when batch_size is not
        # divisible by classes_num.
        rs = batch_size - s * classes_num
        # range(), not np.arange(): Python 3's random.sample needs a Sequence.
        rand = random.sample(range(len(X_train)), rs)
        j = 0
        for k in range(s * classes_num, batch_size):
            batch_X[k] = X_train[int(rand[j])]
            batch_Y[k] = Y_train[int(rand[j])]
            batch_Y[k] = (1 - smoothing_factor) * batch_Y[k] + smoothing_factor / classes_num
            j += 1

        batches_X.append(batch_X)
        batches_Y.append(batch_Y)

    return batches_X, batches_Y
def random_crop(img):
t = 28
# result = np.zeros_like((img))
c = np.random.randint(0, 5)
if c == 0:
crop = img[4:t, 0:-4]
elif c == 1:
crop = img[0:-4, 0:-4]
elif c == 2:
crop = img[2:-2, 2:-2]
elif c == 3:
crop = img[4:t, 4:t]
elif c == 4:
crop = img[0:-4, 4:t]
# translating cropped position
# over the original image
c = np.random.randint(0, 5)
if c == 0:
img[4:t, 0:-4] = crop[:]
elif c == 1:
img[0:-4, 0:-4] = crop[:]
elif c == 2:
img[2:-2, 2:-2] = crop[:]
elif c == 3:
img[4:t, 4:t] = crop[:]
elif c == 4:
img[0:-4, 4:t] = crop[:]
return img
def augment_batch(batch_X):  # real-time data augmentation during training
    """Augment a batch of 28x28x1 images in place of training-time jitter.

    Each image gets a 50% chance of a horizontal flip, then a 1-in-3 chance
    of a crop-and-translate via ``random_crop``; otherwise it passes
    through unchanged.  Fix vs. the original: Python-2 ``xrange`` replaced
    with ``range`` (NameError under Python 3).

    Note: flips are written back into ``batch_X`` itself (in-place), while
    the returned array is a fresh buffer.

    Parameters:
        batch_X: array-like of images, each of shape (28, 28, 1).

    Returns:
        numpy array of shape (len(batch_X), 28, 28, 1).
    """
    aug_batch_X = np.zeros((len(batch_X), 28, 28, 1))
    for i in range(len(batch_X)):
        hf = np.random.randint(0, 2)
        if hf == 1:  # 50-50 random chance of a horizontal flip
            batch_X[i] = np.fliplr(batch_X[i])
        # Remove the cropping below to disable random crops. Ideally the
        # images would be mirror-padded beforehand so no border is lost.
        c = np.random.randint(0, 3)
        if c == 1:
            # one-in-three chance: crop a 24x24 window and translate it
            aug_batch_X[i] = random_crop(batch_X[i])
        else:
            aug_batch_X[i] = batch_X[i]
    return aug_batch_X
def shuffle_batch(batch_X, batch_Y):
    """Shuffle a batch, keeping each image paired with its label.

    Fixes vs. the original: Python-2 ``xrange`` replaced with ``range``,
    and ``random.sample`` is given a ``range`` object instead of a numpy
    array (Python 3 requires the population to be a Sequence).

    Parameters:
        batch_X: sequence of images.
        batch_Y: sequence of labels, aligned with batch_X.

    Returns:
        (shuffled_X, shuffled_Y): numpy arrays holding the same items in a
        shared random order.
    """
    # Sampling the full index range without replacement yields a permutation.
    shuffle = random.sample(range(len(batch_X)), len(batch_X))
    shuffled_batch_X = []
    shuffled_batch_Y = []
    for i in range(len(batch_X)):
        shuffled_batch_X.append(batch_X[int(shuffle[i])])
        shuffled_batch_Y.append(batch_Y[int(shuffle[i])])
    shuffled_batch_X = np.array(shuffled_batch_X)
    shuffled_batch_Y = np.array(shuffled_batch_Y)
    return shuffled_batch_X, shuffled_batch_Y
def getbatch(X_train, Y_train, batch_size, classes_num = 10):
    """One-call pipeline: balanced batches -> augmentation -> shuffling.

    Runs ``create_batches``, applies ``augment_batch`` to every image
    batch, then shuffles each augmented batch together with its labels.
    Fix vs. the original: Python-2 ``xrange`` replaced with ``range``
    (NameError under Python 3).

    Returns:
        (s_batches_X, s_batches_Y): lists of shuffled, augmented batches.
    """
    batches_X, batches_Y = create_batches(X_train, Y_train, batch_size, classes_num)

    aug_batches_X = []
    for batch in batches_X:
        aug_batches_X.append(augment_batch(batch))

    s_batches_X = []
    s_batches_Y = []
    for i in range(len(aug_batches_X)):
        s_batch_X, s_batch_Y = shuffle_batch(aug_batches_X[i], batches_Y[i])
        s_batches_X.append(s_batch_X)
        s_batches_Y.append(s_batch_Y)
    return s_batches_X, s_batches_Y
|
{"hexsha": "c37384e8b0cf4910544f8ec581b6e1009fa4aa0c", "size": 5185, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataUtils2.py", "max_stars_repo_name": "styanddty/Wide-resnet-fashionMnist", "max_stars_repo_head_hexsha": "f281f1e3e3c2b55be20620290d6e0535fcc70d98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-05-12T01:53:20.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-10T09:35:01.000Z", "max_issues_repo_path": "dataUtils2.py", "max_issues_repo_name": "styanddty/Wide-resnet-fashionMnist", "max_issues_repo_head_hexsha": "f281f1e3e3c2b55be20620290d6e0535fcc70d98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataUtils2.py", "max_forks_repo_name": "styanddty/Wide-resnet-fashionMnist", "max_forks_repo_head_hexsha": "f281f1e3e3c2b55be20620290d6e0535fcc70d98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-25T20:03:56.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-25T20:03:56.000Z", "avg_line_length": 34.5666666667, "max_line_length": 148, "alphanum_fraction": 0.6198649952, "include": true, "reason": "import numpy", "num_tokens": 1372}
|
import chess
import numpy as np
from Game import Game
from utils import encode_board
# encodings and mirror_move based on
# https://github.com/yeekit24/alpha-zero-general/blob/3fa23d76ed618f732e9485bd1bd6a44835e353a5/_chess/ChessGame.py
def from_move(move: chess.Move):
    """Flatten a move into a single action index: from_square * 64 + to_square."""
    origin = move.from_square
    destination = move.to_square
    return origin * 64 + destination
def to_move(action: int):
    """Inverse of from_move: split a flat action index back into a Move."""
    from_sq, to_sq = divmod(action, 64)
    return chess.Move(from_sq, to_sq)
def mirror_move(move: chess.Move):
    """Reflect a move vertically (rank 1 <-> rank 8) for the mirrored board."""
    mirrored_from = chess.square_mirror(move.from_square)
    mirrored_to = chess.square_mirror(move.to_square)
    return chess.Move(mirrored_from, mirrored_to)
class ChessGame(Game):
    """Adapter exposing python-chess to the alpha-zero-general Game API.

    Boards are chess.Board instances; actions are flat integers encoding
    (from_square, to_square) pairs via the module-level from_move/to_move.
    """

    def __init__(self):
        super().__init__()
        self.board = chess.Board()
        # Encoded start position; only its shape is used (getBoardSize).
        self.encoded_board = encode_board(self.board)

        # Precomputed action index -> chess.Move table for all 64*64 pairs.
        self.action_names = {}
        counter = 0
        for square_from in chess.SQUARES:
            for square_to in chess.SQUARES:
                self.action_names[counter] = chess.Move(square_from, square_to)
                counter += 1

    def getInitBoard(self):
        """Return the starting position."""
        return self.board

    def getBoardSize(self):
        """Shape of the encoded board representation (from encode_board)."""
        return self.encoded_board.shape

    def getActionSize(self):
        # an action is a move from any square to any square
        # ergo, total number of actions is 64^2
        return pow(8 * 8, 2)

    def get_action_names(self):
        """Return the action index -> chess.Move lookup table."""
        return self.action_names

    def getNextState(self, board: chess.Board, player, action):
        """Apply `action` for `player`; return (new_board, -player).

        Actions are decoded for a white-to-move (canonical) board; when it
        is black's turn the decoded move is mirrored back to black's frame.
        """
        decoded_move = to_move(action)
        if not board.turn:
            decoded_move = mirror_move(decoded_move)

        b = board.copy()  # avoid mutating the caller's board
        b.push(decoded_move)
        return b, -player

    def getValidMoves(self, board: chess.Board, player):
        """Return a 0/1 vector of length getActionSize() marking legal moves.

        No mirroring here — presumably the caller passes the canonical
        (white-to-move) board from getCanonicalForm; verify against callers.
        """
        valid_moves = np.zeros(shape=(self.getActionSize()))
        for move in board.legal_moves:
            move_index = from_move(move)
            valid_moves[move_index] = 1
        return valid_moves

    def getGameEnded(self, board: chess.Board, player):
        """Return 0 while in progress, 0.5 on a draw, else +/-1.

        NOTE(review): the `player` argument is unused — the +/-1 result
        reflects the absolute winner colour (True == white wins), not the
        perspective of `player`; confirm callers expect that convention.
        """
        if not board.is_game_over():
            return 0
        else:
            if board.outcome().winner is None:
                return 0.5
            return 1 if board.outcome().winner else -1

    def getCanonicalForm(self, board: chess.Board, player):
        """White-to-move view of the position: mirror when it is black's turn."""
        return board if board.turn else board.mirror()

    def getSymmetries(self, board, pi):
        """No symmetry augmentation for chess; return the identity pair."""
        return [(board, pi)]

    def stringRepresentation(self, board: chess.Board):
        """FEN string of the position; usable as a hashable cache key."""
        return board.fen()
# for i, piece_name in enumerate(["K", "Q", "B", "N", "R", "P"]):
# section = encoded_board[i * 8: i * 8 + 8]
#
# for rank_num, rank in enumerate(section):
# for file_num, file in enumerate(rank):
# square = chess.square(file_num, rank_num)
# if file == 1:
# adapted_piece = piece_name.upper()
# elif file == -1:
# adapted_piece = piece_name.lower()
# else:
# adapted_piece = None
#
# if adapted_piece is not None:
# parsed_piece = chess.Piece.from_symbol(adapted_piece)
# decoded_board.set_piece_at(square, parsed_piece)
#
# decoded_board.turn = player == 1
#
# b = chess.Board()
#
# b.push(chess.Move.from_uci("e2e4"))
# b.push(chess.Move.from_uci("e7e5"))
# print(b, end="\n---\n")
# encoded = encode_board(b)
# print(encoded)
# print(get_board(encoded, 1))
|
{"hexsha": "6dc4245dfb776b36253a4b54f815bb4a76d26e7b", "size": 3405, "ext": "py", "lang": "Python", "max_stars_repo_path": "chess_game/ChessGame.py", "max_stars_repo_name": "TheMessik/alpha-zero-general", "max_stars_repo_head_hexsha": "ef4b65c7c0f2913efa155ce27985ac6dcbecc555", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-16T11:12:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T11:12:05.000Z", "max_issues_repo_path": "chess_game/ChessGame.py", "max_issues_repo_name": "TheMessik/alpha-zero-general", "max_issues_repo_head_hexsha": "ef4b65c7c0f2913efa155ce27985ac6dcbecc555", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chess_game/ChessGame.py", "max_forks_repo_name": "TheMessik/alpha-zero-general", "max_forks_repo_head_hexsha": "ef4b65c7c0f2913efa155ce27985ac6dcbecc555", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6134453782, "max_line_length": 114, "alphanum_fraction": 0.6064610866, "include": true, "reason": "import numpy", "num_tokens": 834}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.